/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);
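/*
 * target_loads is a flattened array of load/freq pairs: even indices
 * hold target loads (%), odd indices hold the frequencies (in kHz, as
 * listed in the frequency table) at which the following load takes
 * effect.  E.g. the sysfs string "85 1000000:90" is stored as
 * {85, 1000000, 90}: target 85% load below 1 GHz, 90% at and above it.
 */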

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;
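/*
 * The main per-CPU timer is deferrable and so will not itself wake an
 * idle CPU.  The slack timer is an ordinary (non-deferrable) timer with
 * a no-op handler: it exists only to wake the CPU so the idle-exit
 * notifier can re-evaluate, and drop, a speed held above policy->min.
 */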

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
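/*
 * Worked example (numbers hypothetical): at policy->cur = 1,000,000 kHz
 * and 45% load, loadadjfreq = 45 * 1,000,000.  With a target load of 90
 * the first candidate is 45,000,000 / 90 = 500,000 kHz, rounded to the
 * next table frequency at or above (CPUFREQ_RELATION_L).  The loop then
 * repeats the lookup using the new frequency's own target load, bisecting
 * between freqmin and freqmax until the choice stops changing.
 */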

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

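/*
 * Snapshot idle/wall time and accumulate active_time * cur_freq into
 * cputime_speedadj.  Dividing the accumulated sum by elapsed wall time
 * later yields an active-time-weighted average frequency, from which
 * cpufreq_interactive_timer() derives the CPU load estimate.
 */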
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

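	/*
	 * cputime_speedadj / delta_time is the active-time-weighted average
	 * frequency over the sample; multiplying by 100 yields loadadjfreq
	 * (percent-load times frequency), so dividing by a frequency gives
	 * the load in percent relative to that speed.
	 */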
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);
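/*
 * Once the attribute group below is registered (on cpufreq_global_kobject,
 * typically /sys/devices/system/cpu/cpufreq), this is tunable via e.g.:
 *   echo "85 1000000:90 1700000:99" > .../cpufreq/interactive/target_loads
 */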

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
				struct attribute *attr, const char *buf,
				size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
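/*
 * boostpulse is write-only; writing any parseable value raises every
 * online CPU to at least hispeed_freq for boostpulse_duration usecs:
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */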

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);

			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");