/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

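/*
 * The target_loads table alternates load and frequency values:
 * load0 freq1:load2 freq3:... (store_target_loads() below accepts an
 * odd number of tokens separated by spaces or colons).  load0 applies
 * below freq1, load2 from freq1 up to freq3, and so on; frequencies
 * are in kHz.  For example (illustrative values, assuming the usual
 * global cpufreq sysfs location):
 *
 *   echo "85 1000000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * targets 85% load below 1 GHz and 90% load at or above 1 GHz.
 */
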
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

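/*
 * Note: cpu_timer is deferrable (see cpufreq_interactive_init()), so it
 * does not by itself wake an idle CPU.  cpu_slack_timer is a regular
 * timer whose sole purpose is to force a wakeup up to timer_slack_val
 * usecs after the normal sample point, so that a CPU idling above
 * policy->min gets re-evaluated and ramped down; the re-evaluation
 * itself happens on the idle-exit path.
 */
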
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

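/*
 * Illustrative example (hypothetical values): with a uniform target
 * load of 90 and loadadjfreq = 45,000,000 (50% load at 900000 kHz),
 * the first pass requests 45,000,000 / 90 = 500000 kHz, and the lowest
 * table frequency at or above that is chosen.  The freqmin/freqmax
 * bounds only matter when target_loads maps different frequency ranges
 * to different loads and successive estimates oscillate between them.
 */
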
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

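/*
 * cputime_speedadj accumulates active time multiplied by the frequency
 * the CPU ran at, so (cputime_speedadj / wall time) * 100 is the
 * percent load scaled by CPU frequency ("loadadjfreq" in
 * cpufreq_interactive_timer()); dividing that by a candidate frequency
 * estimates the percent load the CPU would see at that frequency.
 */
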
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

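/*
 * CPUs in a policy share a clock, so the speedchange thread applies the
 * highest target_freq requested by any CPU in the policy, rounded down
 * to a table entry via CPUFREQ_RELATION_H.
 */
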
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

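/*
 * On each completed frequency transition, snapshot the load accounting
 * of every CPU in the affected policy so that cputime_speedadj
 * integrates time at (approximately) the frequency actually in effect
 * during each interval.
 */
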
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

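/*
 * The slack timer exists only to generate a wakeup on an otherwise idle
 * CPU; speed re-evaluation is driven from the idle-exit path, so the
 * handler has nothing to do.
 */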
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");