/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* High speed to bump to from low speed on a burst of load (defaults to max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample period of the timer used to reevaluate CPU speed, in usecs.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

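/*
 * go_hispeed_load, hispeed_freq and above_hispeed_delay_val together
 * shape the burst response: a sampled load at or above go_hispeed_load
 * (or an active boost) bumps the CPU to at least hispeed_freq, and the
 * speed is raised beyond hispeed_freq only after the CPU has run at or
 * above hispeed_freq for above_hispeed_delay_val usecs.
 */
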
/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

        mod_timer_pinned(&pcpu->cpu_timer, expires);
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_lock(&pcpu->load_lock);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock(&pcpu->load_lock);
}

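/*
 * target_loads holds alternating load and frequency values:
 * load [freq load]... (store_target_loads() enforces an odd token
 * count).  Even indices are target loads; each odd index holds the
 * frequency at which the following load value takes effect.
 */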
static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock(&target_loads_lock);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}
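
/*
 * Worked example, assuming a hypothetical 300/600/900/1200 MHz table
 * and a uniform target load of 90: a fully busy CPU at 600000 kHz has
 * loadadjfreq = 600000 * 100, and loadadjfreq / 90 = 666666, which
 * CPUFREQ_RELATION_L rounds up to 900000 kHz.  The next pass maps
 * 900000 kHz back to itself and the loop terminates; at 900 MHz the
 * same work is ~67% load, below the 90% target.
 */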

static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

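/*
 * cputime_speedadj accumulates (active time * current frequency), so
 * dividing it by elapsed wall time gives the busy fraction scaled by
 * frequency.  Multiplying by 100 yields loadadjfreq in freq * percent
 * units, and dividing that by a frequency recovers the percent load
 * the tracked work would represent at that frequency.
 */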
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock(&pcpu->load_lock);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock(&pcpu->load_lock);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        if (cpu_load >= go_hispeed_load || boosted) {
                if (pcpu->target_freq < hispeed_freq) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

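/*
 * The speedchange task services the CPUs flagged in speedchange_cpumask:
 * for each one it drives the policy to the highest target_freq among the
 * CPUs sharing that policy, since those CPUs share a single clock.
 */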
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                        pcpu->target_freq,
                                                        pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Refresh the floor frequency and the time it was last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

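/*
 * Sample load on every frequency transition so that each interval
 * accumulated into cputime_speedadj is credited at the frequency that
 * was actually in effect during that interval.
 */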
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock(&pjcpu->load_lock);
                        update_load(cpu);
                        spin_unlock(&pjcpu->load_lock);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock(&target_loads_lock);
        return ret;
}

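/*
 * Accepts "load [freq load]..." with space or colon separators.  For
 * example (illustrative values), writing "85 1000000:90 1500000:95"
 * requests a target load of 85% below 1 GHz, 90% from 1 GHz up to
 * 1.5 GHz, and 95% at 1.5 GHz and above.
 */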
static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock(&target_loads_lock);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock(&target_loads_lock);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                                struct attribute *attr, const char *buf,
                                size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        timer_slack_val = val;
        return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &timer_slack.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};
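
/*
 * With .name set, the attribute group is created as a subdirectory of
 * the global cpufreq kobject, which typically appears as
 * /sys/devices/system/cpu/cpufreq/interactive/.
 */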

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        unsigned long expires;

                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        expires = jiffies + usecs_to_jiffies(timer_rate);
                        pcpu->cpu_timer.expires = expires;
                        add_timer_on(&pcpu->cpu_timer, j);
                        if (timer_slack_val >= 0) {
                                expires += usecs_to_jiffies(timer_slack_val);
                                pcpu->cpu_slack_timer.expires = expires;
                                add_timer_on(&pcpu->cpu_slack_timer, j);
                        }
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

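/*
 * The slack timer is a regular (non-deferrable) timer with an empty
 * handler: its only purpose is to wake the CPU from idle so that the
 * deferrable cpu_timer gets a chance to fire and reevaluate (and
 * possibly lower) speed.
 */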
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

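/*
 * When built in as the default governor, register early (fs_initcall)
 * so the governor already exists by the time cpufreq drivers initialize
 * and look up their default.
 */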
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");