/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;
Todd Poynor9fb15312012-04-23 20:42:41 -0700104
Todd Poynorcba9f3e2012-12-18 17:50:10 -0800105/*
106 * Max additional time to wait in idle, beyond timer_rate, at speeds above
107 * minimum before wakeup to reduce speed, or -1 if unnecessary.
108 */
109#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
110static int timer_slack_val = DEFAULT_TIMER_SLACK;
Lianwei Wangba6c6bb2012-11-01 09:59:52 +0800111
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

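/*
 * target_loads is a flat array of alternating entries: a target load,
 * then the lowest frequency at which the next target load applies
 * (e.g. "90 1000000:80" is stored as {90, 1000000, 80}). Even indices
 * hold loads, odd indices hold frequency boundaries; return the target
 * load for the range containing freq.
 */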
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

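/*
 * Sample this CPU's idle and wall time and credit cputime_speedadj with
 * the active time weighted by the current frequency. Caller must hold
 * pcpu->load_lock. Returns the current timestamp.
 */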
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

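/*
 * Per-CPU sampling timer: compute the load since the last sample, pick a
 * new target frequency, and hand any needed change to speedchange_task.
 */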
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

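	/*
	 * cputime_speedadj / delta_time is the average frequency weighted
	 * by active time; scaling by 100 gives a "load adjusted frequency"
	 * (loadadjfreq), so loadadjfreq / target_freq is the percent load
	 * relative to the current target speed.
	 */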
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

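/*
 * Worker thread (SCHED_FIFO) that sleeps until a CPU is flagged in
 * speedchange_cpumask, then drives each flagged CPU's policy to the
 * highest target_freq among the CPUs sharing that policy.
 */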
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

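/*
 * Jump all online CPUs whose target is below hispeed_freq up to
 * hispeed_freq, and reset every CPU's floor to hispeed_freq so the
 * sampling timer will not ramp below it for at least min_sample_time.
 */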
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

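/*
 * On each completed frequency transition, resample load on every CPU in
 * the policy so time spent at the old speed is credited at that speed.
 */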
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

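/*
 * Parse "load [freq:load ...]" (an odd number of space- or
 * colon-separated values) into a freshly allocated array that replaces
 * target_loads under target_loads_lock.
 */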
static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;
	unsigned long flags;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

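/*
 * Governor event handler. GOV_START arms the per-CPU timers and, on
 * first use, registers the idle/transition notifiers and sysfs group;
 * GOV_STOP reverses this; GOV_LIMITS clamps the current speed into the
 * new policy bounds.
 */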
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		mutex_lock(&gov_lock);

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

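/*
 * The slack timer has no work of its own; expiring is enough to take the
 * CPU out of idle, after which the idle-exit path re-evaluates speed.
 */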
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");