/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

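/*
 * A sketch of the target_loads layout, with illustrative (not default)
 * values: the array alternates load and frequency entries, so
 * {85, 1000000, 90} means "target 85% load below 1000000 kHz, 90% at or
 * above it".  Written via sysfs (assuming the usual global cpufreq path)
 * as:
 *
 *   echo "85 1000000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 */
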
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

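/*
 * Worked example for the lookup above, using hypothetical values: with
 * target_loads = {85, 1000000, 90}, a query for freq = 1200000 steps past
 * the 1000000 boundary and returns 90, while a query for freq = 300000
 * stops at i = 0 and returns 85.
 */
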
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

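/*
 * Illustrative walk-through of choose_freq(), with hypothetical numbers:
 * at a uniform target load of 90 and loadadjfreq = 54000000 (60% load at
 * 900000 kHz), the first pass asks the table for the lowest frequency >=
 * 54000000 / 90 = 600000 kHz.  If the table contains 600000, the next
 * pass selects it again (freq == prevfreq) and the loop terminates; the
 * freqmin/freqmax bounds only come into play when target_loads varies
 * across frequencies and the estimate oscillates.
 */
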
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

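/*
 * Example of the bookkeeping above (illustrative numbers): if 15000 us
 * elapsed since the last sample, 5000 us of which were idle, at
 * policy->cur = 1000000 kHz, then active_time = 10000 and
 * cputime_speedadj grows by 10000 * 1000000, i.e. busy time weighted by
 * the speed at which it accrued.
 */
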
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

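/*
 * Continuing the illustrative numbers from update_load(): the timer above
 * divides cputime_speedadj = 10000 * 1000000 by delta_time = 15000 and
 * scales by 100 to get loadadjfreq ~= 66666600; with target_freq =
 * 1000000 that yields cpu_load = 66, below the default go_hispeed_load
 * of 99, so new_freq is taken from choose_freq() rather than being
 * bumped straight to hispeed_freq.
 */
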
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

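/*
 * Example of the policy-wide resolution above (hypothetical values): if
 * CPU0 and CPU1 share a policy with target_freq 1200000 and 600000
 * respectively, the inner loop picks max_freq = 1200000 and programs
 * that for the whole policy, so one busy sibling keeps the cluster fast.
 */
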
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;
	unsigned long flags;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

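/*
 * Round-trip sketch for the attribute pair above (illustrative values):
 * a write of "85 1000000:90" reads back as "85 1000000:90 ", since
 * show_target_loads() terminates load entries with a space and frequency
 * entries with a colon, the same odd-length token format that
 * store_target_loads() parses.
 */
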
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

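/*
 * Usage sketch (assuming the standard global sysfs path): writing -1
 * disables the slack timer entirely, e.g.
 *
 *   echo -1 > /sys/devices/system/cpu/cpufreq/interactive/timer_slack
 *
 * while a non-negative value bounds how long an idle CPU may sit above
 * policy->min before the non-deferrable slack timer wakes it to
 * re-evaluate (and typically lower) its speed.
 */
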
static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

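/*
 * Usage sketch (illustrative): any successfully parsed write arms a
 * pulse, e.g.
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * which raises all online CPUs to at least hispeed_freq and lets the
 * speed fall again once boostpulse_duration_val usecs have elapsed.
 */
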
static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

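/*
 * With the group named "interactive" registered on the global cpufreq
 * kobject in the governor START path below, the tunables above typically
 * surface as (exact path may vary by platform):
 *
 *   /sys/devices/system/cpu/cpufreq/interactive/{target_loads,
 *   hispeed_freq, go_hispeed_load, above_hispeed_delay, min_sample_time,
 *   timer_rate, timer_slack, boost, boostpulse, boostpulse_duration}
 */
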
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");