/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default: policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

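/*
 * target_loads is stored as a flat array of alternating values: even
 * indices hold target loads, odd indices hold the frequencies (in kHz)
 * at and above which the next load applies.  A single-entry array means
 * one target load for all speeds.
 */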
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

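/*
 * Worked example (illustrative numbers): if the CPU ran at 1000 MHz and
 * was active 50% of the interval, loadadjfreq is 500000 kHz * 100.
 * With a target load of 90, the search below settles on the lowest table
 * frequency f such that 500000/f <= 0.90, i.e. roughly 556 MHz rounded
 * up to the next table entry.
 */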
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

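/*
 * Sample this CPU's idle/busy split since the last sample and charge the
 * busy time to cputime_speedadj at the current frequency, so that
 * (cputime_speedadj / elapsed time) yields a frequency-weighted load.
 * Caller must hold pcpu->load_lock.
 */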
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

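/*
 * Per-CPU sample timer: compute the frequency-weighted load over the last
 * interval, pick a new target speed (bumping straight to hispeed_freq on
 * load bursts), enforce the above-hispeed delay and the min_sample_time
 * floor, then hand the change to the speedchange thread.
 */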
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;

	if ((cpu_load >= go_hispeed_load || boost_val) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = choose_freq(pcpu, loadadjfreq);

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

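/*
 * Idle-exit hook: make sure a sample timer is armed, and if a deferrable
 * timer already expired while this CPU slept, run the evaluation now
 * rather than waiting another tick.
 */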
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

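/*
 * Speedchange thread: woken with a mask of CPUs whose target speed
 * changed; for each one, drives the policy to the highest target_freq
 * among the CPUs sharing that policy.
 */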
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);
		}
	}

	return 0;
}

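/*
 * Pop each online CPU up to at least hispeed_freq and reset its floor,
 * so a boosted speed is held for min_sample_time before ramping down.
 */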
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

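/*
 * Frequency-transition notifier: on POSTCHANGE, resample each affected
 * CPU's load so the interval just ended is credited to cputime_speedadj
 * at the frequency that was current while it accrued.
 */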
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

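/*
 * target_loads is written as "load [freq load] ...", space- or
 * colon-separated, e.g. (illustrative kHz values):
 *   echo "85 1000000:90 1700000:99" > \
 *       /sys/devices/system/cpu/cpufreq/interactive/target_loads
 */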
static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
	       show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

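/*
 * Governor callback: GOV_START wires up per-CPU state and arms the sample
 * timers; the first START also registers the sysfs group and the
 * idle/transition notifiers (dropped again on the last GOV_STOP).
 * GOV_LIMITS clamps the current speed into the new policy bounds.
 */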
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		spin_lock_init(&pcpu->load_lock);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");