/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}
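
/*
 * target_loads is kept as a flat array alternating load and frequency
 * values: { load, freq, load, freq, ..., load }.  The walk above steps
 * across the frequency entries (odd indices) and returns the load entry
 * covering the span that contains the requested frequency.  Illustrative
 * example (the kHz values are arbitrary):
 *
 *	{ 90, 1200000, 75 }
 *
 * means "target 90% load below 1.2 GHz, 75% at or above it", so
 * freq_to_targetload(900000) returns 90 and freq_to_targetload(1300000)
 * returns 75.
 */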

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
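
/*
 * Illustrative walk-through with made-up numbers (a 300/600/900/1200 MHz
 * table, a single target load of 90, current speed 600 MHz): a fully busy
 * sample window gives loadadjfreq = 600000 * 100.  The first pass computes
 * 60000000 / 90 = 666666 and rounds up to 900000; the second pass computes
 * the same quotient, picks 900000 again, and the loop terminates.  900 MHz
 * is the lowest table entry whose projected load (60000000 / 900000, about
 * 66%) does not exceed the 90% target, while 600 MHz would sit at 100%.
 */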

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
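
/*
 * Worked example of the accounting above (illustrative numbers): if the
 * CPU spent 15000 us busy and 5000 us idle since the last sample while
 * running at 1000000 kHz, update_load() adds 15000 * 1000000 to
 * cputime_speedadj.  The sampling timer later divides the accumulated sum
 * by the 20000 us window and multiplies by 100, giving
 * loadadjfreq = 75 * 1000000; dividing by the current target frequency
 * then yields the percentage load used to pick the next speed.
 */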

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;

	if ((cpu_load >= go_hispeed_load || boost_val) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = choose_freq(pcpu, loadadjfreq);

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

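/*
 * The speedchange thread below runs at RT priority and services any CPU
 * flagged in speedchange_cpumask.  Since CPUs within one cpufreq policy
 * typically share a clock, it programs the highest target_freq requested
 * by any CPU in the policy rather than only the flagged CPU's own target.
 */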
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}

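/*
 * Bump every online CPU to at least hispeed_freq.  The floor is raised to
 * hispeed_freq and its timestamp refreshed as well, so min_sample_time
 * keeps a boosted CPU from ramping straight back down while load stays low.
 */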
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Refresh the floor frequency and the time at which it was
		 * last validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

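/*
 * cpufreq transition notifier: when any speed change completes, fold the
 * elapsed busy time into cputime_speedadj for every CPU in the policy, so
 * load accumulation is split at frequency-change boundaries instead of the
 * whole timer window being attributed to a single frequency.
 */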
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);
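
/*
 * Example write (illustrative values; the directory comes from the
 * "interactive" attribute group registered on cpufreq_global_kobject,
 * normally /sys/devices/system/cpu/cpufreq/interactive):
 *
 *	echo "85 1000000:90 1500000:99" > target_loads
 *
 * requests an 85% target load below 1.0 GHz, 90% from 1.0 GHz up to
 * 1.5 GHz, and 99% at 1.5 GHz and above.  The token count must be odd
 * (loads alternating with the frequencies at which the next load takes
 * effect); otherwise the write fails with -EINVAL.
 */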

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
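
/*
 * Usage sketch (same sysfs directory as the other tunables): writing a
 * nonzero value to "boost" holds all online CPUs at or above hispeed_freq
 * until 0 is written back, while any successfully parsed write to
 * "boostpulse" gives a one-shot kick to hispeed_freq that may decay once
 * min_sample_time expires if load stays low.
 */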

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		spin_lock_init(&pcpu->load_lock);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");