/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

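/* Per-CPU governor state, one instance per possible CPU. */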
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/*
 * High speed to bump to from low speed on a load burst (defaults to
 * policy max).
 */
static unsigned int hispeed_freq;

/* Go to high speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

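/*
 * With the defaults above, the governor samples load every 20 ms
 * (20 * USEC_PER_MSEC = 20000 us) and must hold a frequency for at
 * least 80 ms, i.e. four consecutive samples, before ramping down.
 */
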
/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */

static int boost_val;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

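/*
 * Governor descriptor.  max_transition_latency is in nanoseconds; with
 * 10000000 (10 ms), the cpufreq core is expected to refuse this governor
 * on drivers that report a larger transition latency.
 */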
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
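
        /*
         * Worked example of the arithmetic above, with the defaults: over a
         * 20 ms sample in which the CPU was idle for 5 ms, delta_time = 20000
         * and delta_idle = 5000, so cpu_load = 100 * 15000 / 20000 = 75.
         */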

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = hispeed_freq * cpu_load / 100;
        }
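
        /*
         * Example of the choice above (defaults assumed): at or above 85%
         * load, a CPU running below hispeed_freq jumps straight to
         * hispeed_freq; one already at hispeed_freq must stay there for
         * above_hispeed_delay_val before being allowed to scale higher.
         */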

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = pcpu->timer_run_time;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (pcpu->timer_run_time - pcpu->floor_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                                         pcpu->target_freq,
                                                         new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }

}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                           pcpu->target_freq,
                                                           pcpu->policy->cur);
                }
        }

        return 0;
}
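
/*
 * Summary of the loop above: the task sleeps until a CPU is flagged in
 * speedchange_cpumask, snapshots and clears the mask under the spinlock,
 * then drives each flagged CPU's policy to the highest target_freq among
 * the CPUs sharing that policy.  CPUs flagged while a snapshot is being
 * serviced are picked up on the next iteration.
 */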

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}
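
/*
 * Note on semantics (see the sysfs handlers below): writing 1 to "boost"
 * holds every online CPU at or above hispeed_freq until 0 is written;
 * "boostpulse" triggers the same bump once, after which the hispeed floor
 * expires min_sample_time later.
 */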

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

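/*
 * Example usage from userspace (path assumes the attribute group is
 * attached to the global cpufreq kobject, as registered below; frequency
 * values are in kHz and purely illustrative):
 *
 *   echo 1200000 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
 *   echo 95 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */
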
static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};
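
/*
 * IDLE_START/IDLE_END and idle_notifier_register() come from the Android
 * idle-notifier patches (an assumption worth noting: they are not part of
 * the mainline idle path), so this file builds only on kernels carrying
 * those hooks.
 */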

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");