/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

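/*
 * Count of governor starts; the shared sysfs entries, input handler
 * and idle notifier are registered only while this is non-zero.
 */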
static atomic_t active_count = ATOMIC_INIT(0);

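/* Per-CPU governor state, shared by the sampling timer and the idle hooks. */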
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* A realtime kthread handles scaling up; a workqueue handles scaling down */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Boost pulse to hispeed on touchscreen input.
 */
static int input_boost_val;

struct cpufreq_interactive_inputopen {
        struct input_handle *handle;
        struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                                        unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

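/*
 * Per-CPU sampling timer: measure load over the most recent idle sample
 * and since the last speed change, take the greater of the two, and
 * retarget the frequency accordingly.
 */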
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

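        /*
         * If load is above go_hispeed_load or a boost is active, bump
         * straight to hispeed_freq when running at minimum speed;
         * otherwise scale the target with load, deferring rises above
         * hispeed_freq until above_hispeed_delay_val has passed since
         * hispeed was last validated.
         */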
        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq <= pcpu->policy->min) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = pcpu->policy->max * cpu_load / 100;
        }

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = pcpu->timer_run_time;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (pcpu->timer_run_time - pcpu->floor_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                                         pcpu->target_freq,
                                                         new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

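/*
 * Idle notifier hook, called as this CPU enters idle: decide whether the
 * sampling timer should keep running (or be newly armed) while idle.
 */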
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

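/*
 * Idle notifier hook, called as this CPU exits idle: re-arm the sampling
 * timer once the previous load sample has been consumed.
 */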
static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

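/*
 * Realtime worker thread for speed increases: for each CPU flagged in
 * up_cpumask, drive its policy to the highest target_freq among the
 * CPUs sharing that policy.
 */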
static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        mutex_lock(&set_speed_lock);

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);
                        trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

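/*
 * Workqueue handler for speed decreases; mirror image of the up task,
 * driven from down_cpumask.
 */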
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                unsigned int j;
                unsigned int max_freq = 0;

                pcpu = &per_cpu(cpuinfo, cpu);
                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                mutex_lock(&set_speed_lock);

                for_each_cpu(j, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, j);

                        if (pjcpu->target_freq > max_freq)
                                max_freq = pjcpu->target_freq;
                }

                if (max_freq != pcpu->policy->cur)
                        __cpufreq_driver_target(pcpu->policy, max_freq,
                                                CPUFREQ_RELATION_H);

                mutex_unlock(&set_speed_lock);
                trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
                                               pcpu->policy->cur);
        }
}

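/*
 * Raise every online CPU to at least hispeed_freq and reset the speed
 * floor, so the usual min_sample_time rule decides when speeds may drop
 * back down.
 */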
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&up_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &up_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&up_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(up_task);
}

/*
 * Pulsed boost on input event raises CPUs to hispeed_freq and lets
 * usual algorithm of min_sample_time decide when to allow speed
 * to drop.
 */
static void cpufreq_interactive_input_event(struct input_handle *handle,
                                            unsigned int type,
                                            unsigned int code, int value)
{
        if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
                trace_cpufreq_interactive_boost("input");
                cpufreq_interactive_boost();
        }
}

static void cpufreq_interactive_input_open(struct work_struct *w)
{
        struct cpufreq_interactive_inputopen *io =
                container_of(w, struct cpufreq_interactive_inputopen,
                             inputopen_work);
        int error;

        error = input_open_device(io->handle);
        if (error)
                input_unregister_handle(io->handle);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
                                             struct input_dev *dev,
                                             const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        pr_info("%s: connect to %s\n", __func__, dev->name);
        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "cpufreq_interactive";

        error = input_register_handle(handle);
        if (error)
                goto err;

        inputopen.handle = handle;
        queue_work(down_wq, &inputopen.inputopen_work);
        return 0;
err:
        kfree(handle);
        return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

static const struct input_device_id cpufreq_interactive_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
                            BIT_MASK(ABS_MT_POSITION_X) |
                            BIT_MASK(ABS_MT_POSITION_Y) },
        }, /* multi-touch touchscreen */
        {
                .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
                .absbit = { [BIT_WORD(ABS_X)] =
                            BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
        }, /* touchpad */
        { },
};

static struct input_handler cpufreq_interactive_input_handler = {
        .event = cpufreq_interactive_input_event,
        .connect = cpufreq_interactive_input_connect,
        .disconnect = cpufreq_interactive_input_disconnect,
        .name = "cpufreq_interactive",
        .id_table = cpufreq_interactive_ids,
};

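/*
 * sysfs tunables; the attribute group below is created with the name
 * "interactive" under the global cpufreq kobject (typically
 * /sys/devices/system/cpu/cpufreq/interactive).
 */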
static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                                struct attribute *attr, const char *buf,
                                size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%u\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
                                 const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        input_boost_val = val;
        return count;
}

define_one_global_rw(input_boost);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &input_boost.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};
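
/*
 * Example tuning session from a root shell, assuming the sysfs paths
 * noted above (times are in microseconds; any write to boostpulse
 * triggers a pulse):
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */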

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

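/*
 * Governor entry point: the cpufreq core invokes this with START, STOP
 * and LIMITS events as policies attach to or detach from the governor.
 */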
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                                        unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table = cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                                     &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                        &interactive_attr_group);
                if (rc)
                        return rc;

                rc = input_register_handler(&cpufreq_interactive_input_handler);
                if (rc)
                        pr_warn("%s: failed to register input handler\n",
                                __func__);

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                input_unregister_handler(&cpufreq_interactive_input_handler);
                sysfs_remove_group(cpufreq_global_kobject,
                                   &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                                policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

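/*
 * Module init: apply tunable defaults, prepare the per-CPU timers and
 * the up/down scaling machinery, then register the governor.
 */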
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);
        mutex_init(&set_speed_lock);

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /*
         * No rescuer thread, bind to CPU queuing the work for possibly
         * warm cache (probably doesn't matter much).
         */
        down_wq = alloc_workqueue("knteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work, cpufreq_interactive_freq_down);
        INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(up_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");