/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

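/* Per-CPU state tracked by the governor's timer and idle notifier hooks. */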
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime thread handles scaling up; workqueue handles scaling down */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 95
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (20 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;
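
/*
 * The tunables above are exported read/write through the "interactive"
 * sysfs attribute group defined below (registered on cpufreq_global_kobject
 * at governor start), so they can be adjusted at runtime.  On a typical
 * build the group appears as /sys/devices/system/cpu/cpufreq/interactive/,
 * e.g. (illustrative values):
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 *
 * min_sample_time and timer_rate are expressed in microseconds.
 */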

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

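/*
 * Per-CPU timer callback: sample the time spent idle since the last run,
 * derive a load estimate, pick a new target frequency and hand the actual
 * transition off to the up thread or the down workqueue.
 */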
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int)(now_idle - time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
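	/*
	 * Example: a 20ms sample window (delta_time = 20000) of which 5ms
	 * was spent idle (delta_idle = 5000) yields
	 * cpu_load = 100 * 15000 / 20000 = 75.
	 */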

	delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

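	/*
	 * At or above go_hispeed_load, jump to at least hispeed_freq
	 * (straight to hispeed_freq when currently at the policy minimum);
	 * otherwise the target scales with load as a fraction of the
	 * policy maximum.
	 */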
	if (cpu_load >= go_hispeed_load) {
		if (pcpu->policy->cur == pcpu->policy->min) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = pcpu->policy->max * cpu_load / 100;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (pcpu->timer_run_time - pcpu->freq_change_time
		    < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
					pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}

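/*
 * Idle-entry hook (called via the idle notifier registered at init time).
 * If we enter idle above the minimum speed, make sure a timer is armed so
 * this idle CPU cannot hold the other CPUs of its policy at an elevated
 * speed indefinitely.
 */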
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}

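/*
 * Idle-exit hook: the CPU has gone busy again, so start a fresh idle
 * sample and re-arm the load evaluation timer, provided the previous
 * sample has already been consumed by the timer function.
 */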
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}

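/*
 * Worker thread for speed increases: for each CPU flagged in up_cpumask,
 * drive the policy to the highest target_freq requested by any CPU sharing
 * that policy.  Raising speed promptly is the latency-sensitive path, hence
 * the SCHED_FIFO kthread (see init) rather than a workqueue.
 */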
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);

			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
			trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}

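/*
 * Workqueue handler for speed decreases: same policy-wide maximum
 * calculation as the up path, but run from ordinary process context since
 * lowering speed is not urgent.
 */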
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);

		mutex_unlock(&set_speed_lock);
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu,
					     &pcpu->freq_change_time);
		trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
					       pcpu->policy->cur);
	}
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

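/*
 * Governor callback.  GOV_START wires up the per-CPU state for the policy
 * and, on first use, creates the sysfs tunables; GOV_STOP tears that state
 * down and removes the tunables once the last policy stops; GOV_LIMITS
 * clamps the current speed into the new [min, max] range.
 */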
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table = cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(j,
						     &pcpu->freq_change_time);
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Create the sysfs entries only when the first policy
		 * starts the governor; later starts just bump the refcount.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

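/*
 * Module init: set tunable defaults, initialize the per-CPU timers, spawn
 * the SCHED_FIFO "kinteractiveup" thread and the scale-down workqueue,
 * register the idle notifier and finally register the governor itself.
 */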
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");