/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#define gov_attr_ro(_name) \
static struct governor_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_wo(_name) \
static struct governor_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)

#define gov_attr_rw(_name) \
static struct governor_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

/* Separate instance required for each 'interactive' directory in sysfs */
struct interactive_tunables {
	struct gov_attr_set attr_set;

	/* Hi speed to bump to from lo speed on a load burst (default max) */
	unsigned int hispeed_freq;

	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;

	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;

	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;

	/* The sample rate of the timer used to increase frequency */
	unsigned long sampling_rate;

	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;

	/* Non-zero means indefinite speed boost active */
	int boost;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;

	/*
	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
	unsigned long timer_slack_delay;
	unsigned long timer_slack;
	bool io_is_busy;
};

/* Separate instance required for each 'struct cpufreq_policy' */
struct interactive_policy {
	struct cpufreq_policy *policy;
	struct interactive_tunables *tunables;
	struct list_head tunables_hook;
};

/* Separate instance required for each CPU */
struct interactive_cpu {
	struct update_util_data update_util;
	struct interactive_policy *ipolicy;

	struct irq_work irq_work;
	u64 last_sample_time;
	unsigned long next_sample_jiffies;
	bool work_in_progress;

	struct rw_semaphore enable_sem;
	struct timer_list slack_timer;

	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;

	spinlock_t target_freq_lock; /* protects target freq */
	unsigned int target_freq;

	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
};

static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);

/* Realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY
};

/* Iterate over interactive policies for tunables */
#define for_each_ipolicy(__ip) \
	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)

static struct interactive_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline void update_slack_delay(struct interactive_tunables *tunables)
{
	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
						       tunables->sampling_rate);
}

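/*
 * The slack timer is only useful while a CPU runs above policy->min; at the
 * minimum frequency there is nothing left to ramp down to.
 */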
static bool timer_slack_required(struct interactive_cpu *icpu)
{
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;

	if (tunables->timer_slack < 0)
		return false;

	if (icpu->target_freq > ipolicy->policy->min)
		return true;

	return false;
}

static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
	add_timer_on(&icpu->slack_timer, cpu);
}

static void gov_slack_timer_modify(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;

	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
}

static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
				bool modify)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	unsigned long flags;

	spin_lock_irqsave(&icpu->load_lock, flags);

	icpu->time_in_idle = get_cpu_idle_time(cpu,
					       &icpu->time_in_idle_timestamp,
					       tunables->io_is_busy);
	icpu->cputime_speedadj = 0;
	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;

	if (timer_slack_required(icpu)) {
		if (modify)
			gov_slack_timer_modify(icpu);
		else
			gov_slack_timer_start(icpu, cpu);
	}

	spin_unlock_irqrestore(&icpu->load_lock, flags);
}

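/*
 * above_hispeed_delay and target_loads are kept as flattened arrays of the
 * form {value, freq, value, freq, value, ...}: each value applies from the
 * frequency that precedes it (or from 0 for the first value) up to, but not
 * including, the frequency that follows it; the last value covers all higher
 * frequencies.
 */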
static unsigned int
freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
			    unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
				       unsigned int freq)
{
	unsigned long flags;
	unsigned int ret;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
	     freq >= tunables->target_loads[i + 1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
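/*
 * Illustrative walk-through (the load and frequency numbers are made up):
 * with a single target load of 90 and loadadjfreq == 45 * 1000000 (45% load
 * measured at 1 GHz), the first pass asks for the lowest table frequency at
 * or above 45000000 / 90 = 500000 kHz.  Each following pass repeats the
 * division with the target load of the newly chosen frequency until the
 * choice stops changing.
 */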
static unsigned int choose_freq(struct interactive_cpu *icpu,
				unsigned int loadadjfreq)
{
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
	unsigned int freq = policy->cur;
	int index;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
						       CPUFREQ_RELATION_L);

		freq = freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low */
			freqmin = prevfreq;

			if (freq < freqmax)
				continue;

			/* Find highest frequency that is less than freqmax */
			index = cpufreq_frequency_table_target(policy,
					freqmax - 1, CPUFREQ_RELATION_H);

			freq = freq_table[index].frequency;

			if (freq == freqmin) {
				/*
				 * The first frequency below freqmax has already
				 * been found to be too low. freqmax is the
				 * lowest speed we found that is fast enough.
				 */
				freq = freqmax;
				break;
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq > freqmin)
				continue;

			/* Find lowest frequency that is higher than freqmin */
			index = cpufreq_frequency_table_target(policy,
					freqmin + 1, CPUFREQ_RELATION_L);

			freq = freq_table[index].frequency;

			/*
			 * If freqmax is the first frequency above
			 * freqmin then we have already found that
			 * this speed is fast enough.
			 */
			if (freq == freqmax)
				break;
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

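/*
 * Account the time since the last sample: add the busy time, weighted by the
 * current frequency, to cputime_speedadj and advance the idle-time
 * bookkeeping for this CPU.
 */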
static u64 update_load(struct interactive_cpu *icpu, int cpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	u64 now_idle, now, active_time, delta_idle, delta_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - icpu->time_in_idle);
	delta_time = (now - icpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;

	icpu->time_in_idle = now_idle;
	icpu->time_in_idle_timestamp = now;

	return now;
}

/* Re-evaluate load to see if a frequency change is required or not */
static void eval_target_freq(struct interactive_cpu *icpu)
{
	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
	struct cpufreq_policy *policy = icpu->ipolicy->policy;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	u64 cputime_speedadj, now, max_fvtime;
	unsigned int new_freq, loadadjfreq, index, delta_time;
	unsigned long flags;
	int cpu_load;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&icpu->load_lock, flags);
	now = update_load(icpu, smp_processor_id());
	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
	cputime_speedadj = icpu->cputime_speedadj;
	spin_unlock_irqrestore(&icpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		return;

	spin_lock_irqsave(&icpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / policy->cur;
	tunables->boosted = tunables->boost ||
			    now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(icpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(icpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (policy->cur >= tunables->hispeed_freq &&
	    new_freq > policy->cur &&
	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
		trace_cpufreq_interactive_notyet(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	icpu->loc_hispeed_val_time = now;

	index = cpufreq_frequency_table_target(policy, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(cpu, cpu_load,
					icpu->target_freq, policy->cur, new_freq);
			goto exit;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		icpu->floor_freq = new_freq;
		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
			icpu->loc_floor_val_time = now;
	}

	if (icpu->target_freq == new_freq &&
	    icpu->target_freq <= policy->cur) {
		trace_cpufreq_interactive_already(cpu, cpu_load,
				icpu->target_freq, policy->cur, new_freq);
		goto exit;
	}

	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
					 policy->cur, new_freq);

	icpu->target_freq = new_freq;
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	wake_up_process(speedchange_task);
	return;

exit:
	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
}

static void cpufreq_interactive_update(struct interactive_cpu *icpu)
{
	eval_target_freq(icpu);
	slack_timer_resched(icpu, smp_processor_id(), true);
}

static void cpufreq_interactive_idle_end(void)
{
	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
						smp_processor_id());

	if (!down_read_trylock(&icpu->enable_sem))
		return;

	if (icpu->ipolicy) {
		/*
		 * We haven't sampled the load for more than sampling_rate,
		 * so do it right now.
		 */
		if (time_after_eq(jiffies, icpu->next_sample_jiffies))
			cpufreq_interactive_update(icpu);
	}

	up_read(&icpu->enable_sem);
}

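/*
 * Report the highest target frequency requested by any CPU in the policy,
 * the hispeed validation time that goes with it, and the most recent
 * per-CPU floor validation time.
 */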
static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
						unsigned int *pmax_freq,
						u64 *phvt, u64 *pfvt)
{
	struct interactive_cpu *icpu;
	u64 hvt = ~0ULL, fvt = 0;
	unsigned int max_freq = 0, i;

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);

		fvt = max(fvt, icpu->loc_floor_val_time);
		if (icpu->target_freq > max_freq) {
			max_freq = icpu->target_freq;
			hvt = icpu->loc_hispeed_val_time;
		} else if (icpu->target_freq == max_freq) {
			hvt = min(hvt, icpu->loc_hispeed_val_time);
		}
	}

	*pmax_freq = max_freq;
	*phvt = hvt;
	*pfvt = fvt;
}

static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
					   struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	u64 hvt, fvt;
	unsigned int max_freq;
	int i;

	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);

	for_each_cpu(i, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, i);
		icpu->pol_floor_val_time = fvt;
	}

	if (max_freq != policy->cur) {
		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);
			icpu->pol_hispeed_val_time = hvt;
		}
	}

	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
}

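/*
 * RT kthread that performs the actual frequency changes: it is woken whenever
 * a CPU is added to speedchange_cpumask, drains the mask and applies the
 * highest pending target frequency of each affected policy.
 */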
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;

again:
	set_current_state(TASK_INTERRUPTIBLE);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	if (cpumask_empty(&speedchange_cpumask)) {
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
		schedule();

		if (kthread_should_stop())
			return 0;

		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	}

	set_current_state(TASK_RUNNING);
	tmp_mask = speedchange_cpumask;
	cpumask_clear(&speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
		struct cpufreq_policy *policy;

		if (unlikely(!down_read_trylock(&icpu->enable_sem)))
			continue;

		if (likely(icpu->ipolicy)) {
			policy = icpu->ipolicy->policy;
			cpufreq_interactive_adjust_cpu(cpu, policy);
		}

		up_read(&icpu->enable_sem);
	}

	goto again;
}

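/*
 * Bump every CPU of every policy that currently targets less than
 * hispeed_freq up to hispeed_freq, and let the speedchange task apply the
 * new targets.
 */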
static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
{
	struct interactive_policy *ipolicy;
	struct cpufreq_policy *policy;
	struct interactive_cpu *icpu;
	unsigned long flags[2];
	bool wakeup = false;
	int i;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_ipolicy(ipolicy) {
		policy = ipolicy->policy;

		for_each_cpu(i, policy->cpus) {
			icpu = &per_cpu(interactive_cpu, i);

			if (!down_read_trylock(&icpu->enable_sem))
				continue;

			if (!icpu->ipolicy) {
				up_read(&icpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
			if (icpu->target_freq < tunables->hispeed_freq) {
				icpu->target_freq = tunables->hispeed_freq;
				cpumask_set_cpu(i, &speedchange_cpumask);
				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
				wakeup = true;
			}
			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);

			up_read(&icpu->enable_sem);
		}
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (wakeup)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
	unsigned long flags;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	if (!down_read_trylock(&icpu->enable_sem))
		return 0;

	if (!icpu->ipolicy) {
		up_read(&icpu->enable_sem);
		return 0;
	}

	spin_lock_irqsave(&icpu->load_lock, flags);
	update_load(icpu, freq->cpu);
	spin_unlock_irqrestore(&icpu->load_lock, flags);

	up_read(&icpu->enable_sem);

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

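/*
 * Parse a space/colon separated string such as "85 1500000:90" (the values
 * here are only an example) into an array of unsigned ints.  An odd number
 * of tokens is required: a leading value followed by frequency:value pairs.
 */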
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp = buf;
	int ntokens = 1, i = 0;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	while (i < ntokens) {
		if (kstrtouint(cp, 0, &tokenized_data[i++]) < 0)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

/* Interactive governor sysfs interface */
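/*
 * The tunables below show up in an "interactive" directory in sysfs, for
 * example (illustrative values; the exact path depends on whether the
 * platform uses per-policy governor tunables):
 *
 *   echo 1300000 > <cpufreq>/interactive/hispeed_freq
 *   echo "85 1500000:90" > <cpufreq>/interactive/target_loads
 */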
static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct interactive_tunables, attr_set);
}

#define show_one(file_name, type) \
static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
{ \
	struct interactive_tunables *tunables = to_tunables(attr_set); \
	return sprintf(buf, type "\n", tunables->file_name); \
}

static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return ret;
}

static ssize_t store_target_loads(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_target_loads;
	unsigned long flags;
	int ntokens;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_ERR(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return count;
}

static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
					char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long flags;
	ssize_t ret = 0;
	int i;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;
	int ntokens;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_ERR(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return count;
}

static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long int val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->hispeed_freq = val;

	return count;
}

static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->go_hispeed_load = val;

	return count;
}

static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
				     const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->min_sample_time = val;

	return count;
}

static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);

	return sprintf(buf, "%lu\n", tunables->sampling_rate);
}

static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val, val_round;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->sampling_rate = val_round;

	return count;
}

static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
				 size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack = val;
	update_slack_delay(tunables);

	return count;
}

static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
			   size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost = val;

	if (tunables->boost) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
					tunables->boostpulse_duration;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);

	return count;
}

static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
					 const char *buf, size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration = val;

	return count;
}

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct interactive_tunables *tunables = to_tunables(attr_set);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->io_is_busy = val;

	return count;
}

show_one(hispeed_freq, "%u");
show_one(go_hispeed_load, "%lu");
show_one(min_sample_time, "%lu");
show_one(timer_slack, "%lu");
show_one(boost, "%u");
show_one(boostpulse_duration, "%u");
show_one(io_is_busy, "%u");

gov_attr_rw(target_loads);
gov_attr_rw(above_hispeed_delay);
gov_attr_rw(hispeed_freq);
gov_attr_rw(go_hispeed_load);
gov_attr_rw(min_sample_time);
gov_attr_rw(timer_rate);
gov_attr_rw(timer_slack);
gov_attr_rw(boost);
gov_attr_wo(boostpulse);
gov_attr_rw(boostpulse_duration);
gov_attr_rw(io_is_busy);

static struct attribute *interactive_attributes[] = {
	&target_loads.attr,
	&above_hispeed_delay.attr,
	&hispeed_freq.attr,
	&go_hispeed_load.attr,
	&min_sample_time.attr,
	&timer_rate.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy.attr,
	NULL
};

static struct kobj_type interactive_tunables_ktype = {
	.default_attrs = interactive_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

/* Interactive Governor callbacks */
struct interactive_governor {
	struct cpufreq_governor gov;
	unsigned int usage_count;
};

static struct interactive_governor interactive_gov;

#define CPU_FREQ_GOV_INTERACTIVE (&interactive_gov.gov)

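/*
 * irq_work callback: re-evaluate the target frequency for this CPU,
 * reschedule its slack timer and allow new work to be queued again.
 */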
static void irq_work(struct irq_work *irq_work)
{
	struct interactive_cpu *icpu = container_of(irq_work, struct
						    interactive_cpu, irq_work);

	cpufreq_interactive_update(icpu);
	icpu->work_in_progress = false;
}

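/*
 * Scheduler utilization-update hook: rate-limit sampling to once per
 * sampling_rate and defer the real work to irq_work, since this callback is
 * invoked from scheduler context.
 */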
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	struct interactive_cpu *icpu = container_of(data,
					struct interactive_cpu, update_util);
	struct interactive_policy *ipolicy = icpu->ipolicy;
	struct interactive_tunables *tunables = ipolicy->tunables;
	u64 delta_ns;

	/*
	 * The irq-work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (icpu->work_in_progress)
		return;

	delta_ns = time - icpu->last_sample_time;
	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
		return;

	icpu->last_sample_time = time;
	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
				    jiffies;

	icpu->work_in_progress = true;
	irq_work_queue(&icpu->irq_work);
}

static void gov_set_update_util(struct interactive_policy *ipolicy)
{
	struct cpufreq_policy *policy = ipolicy->policy;
	struct interactive_cpu *icpu;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->last_sample_time = 0;
		icpu->next_sample_jiffies = 0;
		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
					     update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

static void icpu_cancel_work(struct interactive_cpu *icpu)
{
	irq_work_sync(&icpu->irq_work);
	icpu->work_in_progress = false;
	del_timer_sync(&icpu->slack_timer);
}

static struct interactive_policy *
interactive_policy_alloc(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;

	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
	if (!ipolicy)
		return NULL;

	ipolicy->policy = policy;

	return ipolicy;
}

static void interactive_policy_free(struct interactive_policy *ipolicy)
{
	kfree(ipolicy);
}

static struct interactive_tunables *
interactive_tunables_alloc(struct interactive_policy *ipolicy)
{
	struct interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return NULL;

	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
	if (!have_governor_per_policy())
		global_tunables = tunables;

	ipolicy->tunables = tunables;

	return tunables;
}

static void interactive_tunables_free(struct interactive_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

int cpufreq_interactive_init(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy;
	struct interactive_tunables *tunables;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	ipolicy = interactive_policy_alloc(policy);
	if (!ipolicy)
		return -ENOMEM;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_int_policy;
		}

		policy->governor_data = ipolicy;
		ipolicy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set,
				 &ipolicy->tunables_hook);
		goto out;
	}

	tunables = interactive_tunables_alloc(ipolicy);
	if (!tunables) {
		ret = -ENOMEM;
		goto free_int_policy;
	}

	tunables->hispeed_freq = policy->max;
	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
	tunables->timer_slack = DEFAULT_TIMER_SLACK;
	update_slack_delay(tunables);

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	policy->governor_data = ipolicy;

	ret = kobject_init_and_add(&tunables->attr_set.kobj,
				   &interactive_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   interactive_gov.gov.name);
	if (ret)
		goto fail;

	/* One time initialization for governor */
	if (!interactive_gov.usage_count++) {
		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(&cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}

 out:
	mutex_unlock(&global_tunables_lock);
	return 0;

 fail:
	policy->governor_data = NULL;
	interactive_tunables_free(tunables);

 free_int_policy:
	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
	pr_err("governor initialization failed (%d)\n", ret);

	return ret;
}

void cpufreq_interactive_exit(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_tunables *tunables = ipolicy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	/* Last policy using the governor ? */
	if (!--interactive_gov.usage_count) {
		cpufreq_unregister_notifier(&cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
	}

	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		interactive_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	interactive_policy_free(ipolicy);
}

int cpufreq_interactive_start(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu->target_freq = policy->cur;
		icpu->floor_freq = icpu->target_freq;
		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;

		down_write(&icpu->enable_sem);
		icpu->ipolicy = ipolicy;
		up_write(&icpu->enable_sem);

		slack_timer_resched(icpu, cpu, false);
	}

	gov_set_update_util(ipolicy);
	return 0;
}

void cpufreq_interactive_stop(struct cpufreq_policy *policy)
{
	struct interactive_policy *ipolicy = policy->governor_data;
	struct interactive_cpu *icpu;
	unsigned int cpu;

	gov_clear_update_util(ipolicy->policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		icpu_cancel_work(icpu);

		down_write(&icpu->enable_sem);
		icpu->ipolicy = NULL;
		up_write(&icpu->enable_sem);
	}
}

void cpufreq_interactive_limits(struct cpufreq_policy *policy)
{
	struct interactive_cpu *icpu;
	unsigned int cpu;
	unsigned long flags;

	cpufreq_policy_apply_limits(policy);

	for_each_cpu(cpu, policy->cpus) {
		icpu = &per_cpu(interactive_cpu, cpu);

		spin_lock_irqsave(&icpu->target_freq_lock, flags);

		if (policy->max < icpu->target_freq)
			icpu->target_freq = policy->max;
		else if (policy->min > icpu->target_freq)
			icpu->target_freq = policy->min;

		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
	}
}

static struct interactive_governor interactive_gov = {
	.gov = {
		.name = "interactive",
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
		.init = cpufreq_interactive_init,
		.exit = cpufreq_interactive_exit,
		.start = cpufreq_interactive_start,
		.stop = cpufreq_interactive_stop,
		.limits = cpufreq_interactive_limits,
	}
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
	/*
	 * The purpose of the slack timer is to wake the CPU up from idle in
	 * order to decrease its frequency if it is not already at the
	 * minimum.  This matters on platforms where a CPU left at a higher
	 * frequency consumes more power even while idle.
	 */
}

static int __init cpufreq_interactive_gov_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct interactive_cpu *icpu;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		icpu = &per_cpu(interactive_cpu, cpu);

		init_irq_work(&icpu->irq_work, irq_work);
		spin_lock_init(&icpu->load_lock);
		spin_lock_init(&icpu->target_freq_lock);
		init_rwsem(&icpu->enable_sem);

		/* Initialize per-cpu slack-timer */
		init_timer_pinned(&icpu->slack_timer);
		icpu->slack_timer.function = cpufreq_interactive_nop_timer;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
					  NULL, "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_INTERACTIVE;
}

fs_initcall(cpufreq_interactive_gov_init);
#else
module_init(cpufreq_interactive_gov_init);
#endif

static void __exit cpufreq_interactive_gov_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}
module_exit(cpufreq_interactive_gov_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for latency-sensitive workloads");
MODULE_LICENSE("GPL");