/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

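/*
 * Per-CPU governor state: the sampling timer, snapshots of idle time
 * used for short- and long-term load tracking, and the timestamps that
 * validate the frequency floor and the hispeed ramp delay.
 */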
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when a load burst is detected (default: policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - time_in_idle);
	delta_time = (unsigned int)(now - idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
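	/*
	 * Example: with the default 20ms timer window, 5ms of idle time
	 * in the window yields a short-term load of
	 * 100 * (20 - 5) / 20 = 75%.
	 */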

	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq < hispeed_freq &&
		    hispeed_freq < pcpu->policy->max) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    now - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(data, cpu_load,
								 pcpu->target_freq,
								 new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = hispeed_freq * cpu_load / 100;
	}

	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;
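	/*
	 * new_freq now holds a supported table frequency:
	 * CPUFREQ_RELATION_H selects the highest entry at or below the
	 * raw request.
	 */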

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
							 pcpu->target_freq,
							 new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

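	/*
	 * Driving the actual speed change may sleep, so it cannot be done
	 * from this (timer) context.  Queue the CPU for the realtime
	 * speedchange thread and wake it to apply the new target.
	 */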
	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer_pinned(&pcpu->cpu_timer,
				 jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer_pinned(
				&pcpu->cpu_timer,
				jiffies + usecs_to_jiffies(timer_rate));
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer_pinned(
			&pcpu->cpu_timer,
			jiffies + usecs_to_jiffies(timer_rate));
	}
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

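			/*
			 * CPUs in one policy share a clock: program the
			 * highest speed requested by any CPU in the
			 * policy.
			 */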
			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}

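/*
 * Push every online CPU to at least hispeed_freq.  The "boost" sysfs
 * tunable holds speeds there for as long as boost_val is set, while
 * "boostpulse" relies on the hispeed floor set below, which decays
 * after min_sample_time.
 */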
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Raise the frequency floor to hispeed_freq and restart
		 * the interval over which the floor is considered
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
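
/*
 * With this group registered on cpufreq_global_kobject, the tunables
 * typically appear under /sys/devices/system/cpu/cpufreq/interactive/,
 * e.g. (paths assume the usual sysfs mount point):
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */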
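/*
 * Idle hooks: entering idle may arm (or flag for cancellation) the
 * per-CPU timer; leaving idle re-arms it so load is re-evaluated
 * shortly after the CPU goes busy.
 */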
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
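		/*
		 * A deferrable timer does not wake an idle CPU; use a
		 * regular timer only when the governor should keep
		 * re-evaluating (and reducing) speed during idle.
		 */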
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");