/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned long target_load = DEFAULT_TARGET_LOAD;

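/*
 * Worked example of the steady-state speed request computed in the timer
 * below (numbers are illustrative only, not anything this file mandates):
 * with target_load at its default of 90, a CPU currently at 1000000 kHz
 * that measures 45% load is asked for 1000000 * 45 / 90 = 500000 kHz,
 * which is then snapped to a real table frequency via
 * cpufreq_frequency_table_target().
 */
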
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

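/*
 * Example with the defaults above (illustrative): timer_rate is 20 ms and
 * above_hispeed_delay_val is one timer interval, so a CPU already running
 * at hispeed_freq must stay busy through at least one more 20 ms sample
 * before the governor will raise its speed beyond hispeed_freq.
 */
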
/*
 * Non-zero means longer-term speed boost active.
 */

static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
        "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

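/*
 * With these permissions the parameter is also writable at runtime; on a
 * typical sysfs layout (path is an assumption, not verified here):
 *
 *   echo 1 > /sys/module/cpufreq_interactive/parameters/governidle
 */
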
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

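/*
 * Userspace selects this governor per policy through the standard cpufreq
 * interface, e.g. (sketch, assuming sysfs is mounted at /sys):
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */
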
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        mod_timer_pinned(&pcpu->cpu_timer,
                         jiffies + usecs_to_jiffies(timer_rate));
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
}

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(now - pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if ((cpu_load >= go_hispeed_load || boost_val) &&
            pcpu->target_freq < hispeed_freq)
                new_freq = hispeed_freq;
        else
                new_freq = pcpu->policy->cur * cpu_load / target_load;

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }
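
        /*
         * Timing example using the defaults in this file (illustrative):
         * once floor_freq has been validated, lower requests are refused
         * until 80 ms (DEFAULT_MIN_SAMPLE_TIME) have elapsed; only then is
         * the floor reset to the new, lower target below.
         */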
        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = now;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = now;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If governing speed in idle and already at min, cancel the
                 * timer if that CPU goes idle.  We don't need to re-evaluate
                 * speed until the next idle exit.
                 */
                if (governidle && pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                cpufreq_interactive_timer_resched(pcpu);
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->timer_idlecancel = 0;
                        cpufreq_interactive_timer_resched(pcpu);
                }
        } else if (governidle) {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->timer_idlecancel = 0;
                cpufreq_interactive_timer_resched(pcpu);
        } else if (!governidle &&
                   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }
}

Mike Chan9d49b702010-06-22 11:26:45 -0700323{
324 unsigned int cpu;
325 cpumask_t tmp_mask;
326 unsigned long flags;
327 struct cpufreq_interactive_cpuinfo *pcpu;
328
329 while (1) {
330 set_current_state(TASK_INTERRUPTIBLE);
Todd Poynor8a37bb72012-07-16 17:07:15 -0700331 spin_lock_irqsave(&speedchange_cpumask_lock, flags);
Mike Chan9d49b702010-06-22 11:26:45 -0700332
Todd Poynor8a37bb72012-07-16 17:07:15 -0700333 if (cpumask_empty(&speedchange_cpumask)) {
334 spin_unlock_irqrestore(&speedchange_cpumask_lock,
335 flags);
Mike Chan9d49b702010-06-22 11:26:45 -0700336 schedule();
337
338 if (kthread_should_stop())
339 break;
340
Todd Poynor8a37bb72012-07-16 17:07:15 -0700341 spin_lock_irqsave(&speedchange_cpumask_lock, flags);
Mike Chan9d49b702010-06-22 11:26:45 -0700342 }
343
344 set_current_state(TASK_RUNNING);
Todd Poynor8a37bb72012-07-16 17:07:15 -0700345 tmp_mask = speedchange_cpumask;
346 cpumask_clear(&speedchange_cpumask);
347 spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
Mike Chan9d49b702010-06-22 11:26:45 -0700348
349 for_each_cpu(cpu, &tmp_mask) {
350 unsigned int j;
351 unsigned int max_freq = 0;
352
353 pcpu = &per_cpu(cpuinfo, cpu);
354 smp_rmb();
355
356 if (!pcpu->governor_enabled)
357 continue;
358
Mike Chan9d49b702010-06-22 11:26:45 -0700359 for_each_cpu(j, pcpu->policy->cpus) {
360 struct cpufreq_interactive_cpuinfo *pjcpu =
361 &per_cpu(cpuinfo, j);
362
363 if (pjcpu->target_freq > max_freq)
364 max_freq = pjcpu->target_freq;
365 }
366
367 if (max_freq != pcpu->policy->cur)
368 __cpufreq_driver_target(pcpu->policy,
369 max_freq,
370 CPUFREQ_RELATION_H);
Todd Poynor8a37bb72012-07-16 17:07:15 -0700371 trace_cpufreq_interactive_setspeed(cpu,
372 pcpu->target_freq,
Todd Poynora1e19512012-02-16 16:27:59 -0800373 pcpu->policy->cur);
Mike Chan9d49b702010-06-22 11:26:45 -0700374 }
375 }
376
377 return 0;
378}
379
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static ssize_t show_target_load(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", target_load);
}

static ssize_t store_target_load(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        target_load = val;
        return count;
}

static struct global_attr target_load_attr =
        __ATTR(target_load, S_IRUGO | S_IWUSR,
                show_target_load, store_target_load);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                                struct attribute *attr, const char *buf,
                                size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

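/*
 * Example use of the boost interfaces (paths assume the attribute group
 * below lands under the global cpufreq directory): a userspace input
 * handler can pulse all CPUs to hispeed_freq for at least min_sample_time
 * with
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * or hold the boost by writing 1, then later 0, to .../interactive/boost.
 */
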
static struct attribute *interactive_attributes[] = {
        &target_load_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

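/*
 * This group registers against cpufreq_global_kobject at GOV_START, so the
 * tunables are global rather than per-policy.  A minimal tuning sketch
 * (values are examples only; units are kHz, percent, and microseconds):
 *
 *   cd /sys/devices/system/cpu/cpufreq/interactive
 *   echo 1200000 > hispeed_freq
 *   echo 95 > go_hispeed_load
 *   echo 40000 > min_sample_time
 */
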
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                        &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (governidle)
                        init_timer(&pcpu->cpu_timer);
                else
                        init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");