/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

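/*
 * Per-CPU governor state.  The idle-time snapshots feed the load
 * estimate; floor_freq/floor_validate_time and hispeed_validate_time
 * enforce the min_sample_time and above_hispeed_delay intervals below.
 */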
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned long target_load = DEFAULT_TARGET_LOAD;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
116
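/*
 * Re-arm the per-CPU sampling timer one timer_rate interval from now and
 * snapshot this CPU's idle time for the next short-term load sample.
 */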
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}
126
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
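	/*
	 * Worked example: over a 20 ms window (delta_time == 20000 us) of
	 * which 5 ms was idle (delta_idle == 5000 us),
	 * cpu_load = 100 * (20000 - 5000) / 20000 = 75.
	 */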
160
	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;
177
	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq < hispeed_freq &&
		    hispeed_freq < pcpu->policy->max) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->cur * cpu_load / target_load;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    now - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(
					data, cpu_load, pcpu->target_freq,
					pcpu->policy->cur, new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = pcpu->policy->cur * cpu_load / target_load;
	}
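
	/*
	 * Example of the target_load heuristic above: at policy->cur ==
	 * 1000000 kHz with cpu_load 45 and target_load 90,
	 * new_freq = 1000000 * 45 / 90 = 500000 kHz, i.e. the speed at
	 * which the measured busy time would have been roughly the
	 * target load.
	 */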
201
	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}
273
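/*
 * Idle-entry hook, called via the idle notifier below: ensure a CPU
 * running above min speed keeps a timer armed so load is re-evaluated,
 * and in governidle mode cancel a pending timer on a CPU idling at min.
 */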
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}
312
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}
331
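/*
 * The speedchange task sleeps until one or more CPUs are flagged in
 * speedchange_cpumask, then drives each flagged CPU's policy to the
 * highest target_freq requested among the CPUs sharing that policy.
 */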
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}
389
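/*
 * Boost: raise every online CPU currently below hispeed_freq to
 * hispeed_freq, and make that the floor so speed will not ramp back
 * down for at least min_sample_time.
 */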
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}
425
static ssize_t show_target_load(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", target_load);
}

static ssize_t store_target_load(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	target_load = val;
	return count;
}

static struct global_attr target_load_attr =
	__ATTR(target_load, S_IRUGO | S_IWUSR,
		show_target_load, store_target_load);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

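/*
 * Example boostpulse usage (path assumes the sysfs layout noted above):
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * The written value is parsed but otherwise ignored; any successful
 * write triggers a single boost.
 */
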
static struct attribute *interactive_attributes[] = {
	&target_load_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
626
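/* Dispatch idle-entry/exit notifications to the handlers above. */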
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
646
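/*
 * Governor event callback: GOV_START initializes per-CPU state and
 * starts the sampling timers (registering the idle notifier and sysfs
 * group on first use), GOV_STOP tears that down, and GOV_LIMITS clamps
 * the current speed into the new policy bounds.
 */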
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
728
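/*
 * Module init: apply the default tunables, prepare per-CPU timers
 * (deferrable unless governidle is set, so idle CPUs are not woken just
 * to lower their speed), and spawn the SCHED_FIFO speedchange task.
 */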
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
766
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");