/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

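/*
 * Note on layout (follows from freq_to_targetload()/store_target_loads()
 * below): target_loads is a flat array of alternating values, where even
 * indices hold target loads and odd indices hold the frequency at which
 * the next load takes effect.  For example (hypothetical values), writing
 * "85 1000000:90" stores {85, 1000000, 90}: aim for 85% load below
 * 1.0 GHz and 90% at or above it.
 */
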
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

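/*
 * Usage example (paths assumed from standard module_param semantics, not
 * specific to this driver): governidle can be toggled at runtime via
 *   echo 1 > /sys/module/cpufreq_interactive/parameters/governidle
 * or set at boot with cpufreq_interactive.governidle=1 when built in.
 */
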
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

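/*
 * Note (general cpufreq behavior, not stated in this file):
 * max_transition_latency is in nanoseconds, so 10000000 ns = 10 ms; the
 * cpufreq core compares it against the driver's reported transition
 * latency and refuses to pair this governor with slower drivers.
 */
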
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int curload)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int loadadjfreq = freq * curload;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

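/*
 * Worked example (hypothetical frequency table {300, 600, 900, 1200} MHz):
 * at cur = 1200 MHz with curload = 40 and the default 90 target load,
 * loadadjfreq = 1200 * 40 = 48000 and loadadjfreq / tl = 533, so
 * CPUFREQ_RELATION_L picks 600 MHz.  Re-evaluating at 600 MHz yields 600
 * again (48000 / 600 = 80% implied load, under the 90% target), so
 * choose_freq() converges and returns 600.
 */
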
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

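	/*
	 * Example: with timer_rate = 20000 us, a CPU that was idle for
	 * 5000 us of the 20000 us window yields
	 * cpu_load = 100 * (20000 - 5000) / 20000 = 75.
	 */
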
	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if ((cpu_load >= go_hispeed_load || boost_val) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = choose_freq(pcpu, cpu_load);

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
							   pcpu->target_freq,
							   pcpu->policy->cur);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
	       show_target_loads, store_target_loads);

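/*
 * Usage example (path assumed from the "interactive" attribute group
 * registered on cpufreq_global_kobject; frequencies are hypothetical):
 *   echo "85 1000000:90 1700000:99" > \
 *     /sys/devices/system/cpu/cpufreq/interactive/target_loads
 * requests an 85% target load below 1.0 GHz, 90% from 1.0 GHz up to (but
 * not including) 1.7 GHz, and 99% at or above 1.7 GHz.
 */
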
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

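/*
 * Usage example (sysfs paths assumed as above): "echo 1 > .../boost"
 * holds all online CPUs at or above hispeed_freq until "echo 0 >
 * .../boost", while writing any value to .../boostpulse triggers a
 * single boost whose floor expires after min_sample_time.
 */
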
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

930MODULE_AUTHOR("Mike Chan <mike@android.com>");
931MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
932 "Latency sensitive workloads");
933MODULE_LICENSE("GPL");