/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	spin_lock(&pcpu->load_lock);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock(&pcpu->load_lock);
}

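/*
 * target_loads alternates values: loads at even indices, the ascending
 * frequencies at which the next load takes effect at odd indices, e.g.
 * {85, 1000000, 90}.  Return the target load that applies at freq.
 */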
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

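	/*
	 * freqmin/freqmax bracket the result: speeds found too slow raise
	 * freqmin, speeds found fast enough lower freqmax, so the loop
	 * below converges even when target loads vary across frequencies.
	 */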
	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

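/*
 * Credit the wall time since the last sample to cputime_speedadj,
 * weighted by the frequency the CPU ran at (active time * current
 * frequency).  Callers must hold pcpu->load_lock.
 */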
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time_us(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
	active_time = delta_time - delta_idle;
	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock(&pcpu->load_lock);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock(&pcpu->load_lock);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

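	/*
	 * cputime_speedadj / delta_time is the busy-time-weighted average
	 * frequency; scaling by 100 and dividing by target_freq gives the
	 * load as a percentage of the current target.  For example, 10 ms
	 * busy at 1.5 GHz within a 20 ms window and a 1 GHz target yields
	 * cpu_load = 75.
	 */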
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if ((cpu_load >= go_hispeed_load || boosted) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = choose_freq(pcpu, loadadjfreq);

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}

}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

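/*
 * The speedchange task waits for CPUs flagged in speedchange_cpumask and,
 * for each one, drives its policy to the highest target_freq requested by
 * any CPU sharing that policy.
 */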
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);
		}
	}

	return 0;
}

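/*
 * Immediately bump every online CPU below hispeed_freq up to it and set
 * the floor there, so the boost cannot decay before min_sample_time (or,
 * for a pulse, before the pulse duration expires).
 */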
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

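/*
 * On each completed frequency transition, fold the time spent at the
 * outgoing frequency into every affected CPU's cputime_speedadj, so the
 * next sample is weighted by the speeds actually run.
 */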
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock(&pjcpu->load_lock);
			update_load(cpu);
			spin_unlock(&pjcpu->load_lock);
		}
	}

	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

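/*
 * Parse a "load [freq:load] ..." string: target loads at even token
 * positions, the ascending frequencies at which the next load takes
 * effect at odd positions.  An even token count is rejected.
 */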
static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
	       show_target_loads, store_target_loads);
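
/*
 * Example, assuming the standard cpufreq sysfs layout for global
 * governor attributes:
 *
 *   echo "85 1000000:90 1700000:99" > \
 *	/sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * targets 85% load below 1 GHz, 90% from 1 GHz to 1.7 GHz, and 99% at
 * and above 1.7 GHz.
 */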

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

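/*
 * Any write to boostpulse triggers a one-shot boost: CPUs jump to
 * hispeed_freq and may drop back once boostpulse_duration_val usecs
 * have elapsed.
 */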
static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

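/*
 * Governor event handler: GOV_START arms the per-CPU sample timers and,
 * on first use, registers the sysfs group plus the idle and transition
 * notifiers; GOV_STOP reverses that; GOV_LIMITS clamps the current speed
 * into the new policy bounds.
 */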
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		spin_lock_init(&pcpu->load_lock);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");