/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load bursts (default: policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static bool io_is_busy;

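/*
 * Fallback idle-time accounting: derive idle time from the kcpustat busy
 * counters when per-CPU NO_HZ idle-time accounting is not available.
 */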
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

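/*
 * Prefer the NO_HZ idle-time accounting; treat iowait as idle time unless
 * io_is_busy is set.
 */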
static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

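/*
 * Re-snapshot the idle statistics and rearm the (CPU-pinned) sampling and
 * slack timers for another timer_rate interval.
 */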
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

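/*
 * Look up the above_hispeed_delay for a given frequency. The table is a
 * flat array of delay/frequency pairs: even entries are delays in usecs,
 * odd entries are the frequencies at which the next delay takes effect.
 * For example (an illustrative tuning, not a default), the sysfs string
 * "20000 1000000:40000" parses to {20000, 1000000, 40000}: a 20ms delay
 * below 1 GHz and a 40ms delay at or above it.
 */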
static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

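/*
 * Look up the target load for a given frequency. target_loads uses the
 * same flat pair layout as above_hispeed_delay: even entries are loads,
 * odd entries are frequency thresholds (e.g. an illustrative "85
 * 1500000:90" targets 85% load below 1.5 GHz and 90% at or above it).
 */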
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

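/*
 * Accumulate (active time * current frequency) into cputime_speedadj so the
 * sampled load can later be normalized against the target frequency, and
 * refresh the idle snapshot. Caller holds pcpu->load_lock.
 */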
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

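/*
 * Sampling timer: compute the CPU load over the last interval, choose a new
 * target frequency (bumping to hispeed_freq on load bursts or boosts),
 * enforce the above-hispeed delay and the min_sample_time floor, then hand
 * the speed change off to the speedchange task.
 */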
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already at max speed with no need to change it; wait until the
	 * next idle to re-evaluate. No timer is needed until then.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

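/*
 * Idle-entry hook: if this CPU is above the minimum speed, make sure a
 * sampling timer is pending so an idle CPU cannot hold other CPUs in its
 * policy at a high speed indefinitely.
 */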
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

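/*
 * Idle-exit hook: rearm the sampling timer, or run the evaluation
 * immediately if the timer already expired while the CPU was idle.
 */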
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

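/*
 * Realtime worker: for each CPU flagged in speedchange_cpumask, drive the
 * policy to the highest target_freq requested by any CPU sharing that
 * policy.
 */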
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

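/*
 * Raise every online CPU below hispeed_freq to hispeed_freq, set the floor
 * there, and wake the speedchange task to apply any changes.
 */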
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

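/*
 * Frequency-transition notifier: on POSTCHANGE, fold the time spent at the
 * old speed into the load accounting of every CPU in the policy.
 */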
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

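/*
 * Parse a sysfs string of the form "val freq:val freq:val ..." into an
 * array of unsigned ints. An odd number of tokens is required so that
 * every frequency threshold is bracketed by values.
 */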
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
		show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static ssize_t show_io_is_busy(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", io_is_busy);
}

static ssize_t store_io_is_busy(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	io_is_busy = val;
	return count;
}

static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
		show_io_is_busy, store_io_is_busy);

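/*
 * These attributes are registered on cpufreq_global_kobject under the group
 * name "interactive"; on a typical system they appear as, e.g.,
 * /sys/devices/system/cpu/cpufreq/interactive/target_loads.
 */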
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

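/*
 * Governor callback: start/stop per-CPU sampling and the shared sysfs
 * interface, and clamp target frequencies when policy limits change.
 */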
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			cpufreq_interactive_timer_start(j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* Hold the write semaphore to avoid a timer race. */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* Update target_freq first. */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/*
			 * Reschedule the timers. Delete them first: a timer
			 * callback that fails to acquire the semaphore may
			 * otherwise return without rearming its timer,
			 * leaving it stopped unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

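/*
 * The slack timer's only job is to wake the CPU from idle so the idle-exit
 * path re-evaluates speed; all the work happens there, so the handler is
 * empty.
 */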
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	spin_lock_init(&above_hispeed_delay_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");