/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

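/*
 * Note: target_loads and above_hispeed_delay are stored as flattened
 * "value [freq:value] ..." pair lists: even indices hold values, odd
 * indices hold the frequency at which the next value takes effect.
 * For example (hypothetical speeds), writing "85 1000000:90 1700000:99"
 * to the target_loads attribute targets 85% load below 1 GHz, 90% from
 * 1 GHz up to 1.7 GHz, and 99% at or above 1.7 GHz.  The attributes
 * typically appear under /sys/devices/system/cpu/cpufreq/interactive/
 * (or per policy when have_governor_per_policy()).
 */
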
struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(
	unsigned int cpu,
	cputime64_t *wall,
	bool io_is_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
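/*
 * Worked example (hypothetical 300/600/900/1200 MHz table, target load
 * 90 at all speeds): at 600 MHz and 99% measured load, loadadjfreq is
 * 99 * 600000.  loadadjfreq / 90 = 660000, so CPUFREQ_RELATION_L picks
 * 900 MHz; re-evaluating at 900 MHz picks 900 MHz again and the loop
 * terminates.  Projected load at 900 MHz is 59400000 / 900000 = 66,
 * below the target.
 */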
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
				unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

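/*
 * Accumulate frequency-weighted active time since the last sample:
 * cputime_speedadj grows by (busy time) * (current frequency), so
 * dividing it by the elapsed time later yields the average busy
 * fraction scaled by the speed at which it was incurred.
 */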
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

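/*
 * Per-CPU sampling timer: computes the load since the last sample,
 * picks a target via choose_freq(), then applies the hispeed_freq
 * burst rule, the above_hispeed_delay hold-off, and the floor_freq /
 * min_sample_time rule before queueing the change for the
 * speedchange task.
 */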
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

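/*
 * Frequency changes are applied from a dedicated realtime kthread
 * rather than from the (soft)irq-context timer above, since
 * __cpufreq_driver_target() may sleep in the underlying driver.  The
 * timer only records the wanted speed in speedchange_cpumask and wakes
 * this task.
 */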
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

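/*
 * Bump all online CPUs to at least hispeed_freq and set the floor so
 * speed is held there until the boost ends (see the floor_freq checks
 * in cpufreq_interactive_timer()).
 */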
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

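/*
 * Transition notifier: on POSTCHANGE, fold the time spent at the old
 * frequency into each sibling CPU's cputime_speedadj so the load
 * estimate stays weighted by the speed actually run at.
 */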
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

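/*
 * Parse a "value [freq:value] ..." string (as written to target_loads
 * or above_hispeed_delay) into a flat unsigned int array.  A valid
 * string always yields an odd number of tokens.
 */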
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

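/*
 * For example, show_store_gov_pol_sys(hispeed_freq) emits
 * show_hispeed_freq_gov_sys()/store_hispeed_freq_gov_sys() bound to
 * common_tunables, plus show_hispeed_freq_gov_pol()/
 * store_hispeed_freq_gov_pol() bound to policy->governor_data.
 */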
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

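/*
 * Governor event handler: POLICY_INIT allocates (or refcounts) the
 * tunables and creates the sysfs group, POLICY_EXIT tears them down,
 * START arms the per-CPU timers and enables the governor, STOP
 * disables it, and LIMITS clamps target_freq into [min, max] and
 * restarts the timers.
 */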
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			return rc;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());
			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* Hold the write semaphore to avoid a timer race. */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* Update target_freq first. */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/*
			 * Reschedule the timer: delete the timers first,
			 * else the timer callback may return without
			 * re-arming the timer when it fails to acquire the
			 * semaphore, and that race can leave the timer
			 * stopped unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

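/*
 * The slack timer needs no body: its expiry merely forces the CPU out
 * of idle so the idle-end path can re-evaluate (and drop) speed, since
 * the deferrable cpu_timer alone would not wake an idle CPU.
 */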
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");