/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have a single governor instance for the system */
struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

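/*
 * Reset load statistics and re-arm the sampling timer on the current
 * CPU: snapshot idle time, zero the accumulated cputime_speedadj, and
 * pin the next sample timer_rate usecs out (plus an optional slack
 * timer to force a wakeup from idle while above the policy minimum).
 */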
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take the enable_sem write semaphore to avoid any timer
 * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

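/*
 * above_hispeed_delay and target_loads share a flattened table layout:
 * even indices hold values, odd indices hold the frequency at or above
 * which the next value applies, e.g. { 90, 1300000, 80 } means "90
 * below 1.3 GHz, 80 at or above it". The lookups below scan the pairs
 * until the requested frequency falls below the next boundary.
 */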
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

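/*
 * Fold the interval since the last sample into cputime_speedadj: active
 * (non-idle) time is weighted by the frequency it ran at, so dividing
 * the accumulator by elapsed time later yields "percent load times
 * frequency". Returns the timestamp of this sample.
 */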
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

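/*
 * Per-CPU sampling timer handler: compute the load since the last
 * sample, pick a new target speed (jumping to at least hispeed_freq on
 * a load burst or boost), honor above_hispeed_delay and the
 * min_sample_time floor, then hand the CPU off to the speedchange
 * thread to apply the new frequency.
 */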
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

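/*
 * Idle-entry hook: if this CPU is idling above the policy minimum, make
 * sure a sampling timer is pending so speed is re-evaluated (and can
 * drop) instead of holding sibling CPUs high indefinitely.
 */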
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

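/*
 * Idle-exit hook: re-arm the sampling timer, or run an already-expired
 * one immediately now that the CPU is awake again.
 */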
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

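/*
 * Realtime worker thread: for every CPU flagged in speedchange_cpumask,
 * drive the policy to the highest target_freq requested by any CPU
 * sharing that policy.
 */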
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

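/*
 * Raise every online CPU to at least hispeed_freq and pin the floor
 * there, then wake the speedchange thread to apply any raises.
 */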
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

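/*
 * Frequency transition notifier: after a speed change, fold the time
 * spent at the old frequency into every affected CPU's load statistics
 * so the next sample is weighted correctly.
 */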
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

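/*
 * Parse a sysfs buffer of unsigned ints separated by spaces or colons
 * into a kmalloc'd array; an even token count is rejected so the result
 * always forms a valid value/frequency pair table (see above).
 */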
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

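/*
 * Accepts e.g. "echo 85 1500000:90 > target_loads": target load 85
 * below 1.5 GHz, 90 at or above it (frequencies in kHz, as elsewhere
 * in cpufreq).
 */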
static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

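/* Same "value [freq:value ...]" format as target_loads; delays in usecs. */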
static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,	\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

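/*
 * Governor event callback: POLICY_INIT/EXIT manage the tunables and
 * sysfs group (shared or per-policy), START/STOP arm and tear down the
 * per-CPU timers under enable_sem, and LIMITS clamps target_freq to the
 * new policy bounds, restarting timers when the max is raised.
 */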
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timers only if policy->max is
			 * raised. Delete them first, else the timer callback
			 * may return without re-arming the timer when it
			 * fails to acquire the semaphore; that race can
			 * leave the timer stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

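/*
 * The slack timer's handler is intentionally empty: the timer is
 * non-deferrable, so its expiry wakes the CPU from idle and the
 * idle-exit hook then runs the pending (deferrable) sampling timer so
 * speed can drop.
 */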
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");