/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load bursts (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency.
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

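/*
 * Reschedule the sampling timer on the current CPU: snapshot the idle
 * time and reset the speedadj accumulator under load_lock, then re-arm
 * cpu_timer (and the slack timer, when it applies) one timer_rate ahead.
 */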
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
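/*
 * above_hispeed_delay and target_loads are stored as flat arrays of
 * alternating values and frequency thresholds ({val, freq, val, freq,
 * ..., val}, always an odd number of elements).  The lookups below step
 * through the pairs to pick the value for the band that contains freq.
 */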
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

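/*
 * Accumulate time weighted by the current speed: add active (non-idle)
 * time since the last sample, multiplied by policy->cur, into
 * cputime_speedadj.  The caller must hold the CPU's load_lock.
 */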
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

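/*
 * Per-CPU sampling timer.  Converts the speed-weighted active time into
 * a load estimate relative to target_freq, picks a new target (bumping
 * to hispeed_freq on load bursts or boosts), enforces the
 * above_hispeed_delay and floor_freq/min_sample_time hold-offs, then
 * hands the CPU to the speedchange task to apply the change.
 */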
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

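/*
 * Idle-path hooks, invoked via the idle notifier registered below.  On
 * idle entry, keep the sampling timer running while above policy->min
 * so an idle CPU cannot hold its siblings at a high speed indefinitely;
 * on idle exit, re-arm the timer or run it at once if it has expired.
 */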
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

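/*
 * Frequency-scaling thread: woken with a mask of CPUs whose target
 * speed changed, it applies the highest target_freq among each affected
 * policy's CPUs via __cpufreq_driver_target().
 */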
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

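/*
 * Raise all online CPUs to at least hispeed_freq and reset their floor,
 * then kick the speedchange task if any target actually changed.
 */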
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

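/*
 * Frequency transition notifier: on CPUFREQ_POSTCHANGE, update the
 * speed-weighted load accounting of every CPU in the affected policy so
 * later samples are split at the frequency-change boundary.
 */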
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

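/*
 * Parse a "value freq:value freq:value ..." tunable string into a
 * kmalloc'd array of unsigned ints; an even token count is rejected so
 * the array always ends on a value.  Returns ERR_PTR() on failure.
 */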
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

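/*
 * sysfs interface: each tunable below gets show/store handlers, later
 * wrapped by the gov_sys/gov_pol macros into the global and per-policy
 * attribute groups.
 */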
static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

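/*
 * Governor entry point: dispatches the cpufreq core events to allocate
 * or free the tunables (POLICY_INIT/EXIT), start or stop the per-CPU
 * timers (START/STOP), and clamp target frequencies to new policy
 * limits (LIMITS).
 */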
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timers only if policy->max has been
			 * raised.  Delete and restart them under enable_sem;
			 * otherwise the timer callback may return without
			 * re-arming the timer when it fails to acquire the
			 * semaphore, and the timer would stop unexpectedly.
			 */

			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

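/*
 * The slack timer is deliberately a no-op: its only purpose is to wake
 * the CPU from idle so the deferrable cpu_timer gets a chance to
 * re-evaluate (and reduce) the speed.
 */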
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");