/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        spinlock_t target_freq_lock; /* protects target_freq */
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 pol_floor_val_time; /* policy floor_validate_time */
        u64 loc_floor_val_time; /* per-cpu floor_validate_time */
        u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
        u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load burst (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load.  Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can
         * ramp down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        bool boosted;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
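
/*
 * Design note (a sketch of the timer scheme as read from this file):
 * cpu_timer is initialized as deferrable in cpufreq_interactive_init(), so
 * it does not wake an idle CPU just to sample load.  cpu_slack_timer is a
 * regular timer whose handler is a nop; when armed above, its only effect
 * is to bring the CPU out of idle at most timer_slack_val usecs past the
 * sample point, so the idle-exit path can re-evaluate and drop the speed.
 */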

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                        freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}
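
/*
 * Note: target_loads (and above_hispeed_delay) are stored as a flat array
 * of value/frequency pairs, {val0, freq1, val1, freq2, val2, ...}, always
 * an odd number of elements: val0 applies below freq1, val1 from freq1 up
 * to freq2, and so on.  For example (illustrative numbers), a target_loads
 * array of {85, 1000000, 90} means a target load of 85% below 1 GHz and
 * 90% at or above it.  The lookup loops above step by two over the
 * frequency entries to find the applicable value.
 */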

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}
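
/*
 * Worked example (illustrative numbers, not from the source): with the CPU
 * at 1000 MHz and 45% active over the sample, loadadjfreq = 45 * 1000000.
 * With a flat target load of 90, the first candidate is 45000000 / 90 =
 * 500000 kHz; CPUFREQ_RELATION_L rounds that up to the nearest frequency
 * table entry, and the loop repeats using that entry's own target load
 * until the choice stabilizes between freqmin and freqmax.
 */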

static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}
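
/*
 * Load accounting sketch: cputime_speedadj accumulates active_time *
 * cur_freq, so (cputime_speedadj / wall_time) is the average frequency
 * actually consumed over the sample.  The timer below multiplies that by
 * 100 and divides by target_freq to obtain a percentage load normalized
 * to the current target speed.
 */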

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        u64 max_fvtime;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        tunables->boosted = tunables->boost_val ||
                now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
                if (pcpu->policy->cur < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
                if (new_freq > tunables->hispeed_freq &&
                    pcpu->policy->cur < tunables->hispeed_freq)
                        new_freq = tunables->hispeed_freq;
        }

        if (pcpu->policy->cur >= tunables->hispeed_freq &&
            new_freq > pcpu->policy->cur &&
            now - pcpu->pol_hispeed_val_time <
            freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        pcpu->loc_hispeed_val_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
        if (new_freq < pcpu->floor_freq &&
            pcpu->target_freq >= pcpu->policy->cur) {
                if (now - max_fvtime < tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                if (pcpu->target_freq >= pcpu->policy->cur ||
                    new_freq >= pcpu->policy->cur)
                        pcpu->loc_floor_val_time = now;
        }

        if (pcpu->target_freq == new_freq &&
            pcpu->target_freq <= pcpu->policy->cur) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;
                        struct cpufreq_interactive_cpuinfo *pjcpu;
                        u64 hvt = ~0ULL, fvt = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                pjcpu = &per_cpu(cpuinfo, j);

                                fvt = max(fvt, pjcpu->loc_floor_val_time);
                                if (pjcpu->target_freq > max_freq) {
                                        max_freq = pjcpu->target_freq;
                                        hvt = pjcpu->loc_hispeed_val_time;
                                } else if (pjcpu->target_freq == max_freq) {
                                        hvt = min(hvt,
                                                  pjcpu->loc_hispeed_val_time);
                                }
                        }
                        for_each_cpu(j, pcpu->policy->cpus) {
                                pjcpu = &per_cpu(cpuinfo, j);
                                pjcpu->pol_floor_val_time = fvt;
                        }

                        if (max_freq != pcpu->policy->cur) {
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                                for_each_cpu(j, pcpu->policy->cpus) {
                                        pjcpu = &per_cpu(cpuinfo, j);
                                        pjcpu->pol_hispeed_val_time = hvt;
                                }
                        }
                        trace_cpufreq_interactive_setspeed(cpu,
                                                           pcpu->target_freq,
                                                           pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;

        tunables->boosted = true;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (tunables != pcpu->policy->governor_data)
                        continue;

                spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->pol_hispeed_val_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}
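
/*
 * Parsing example (illustrative): writing "85 1000000:90" yields ntokens=3
 * and tokenized_data = {85, 1000000, 90}.  An even token count such as
 * "85 1000000" is rejected with -EINVAL, since a trailing frequency would
 * have no value to apply above it.
 */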

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}
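
/*
 * Example sysfs usage (illustrative; the path assumes the system-wide,
 * non-per-policy layout under /sys/devices/system/cpu/cpufreq/interactive):
 *
 *   echo "20000 1300000:50000" > above_hispeed_delay
 *
 * waits 20 ms before raising speed above hispeed_freq, and 50 ms once the
 * current frequency is at or above 1.3 GHz.
 */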

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val, val_round;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        val_round = jiffies_to_usecs(usecs_to_jiffies(val));
        if (val != val_round)
                pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
                        val_round);

        tunables->timer_rate = val_round;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                if (!tunables->boosted)
                        cpufreq_interactive_boost(tunables);
        } else {
                tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        if (!tunables->boosted)
                cpufreq_interactive_boost(tunables);
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
        return show_##file_name(common_tunables, buf); \
} \
 \
static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
        return show_##file_name(policy->governor_data, buf); \
}

#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, \
        size_t count) \
{ \
        return store_##file_name(common_tunables, buf, count); \
} \
 \
static ssize_t store_##file_name##_gov_pol \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
        return store_##file_name(policy->governor_data, buf, count); \
}

#define show_store_gov_pol_sys(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name) \
static struct freq_attr _name##_gov_pol = \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name) \
        gov_sys_attr_rw(_name); \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        if (val == IDLE_END)
                cpufreq_interactive_idle_end();

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};
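
/*
 * Governor event flow (as dispatched by the cpufreq core to the handler
 * below): POLICY_INIT allocates or reuses the tunables and creates the
 * sysfs group, START arms the per-CPU timers and marks the governor
 * enabled, LIMITS clamps target frequencies into the new policy bounds,
 * STOP tears the timers down under enable_sem, and POLICY_EXIT drops the
 * tunables refcount and removes the sysfs group.
 */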

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;
        unsigned long flags;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                policy->governor_data = tunables;
                if (!have_governor_per_policy()) {
                        common_tunables = tunables;
                        WARN_ON(cpufreq_get_global_kobject());
                }

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
                        if (!have_governor_per_policy()) {
                                common_tunables = NULL;
                                cpufreq_put_global_kobject();
                        }
                        return rc;
                }

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());

                        if (!have_governor_per_policy())
                                cpufreq_put_global_kobject();

                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->pol_floor_val_time =
                                ktime_to_us(ktime_get());
                        pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
                        pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
                        pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        down_read(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        up_read(&pcpu->enable_sem);
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                spin_lock_init(&pcpu->target_freq_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");