/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

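/*
 * With the default tunables above, timer_rate is 20 ms and timer_slack_val
 * is 4 * timer_rate = 80 ms: a busy CPU re-evaluates its speed every 20 ms,
 * while a CPU that goes idle above policy->min is still woken within
 * roughly 20 + 80 = 100 ms so its speed can be dropped rather than held
 * high indefinitely.
 */
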
/*
 * The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

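/*
 * Both above_hispeed_delay and target_loads are flat arrays of the form
 * { val0, freq1, val1, freq2, val2, ... }: even indexes hold the tunable
 * value, odd indexes hold the frequency (in kHz) at or above which the
 * next value applies.  For example, target_loads of
 * { 85, 1000000, 90, 1700000, 99 } means "target 85% load below 1 GHz,
 * 90% from 1 GHz up to 1.7 GHz, and 99% at 1.7 GHz and above".  The two
 * lookup helpers below walk these pairs under the matching spinlock.
 */
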
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
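/*
 * Worked example (hypothetical frequency table, for illustration only):
 * at 1,000,000 kHz with the CPU 60% busy and a flat target load of 90,
 * loadadjfreq is 60 * 1,000,000, so the first candidate is the lowest
 * table frequency at or above 60,000,000 / 90 ~= 666,667 kHz
 * (CPUFREQ_RELATION_L).  The loop then bisects between freqmin and
 * freqmax until the same frequency is chosen twice, i.e. the slowest
 * speed whose projected load does not exceed its target load.
 */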
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(
			pcpu->policy, loadadjfreq / tl,
			CPUFREQ_RELATION_L);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				index = cpufreq_frequency_table_target(
					pcpu->policy,
					freqmax - 1, CPUFREQ_RELATION_H);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				index = cpufreq_frequency_table_target(
					pcpu->policy,
					freqmin + 1, CPUFREQ_RELATION_L);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

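/*
 * The sampling timer below turns the accumulated cputime_speedadj into a
 * frequency-normalized load.  Example (times in usecs, freqs in kHz):
 * over a 20,000 us window at 1,000,000 kHz with 15,000 us active,
 * cputime_speedadj = 15,000 * 1,000,000, so loadadjfreq =
 * cputime_speedadj * 100 / 20,000 = 75 * 1,000,000 and cpu_load =
 * loadadjfreq / policy->cur = 75 (percent).
 */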
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val ||
		now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	index = cpufreq_frequency_table_target(pcpu->policy,
					       new_freq, CPUFREQ_RELATION_L);
	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
						unsigned int *pmax_freq,
						u64 *phvt, u64 *pfvt)
{
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int max_freq = 0;
	u64 hvt = ~0ULL, fvt = 0;
	unsigned int i;

	for_each_cpu(i, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);

		fvt = max(fvt, pcpu->loc_floor_val_time);
		if (pcpu->target_freq > max_freq) {
			max_freq = pcpu->target_freq;
			hvt = pcpu->loc_hispeed_val_time;
		} else if (pcpu->target_freq == max_freq) {
			hvt = min(hvt, pcpu->loc_hispeed_val_time);
		}
	}

	*pmax_freq = max_freq;
	*phvt = hvt;
	*pfvt = fvt;
}

static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
					   struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 hvt, fvt;
	unsigned int max_freq;
	int i;

	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);

	for_each_cpu(i, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->pol_floor_val_time = fvt;
	}

	if (max_freq != policy->cur) {
		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
		for_each_cpu(i, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->pol_hispeed_val_time = hvt;
		}
	}

	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			pcpu = &per_cpu(cpuinfo, cpu);

			down_write(&pcpu->policy->rwsem);

			if (likely(down_read_trylock(&pcpu->enable_sem))) {
				if (likely(pcpu->governor_enabled))
					cpufreq_interactive_adjust_cpu(cpu,
							pcpu->policy);
				up_read(&pcpu->enable_sem);
			}

			up_write(&pcpu->policy->rwsem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (!down_read_trylock(&pcpu->enable_sem))
			continue;

		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		if (tunables != pcpu->policy->governor_data) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);

		up_read(&pcpu->enable_sem);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

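/*
 * Parse a string of decimal values separated by spaces or colons into a
 * kmalloc'd array, e.g. "85 1000000:90" becomes { 85, 1000000, 90 }.  An
 * even token count is rejected because the callers below expect the
 * value/frequency pair layout described above freq_to_above_hispeed_delay().
 */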
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
				 char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
				  const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
			       char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->timer_rate = val_round;
	return count;
}

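/*
 * Rounding example for store_timer_rate() (assuming HZ=100, i.e. a 10 ms
 * jiffy): writing 19000 gives usecs_to_jiffies(19000) = 2 jiffies, so the
 * stored rate becomes jiffies_to_usecs(2) = 20000 us.
 */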
static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
				char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
				 const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
			       char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

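/*
 * For instance, show_store_gov_pol_sys(target_loads) below expands to four
 * wrappers: show/store_target_loads_gov_sys(), which forward to
 * common_tunables, and show/store_target_loads_gov_pol(), which forward to
 * the policy's own governor_data.
 */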
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

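/*
 * With a single system-wide instance, the group above typically shows up
 * as /sys/devices/system/cpu/cpufreq/interactive/ (exact paths can vary by
 * kernel configuration), so for example:
 *
 *	echo "85 1000000:90 1700000:99" > \
 *		/sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * With per-policy instances the same files appear under each policy,
 * e.g. /sys/devices/system/cpu/cpufreq/policy0/interactive/.
 */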
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive_init(struct cpufreq_policy *policy)
{
	int rc;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	if (have_governor_per_policy()) {
		WARN_ON(tunables);
	} else if (tunables) {
		tunables->usage_count++;
		policy->governor_data = tunables;
		return 0;
	}

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables) {
		pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tunables->usage_count = 1;
	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	policy->governor_data = tunables;
	if (!have_governor_per_policy())
		common_tunables = tunables;

	rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
	if (rc) {
		kfree(tunables);
		policy->governor_data = NULL;
		if (!have_governor_per_policy())
			common_tunables = NULL;
		return rc;
	}

	idle_notifier_register(&cpufreq_interactive_idle_nb);
	cpufreq_register_notifier(&cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}

static void cpufreq_governor_interactive_exit(struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables);

	if (!--tunables->usage_count) {
		cpufreq_unregister_notifier(&cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);

		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr());

		kfree(tunables);
		common_tunables = NULL;
	}

	policy->governor_data = NULL;
}

static int cpufreq_governor_interactive_start(struct cpufreq_policy *policy)
{
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables);

	mutex_lock(&gov_lock);

	freq_table = policy->freq_table;
	if (!tunables->hispeed_freq)
		tunables->hispeed_freq = policy->max;

	for_each_cpu(j, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, j);
		pcpu->policy = policy;
		pcpu->target_freq = policy->cur;
		pcpu->freq_table = freq_table;
		pcpu->floor_freq = pcpu->target_freq;
		pcpu->pol_floor_val_time =
			ktime_to_us(ktime_get());
		pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
		pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
		pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
		down_write(&pcpu->enable_sem);
		del_timer_sync(&pcpu->cpu_timer);
		del_timer_sync(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer_start(tunables, j);
		pcpu->governor_enabled = 1;
		up_write(&pcpu->enable_sem);
	}

	mutex_unlock(&gov_lock);
	return 0;
}

static void cpufreq_governor_interactive_stop(struct cpufreq_policy *policy)
{
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;

	mutex_lock(&gov_lock);
	for_each_cpu(j, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, j);
		down_write(&pcpu->enable_sem);
		pcpu->governor_enabled = 0;
		del_timer_sync(&pcpu->cpu_timer);
		del_timer_sync(&pcpu->cpu_slack_timer);
		up_write(&pcpu->enable_sem);
	}

	mutex_unlock(&gov_lock);
}

static void cpufreq_governor_interactive_limits(struct cpufreq_policy *policy)
{
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned long flags;

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
	for_each_cpu(j, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, j);

		down_read(&pcpu->enable_sem);
		if (pcpu->governor_enabled == 0) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		spin_lock_irqsave(&pcpu->target_freq_lock, flags);
		if (policy->max < pcpu->target_freq)
			pcpu->target_freq = policy->max;
		else if (policy->min > pcpu->target_freq)
			pcpu->target_freq = policy->min;

		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		up_read(&pcpu->enable_sem);
	}
}

static struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.init = cpufreq_governor_interactive_init,
	.exit = cpufreq_governor_interactive_exit,
	.start = cpufreq_governor_interactive_start,
	.stop = cpufreq_governor_interactive_stop,
	.limits = cpufreq_governor_interactive_limits,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_pinned_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &cpufreq_gov_interactive;
}
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");