/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	u64 max_freq_hyst_start_time;
	unsigned int min_freq;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	int first_cpu;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency.
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;
};

/*
 * HACK: FIXME: Bring back cpufreq_{get,put}_global_kobject()
 * definition removed by upstream commit 8eec1020f0c0 "cpufreq:
 * create cpu/cpufreq at boot time" to fix build failures.
 */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}
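
/*
 * Worked example (illustrative, assuming HZ=100 so one jiffy is 10000 us):
 * with timer_rate = 20000 us, step = usecs_to_jiffies(20000) = 2 jiffies.
 * In the aligned case with jif = 1003, do_div(jif, step) leaves jif = 501,
 * so every CPU's next window starts at jiffy (501 + 1) * 2 = 1004.
 * In the unaligned case the timer simply fires timer_rate from now.
 */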

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
			 usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		pcpu->time_in_idle =
			get_cpu_idle_time(smp_processor_id(),
					  &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
		del_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.expires = expires;
		add_timer_on(&pcpu->cpu_timer, cpu);
	}

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}
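
/*
 * Table layout example (illustrative values, not defaults): the tunable
 * string "20000 1400000:40000 1700000:80000" is stored flattened as
 * {20000, 1400000, 40000, 1700000, 80000}.  The lookup above walks the
 * freq:delay pairs (i += 2) until the next pair's frequency exceeds the
 * queried freq, so freq = 1500000 kHz would return a 40000 us delay.
 */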

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
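
/*
 * Worked example for choose_freq() (illustrative numbers): with a table of
 * {300000, 600000, 1000000} kHz, a flat target load of 90, and
 * loadadjfreq = 50000000 (100% load at 500 MHz), starting from cur =
 * 300000: loadadjfreq / 90 = 555555, so CPUFREQ_RELATION_L selects 600000.
 * The second pass maps 555555 to 600000 again, freq == prevfreq, and the
 * loop exits: 600 MHz is the lowest speed whose projected load
 * (50000000 / 600000, about 83%) stays within the 90% target.
 */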

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
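
/*
 * Load accounting sketch (assumed reading of the math above):
 * cputime_speedadj accumulates active_time * current_freq, and the timer
 * later divides it by elapsed wall time, so loadadjfreq ends up as
 * %active * current_freq.  For example, 15 ms active in a 20 ms window at
 * 1000000 kHz gives cputime_speedadj / delta_time = 750000 and
 * loadadjfreq = 75000000; dividing by any candidate frequency yields the
 * projected load percentage at that frequency.
 */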

#define MAX_LOCAL_LOAD 100
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;
	struct cpufreq_govinfo int_info;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	now = update_load(data);
	if (tunables->use_sched_load) {
		/*
		 * Unlock early to avoid deadlock.
		 *
		 * load_change_callback() for thread migration already
		 * holds rq lock. Then it locks load_lock to avoid racing
		 * with cpufreq_interactive_timer_resched/start().
		 * sched_get_busy() will also acquire rq lock. Thus we
		 * can't hold load_lock when calling sched_get_busy().
		 *
		 * load_lock used in this function protects time
		 * and load information. These stats are not used when
		 * scheduler input is available. Thus unlocking load_lock
		 * early is perfectly OK.
		 */
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		cputime_speedadj = (u64)sched_get_busy(data) *
				pcpu->policy->cpuinfo.max_freq;
		do_div(cputime_speedadj, tunables->timer_rate);
	} else {
		delta_time = (unsigned int)
			(now - pcpu->cputime_speedadj_timestamp);
		cputime_speedadj = pcpu->cputime_speedadj;
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		if (WARN_ON_ONCE(!delta_time))
			goto rearm;
		do_div(cputime_speedadj, delta_time);
	}

	loadadjfreq = (unsigned int)cputime_speedadj * 100;

	int_info.cpu = data;
	int_info.load = loadadjfreq / pcpu->policy->max;
	int_info.sampling_rate_us = tunables->timer_rate;
	atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
				   CPUFREQ_LOAD_CHANGE, &int_info);

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq &&
		    cpu_load <= MAX_LOCAL_LOAD) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (cpu_load <= MAX_LOCAL_LOAD &&
	    pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq >= pcpu->policy->max
	    && new_freq < pcpu->target_freq
	    && now - pcpu->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis) {
		trace_cpufreq_interactive_notyet(data, cpu_load,
			pcpu->target_freq, pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(data, false);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
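
/*
 * Decision summary for cpufreq_interactive_timer() above (descriptive
 * comment added for clarity): each evaluation computes cpu_load at the
 * current speed, jumps toward hispeed_freq on a load burst or boost, and
 * otherwise lets choose_freq() pick the lowest speed meeting target_loads.
 * The candidate is then filtered, in order, by above_hispeed_delay,
 * max_freq_hysteresis, and the floor_freq / min_sample_time checks before
 * being queued for the speedchange task.
 */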

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(smp_processor_id(), false);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL, fvt = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				fvt = max(fvt, pjcpu->loc_floor_val_time);
				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
				}
			}
			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);
				pjcpu->pol_floor_val_time = fvt;
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;

	if (speedchange_task == current)
		return 0;

	if (pcpu->reject_notification)
		return 0;

	if (!down_read_trylock(&pcpu->enable_sem))
		return 0;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return 0;
	}
	tunables = pcpu->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif) {
		up_read(&pcpu->enable_sem);
		return 0;
	}

	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&pcpu->cpu_timer);
	del_timer(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&pcpu->enable_sem);
	return 0;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
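
/*
 * Parsing example (illustrative): writing "85 1100000:90 1500000:99" to
 * target_loads yields ntokens = 5 and the array {85, 1100000, 90, 1500000,
 * 99}: a target load of 85 by default, 90 at and above 1.1 GHz, and 99 at
 * and above 1.5 GHz.  An even token count cannot form "value
 * [freq:value]..." pairs, hence the (ntokens & 0x1) check fails it with
 * -EINVAL.
 */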

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

#define show_store_one(file_name)					\
static ssize_t show_##file_name(					\
	struct cpufreq_interactive_tunables *tunables, char *buf)	\
{									\
	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
}									\
static ssize_t store_##file_name(					\
		struct cpufreq_interactive_tunables *tunables,		\
		const char *buf, size_t count)				\
{									\
	int ret;							\
	unsigned long val;						\
									\
	ret = kstrtoul(buf, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	tunables->file_name = val;					\
	return count;							\
}
show_store_one(max_freq_hysteresis);
show_store_one(align_windows);
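
/*
 * Illustrative expansion: show_store_one(max_freq_hysteresis) generates
 * show_max_freq_hysteresis() and store_max_freq_hysteresis() operating on
 * tunables->max_freq_hysteresis, mirroring the hand-written accessors for
 * the older tunables elsewhere in this file.
 */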

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);
	tunables->timer_rate = val_round;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		t = per_cpu(cpuinfo, cpu).cached_tunables;
		if (t && t->use_sched_load)
			t->timer_rate = val_round;
	}
	set_window_helper(tunables);

	return count;
}
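
/*
 * Rounding example (assuming HZ=100, i.e. one jiffy is 10000 us): writing
 * 25000 gives usecs_to_jiffies(25000) = 3 jiffies, so val_round =
 * jiffies_to_usecs(3) = 30000 and the warning fires.  Already-aligned
 * values such as 20000 pass through unchanged.
 */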

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}
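
/*
 * Usage sketch (typical sysfs paths for the system-wide instance; the
 * exact location depends on whether the governor runs per-policy):
 *   echo 200000 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 * The pulse holds each CPU in the policy at or above hispeed_freq until
 * boostpulse_endtime passes, after which normal scaling resumes.
 */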

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		t = per_cpu(cpuinfo, cpu).cached_tunables;
		if (t && t->use_sched_load)
			t->io_is_busy = val;
	}
	sched_set_io_is_busy(val);

	return count;
}

static int cpufreq_interactive_enable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	int rc = 0, j;
	struct cpufreq_interactive_tunables *t;

	mutex_lock(&sched_lock);

	set_window_count++;
	if (set_window_count > 1) {
		for_each_possible_cpu(j) {
			t = per_cpu(cpuinfo, j).cached_tunables;
			if (t && t->use_sched_load) {
				tunables->timer_rate = t->timer_rate;
				tunables->io_is_busy = t->io_is_busy;
				break;
			}
		}
	} else {
		rc = set_window_helper(tunables);
		if (rc) {
			pr_err("%s: Failed to set sched window\n", __func__);
			set_window_count--;
			goto out;
		}
		sched_set_io_is_busy(tunables->io_is_busy);
	}

	if (!tunables->use_migration_notif)
		goto out;

	migration_register_count++;
	if (migration_register_count > 1)
		goto out;
	else
		atomic_notifier_chain_register(&load_alert_notifier_head,
					       &load_notifier_block);
out:
	mutex_unlock(&sched_lock);
	return rc;
}

static int cpufreq_interactive_disable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	mutex_lock(&sched_lock);

	if (tunables->use_migration_notif) {
		migration_register_count--;
		if (migration_register_count < 1)
			atomic_notifier_chain_unregister(
				&load_alert_notifier_head,
				&load_notifier_block);
	}
	set_window_count--;

	mutex_unlock(&sched_lock);
	return 0;
}
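
/*
 * Note on the refcounts (descriptive comment added for clarity):
 * set_window_count and migration_register_count are global because the
 * scheduler window and the load-alert notifier are system-wide resources
 * shared by every policy that enables use_sched_load.  Only the first
 * enabler configures the window and registers the notifier, and only the
 * last disabler unregisters it; later enablers instead copy timer_rate and
 * io_is_busy from an already-enabled instance so all sched-load users
 * agree on the window size.
 */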

static ssize_t show_use_sched_load(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
}

static ssize_t store_use_sched_load(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_sched_load == (bool) val)
		return count;
	if (val)
		ret = cpufreq_interactive_enable_sched_input(tunables);
	else
		ret = cpufreq_interactive_disable_sched_input(tunables);

	if (ret)
		return ret;

	tunables->use_sched_load = val;
	return count;
}

static ssize_t show_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			tunables->use_migration_notif);
}

static ssize_t store_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_migration_notif == (bool) val)
		return count;
	tunables->use_migration_notif = val;

	if (!tunables->use_sched_load)
		return count;

	mutex_lock(&sched_lock);
	if (val) {
		migration_register_count++;
		if (migration_register_count == 1)
			atomic_notifier_chain_register(
				&load_alert_notifier_head,
				&load_notifier_block);
	} else {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
				&load_alert_notifier_head,
				&load_notifier_block);
	}
	mutex_unlock(&sched_lock);

	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

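/*
 * For illustration, show_store_gov_pol_sys(target_loads) expands to
 * four wrappers:
 *
 *   show_target_loads_gov_sys()  -> show_target_loads(common_tunables, buf)
 *   show_target_loads_gov_pol()  -> show_target_loads(policy->governor_data, buf)
 *   store_target_loads_gov_sys() -> store_target_loads(common_tunables, buf, count)
 *   store_target_loads_gov_pol() -> store_target_loads(policy->governor_data, buf, count)
 */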
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
show_store_gov_pol_sys(use_sched_load);
show_store_gov_pol_sys(use_migration_notif);
show_store_gov_pol_sys(max_freq_hysteresis);
show_store_gov_pol_sys(align_windows);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(use_sched_load);
gov_sys_pol_attr_rw(use_migration_notif);
gov_sys_pol_attr_rw(max_freq_hysteresis);
gov_sys_pol_attr_rw(align_windows);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for the entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	&use_sched_load_gov_sys.attr,
	&use_migration_notif_gov_sys.attr,
	&max_freq_hysteresis_gov_sys.attr,
	&align_windows_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	&use_sched_load_gov_pol.attr,
	&use_migration_notif_gov_pol.attr,
	&max_freq_hysteresis_gov_pol.attr,
	&align_windows_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

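/*
 * Attribute-group selection for sysfs registration.  With
 * have_governor_per_policy() the knobs appear once per policy (e.g.
 * under /sys/devices/system/cpu/cpuX/cpufreq/interactive/); otherwise
 * a single system-wide set is created under the global cpufreq kobject
 * (typically /sys/devices/system/cpu/cpufreq/interactive/).
 */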
static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

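/*
 * Tunables are cached in the per-cpu data of the policy's first CPU
 * (CPU 0 for a system-wide governor) rather than freed on
 * GOV_POLICY_EXIT, so user configuration survives governor restarts;
 * restore_tunables() returns the cached set on the next POLICY_INIT
 * and the cache is finally freed in cpufreq_interactive_exit().
 */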
static void save_tunables(struct cpufreq_policy *policy,
			  struct cpufreq_interactive_tunables *tunables)
{
	int cpu;
	struct cpufreq_interactive_cpuinfo *pcpu;

	if (have_governor_per_policy())
		cpu = cpumask_first(policy->related_cpus);
	else
		cpu = 0;

	pcpu = &per_cpu(cpuinfo, cpu);
	WARN_ON(pcpu->cached_tunables && pcpu->cached_tunables != tunables);
	pcpu->cached_tunables = tunables;
}

static struct cpufreq_interactive_tunables *alloc_tunable(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return ERR_PTR(-ENOMEM);

	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	save_tunables(policy, tunables);
	return tunables;
}

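/* Return the tunables cached by save_tunables(), or NULL on first use. */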
static struct cpufreq_interactive_tunables *restore_tunables(
		struct cpufreq_policy *policy)
{
	int cpu;

	if (have_governor_per_policy())
		cpu = cpumask_first(policy->related_cpus);
	else
		cpu = 0;

	return per_cpu(cpuinfo, cpu).cached_tunables;
}

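/*
 * Governor entry point, multiplexed on the cpufreq core's events:
 * POLICY_INIT/EXIT attach or detach a tunables set (restoring cached
 * tunables when available), START/STOP arm or tear down the per-cpu
 * sampling timers, and LIMITS reacts to min/max changes.
 */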
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
					unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;
	int first_cpu;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		first_cpu = cpumask_first(policy->related_cpus);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpuinfo, j).first_cpu = first_cpu;

		tunables = restore_tunables(policy);
		if (!tunables) {
			tunables = alloc_tunable(policy);
			if (IS_ERR(tunables))
				return PTR_ERR(tunables);
		}

		tunables->usage_count = 1;
		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			common_tunables = NULL;
		}

		policy->governor_data = NULL;

		if (tunables->use_sched_load)
			cpufreq_interactive_disable_sched_input(tunables);

		break;

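	/*
	 * GOV_START: seed per-cpu state from the current policy and
	 * restart the sampling timers.  reject_notification is set
	 * around the timer reprogramming so the load-alert notifier
	 * leaves these CPUs alone until the governor is fully enabled.
	 */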
	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->pol_floor_val_time =
				ktime_to_us(ktime_get());
			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->min_freq = policy->min;
			pcpu->reject_notification = true;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			pcpu->last_evaluated_jiffy = get_jiffies_64();
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
			pcpu->reject_notification = false;
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->reject_notification = true;
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
			pcpu->reject_notification = false;
		}

		mutex_unlock(&gov_lock);
		break;

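	/*
	 * GOV_LIMITS: clamp the running and per-cpu target frequencies
	 * into the new [min, max] range; if the floor dropped below the
	 * previously stored minimum, reschedule the sampling timer so
	 * the CPU can reevaluate at the lower floor promptly.
	 */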
	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);

			if (policy->min < pcpu->min_freq)
				cpufreq_interactive_timer_resched(j, true);
			pcpu->min_freq = policy->min;

			up_read(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

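/*
 * Module init: each CPU gets a deferrable sampling timer plus the
 * non-deferrable slack timer above, whose no-op handler exists only to
 * wake an idle CPU so the sampling timer can run and drop the speed.
 * The actual frequency transitions are done by the SCHED_FIFO
 * "cfinteractive" kthread created here.
 */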
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	mutex_init(&sched_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	int cpu;
	struct cpufreq_interactive_cpuinfo *pcpu;

	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	for_each_possible_cpu(cpu) {
		pcpu = &per_cpu(cpuinfo, cpu);
		kfree(pcpu->cached_tunables);
		pcpu->cached_tunables = NULL;
	}
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");