/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 5 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	u64 max_freq_hyst_start_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	int first_cpu;
};
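
/*
 * A note on the two per-CPU timers: cpu_timer drives the periodic load
 * evaluation, while cpu_slack_timer exists only to wake the CPU out of
 * idle after up to timer_slack_val of extra time whenever it idles with
 * target_freq above policy->min, so that an inflated speed can still be
 * re-evaluated and ramped down.  See cpufreq_interactive_timer_resched().
 */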

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
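
/*
 * Both target_loads and above_hispeed_delay are stored as a flattened
 * list of value/frequency pairs: values sit at even indices and the
 * frequency (in kHz) at which the next value takes over sits at odd
 * indices.  For example (hypothetical values), "85 1000000:90 1700000:99"
 * is stored as {85, 1000000, 90, 1700000, 99}: 85 below 1 GHz, 90 from
 * 1 GHz up to 1.7 GHz, and 99 at or above 1.7 GHz.
 */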

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;
};

/*
 * HACK: FIXME: Bring back cpufreq_{get,put}_global_kobject()
 * definition removed by upstream commit 8eec1020f0c0 "cpufreq:
 * create cpu/cpufreq at boot time" to fix build failures.
 */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}
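
/*
 * Worked example (hypothetical numbers): with HZ=100 and a timer_rate of
 * 20000us, step is 2 jiffies.  For jif = 1003, do_div() leaves 501, so
 * the aligned expiry is (501 + 1) * 2 = jiffy 1004, the start of the
 * next common window.  Without alignment the expiry is simply
 * jiffies + 2.
 */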

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
			 usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	del_timer(&pcpu->cpu_timer);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
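
/*
 * Lookup example (hypothetical table): with target_loads stored as
 * {85, 1000000, 90}, freq_to_targetload() returns 85 for 600000 kHz
 * (600000 < 1000000, so the loop never advances) and 90 for any
 * frequency at or above 1000000 kHz.  freq_to_above_hispeed_delay()
 * walks its pair list the same way.
 */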

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
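
/*
 * Convergence example (hypothetical numbers): table {300000, 600000,
 * 900000, 1200000} kHz, uniform target load 90, cur = 600000 and
 * loadadjfreq = 72000000 (i.e. 120% of cur).  Pass 1: 72000000 / 90 =
 * 800000 and CPUFREQ_RELATION_L picks 900000.  Pass 2 re-evaluates at
 * 900000 and picks 900000 again, so the loop terminates with 900000,
 * the lowest table entry whose projected load (80%) does not exceed
 * the target.
 */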

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
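
/*
 * Units example (hypothetical numbers): over a 20000us window with
 * 5000us idle, active_time is 15000us, so cputime_speedadj grows by
 * 15000 * policy->cur (us * kHz).  The timer below divides by
 * delta_time and multiplies by 100, giving loadadjfreq in kHz-percent:
 * at cur = 1000000 that is 75000000, i.e. cpu_load = 75 once divided
 * by cur.
 */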

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	now = update_load(data);
	if (tunables->use_sched_load) {
		/*
		 * Unlock early to avoid deadlock.
		 *
		 * load_change_callback() for thread migration already
		 * holds rq lock. Then it locks load_lock to avoid racing
		 * with cpufreq_interactive_timer_resched/start().
		 * sched_get_busy() will also acquire rq lock. Thus we
		 * can't hold load_lock when calling sched_get_busy().
		 *
		 * load_lock used in this function protects time
		 * and load information. These stats are not used when
		 * scheduler input is available. Thus unlocking load_lock
		 * early is perfectly OK.
		 */
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		cputime_speedadj = (u64)sched_get_busy(data) *
				pcpu->policy->cpuinfo.max_freq;
		do_div(cputime_speedadj, tunables->timer_rate);
	} else {
		delta_time = (unsigned int)
			(now - pcpu->cputime_speedadj_timestamp);
		cputime_speedadj = pcpu->cputime_speedadj;
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		if (WARN_ON_ONCE(!delta_time))
			goto rearm;
		do_div(cputime_speedadj, delta_time);
	}

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq >= pcpu->policy->max
	    && new_freq < pcpu->target_freq
	    && now - pcpu->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis) {
		trace_cpufreq_interactive_notyet(data, cpu_load,
			pcpu->target_freq, pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(data);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(smp_processor_id());
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL, fvt = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				fvt = max(fvt, pjcpu->loc_floor_val_time);
				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
				}
			}
			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);
				pjcpu->pol_floor_val_time = fvt;
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
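
/*
 * All CPUs in a policy share one clock, so the task above drives the
 * policy to the highest target_freq among its CPUs, propagates the most
 * recent per-cpu floor validation time (fvt) to every member, and, when
 * the speed actually changes, stamps each member's pol_hispeed_val_time
 * with the hispeed validation time (hvt) of the CPU(s) that requested
 * the winning frequency.
 */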

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;

	if (speedchange_task == current)
		return 0;

	if (!down_read_trylock(&pcpu->enable_sem))
		return 0;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return 0;
	}
	tunables = pcpu->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif) {
		up_read(&pcpu->enable_sem);
		return 0;
	}

	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&pcpu->cpu_timer);
	del_timer(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&pcpu->enable_sem);
	return 0;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
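
/*
 * Parsing example (hypothetical input): "85 1000000:90" contains two
 * separators, so ntokens is 3 (an odd count, as required for a value
 * followed by freq:value pairs) and the returned array is
 * {85, 1000000, 90}.  An even token count such as "85 1000000" is
 * rejected with -EINVAL.
 */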

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}
931
Junjie Wue05d74e2014-08-29 14:12:52 -0700932#define show_store_one(file_name) \
933static ssize_t show_##file_name( \
934 struct cpufreq_interactive_tunables *tunables, char *buf) \
935{ \
936 return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name); \
937} \
938static ssize_t store_##file_name( \
939 struct cpufreq_interactive_tunables *tunables, \
940 const char *buf, size_t count) \
941{ \
942 int ret; \
943 unsigned long int val; \
944 \
945 ret = kstrtoul(buf, 0, &val); \
946 if (ret < 0) \
947 return ret; \
948 tunables->file_name = val; \
949 return count; \
950}
951show_store_one(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -0700952show_store_one(align_windows);

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);
	tunables->timer_rate = val_round;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		t = per_cpu(cpuinfo, cpu).cached_tunables;
		if (t && t->use_sched_load)
			t->timer_rate = val_round;
	}
	set_window_helper(tunables);

	return count;
}
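
/*
 * Rounding example (hypothetical, assuming HZ=100): writing 25000 gives
 * usecs_to_jiffies(25000) = 3 jiffies, so timer_rate is rounded up to
 * jiffies_to_usecs(3) = 30000us and the warning above is printed.
 */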

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		t = per_cpu(cpuinfo, cpu).cached_tunables;
		if (t && t->use_sched_load)
			t->io_is_busy = val;
	}
	sched_set_io_is_busy(val);

	return count;
}

static int cpufreq_interactive_enable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	int rc = 0, j;
	struct cpufreq_interactive_tunables *t;

	mutex_lock(&sched_lock);

	set_window_count++;
	if (set_window_count != 1) {
		for_each_possible_cpu(j) {
			t = per_cpu(cpuinfo, j).cached_tunables;
			if (t && t->use_sched_load) {
				tunables->timer_rate = t->timer_rate;
				tunables->io_is_busy = t->io_is_busy;
				break;
			}
		}
		goto out;
	}

	rc = set_window_helper(tunables);
	if (rc) {
		pr_err("%s: Failed to set sched window\n", __func__);
		set_window_count--;
		goto out;
	}
	sched_set_io_is_busy(tunables->io_is_busy);

	if (!tunables->use_migration_notif)
		goto out;

	migration_register_count++;
	if (migration_register_count != 1)
		goto out;
	else
		atomic_notifier_chain_register(&load_alert_notifier_head,
					       &load_notifier_block);
out:
	mutex_unlock(&sched_lock);
	return rc;
}

static int cpufreq_interactive_disable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	mutex_lock(&sched_lock);

	if (tunables->use_migration_notif) {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
				&load_alert_notifier_head,
				&load_notifier_block);
	}
	set_window_count--;

	mutex_unlock(&sched_lock);
	return 0;
}
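
/*
 * set_window_count and migration_register_count are refcounts shared by
 * every policy that enables scheduler input: only the first user sets up
 * the sched window and registers the load-alert notifier, later users
 * inherit the timer_rate/io_is_busy values already synced with the
 * scheduler, and the notifier is unregistered only when the last user
 * disables it.
 */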

static ssize_t show_use_sched_load(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
}

static ssize_t store_use_sched_load(
			struct cpufreq_interactive_tunables *tunables,
			const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_sched_load == (bool) val)
		return count;
	if (val)
		ret = cpufreq_interactive_enable_sched_input(tunables);
	else
		ret = cpufreq_interactive_disable_sched_input(tunables);

	if (ret)
		return ret;

	tunables->use_sched_load = val;
	return count;
}

static ssize_t show_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			tunables->use_migration_notif);
}

static ssize_t store_use_migration_notif(
			struct cpufreq_interactive_tunables *tunables,
			const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_migration_notif == (bool) val)
		return count;
	tunables->use_migration_notif = val;

	if (!tunables->use_sched_load)
		return count;

	mutex_lock(&sched_lock);
	if (val) {
		migration_register_count++;
		if (migration_register_count == 1)
			atomic_notifier_chain_register(
				&load_alert_notifier_head,
				&load_notifier_block);
	} else {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
				&load_alert_notifier_head,
				&load_notifier_block);
	}
	mutex_unlock(&sched_lock);

	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
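
/*
 * For reference, a sketch of what one instantiation below expands to
 * (timer_rate shown; the expansion is mechanical, and the helpers
 * show_timer_rate()/store_timer_rate() are defined earlier in this file):
 *
 *	static ssize_t show_timer_rate_gov_sys(struct kobject *kobj,
 *			struct attribute *attr, char *buf)
 *	{
 *		return show_timer_rate(common_tunables, buf);
 *	}
 *
 *	static ssize_t show_timer_rate_gov_pol(struct cpufreq_policy *policy,
 *			char *buf)
 *	{
 *		return show_timer_rate(policy->governor_data, buf);
 *	}
 *
 * and likewise for the _gov_sys/_gov_pol store variants.
 */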

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
show_store_gov_pol_sys(use_sched_load);
show_store_gov_pol_sys(use_migration_notif);
show_store_gov_pol_sys(max_freq_hysteresis);
show_store_gov_pol_sys(align_windows);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(use_sched_load);
gov_sys_pol_attr_rw(use_migration_notif);
gov_sys_pol_attr_rw(max_freq_hysteresis);
gov_sys_pol_attr_rw(align_windows);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
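
/*
 * boostpulse is declared by hand rather than via gov_sys_pol_attr_rw():
 * it is a write-only trigger (mode 0200, no show routine), since a
 * momentary pulse has no persistent state worth reading back.
 */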

/* One governor instance for the entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	&use_sched_load_gov_sys.attr,
	&use_migration_notif_gov_sys.attr,
	&max_freq_hysteresis_gov_sys.attr,
	&align_windows_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* One governor instance per policy */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	&use_sched_load_gov_pol.attr,
	&use_migration_notif_gov_pol.attr,
	&max_freq_hysteresis_gov_pol.attr,
	&align_windows_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
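
/*
 * With the single system-wide instance, the "interactive" group above is
 * created under the global cpufreq kobject (typically
 * /sys/devices/system/cpu/cpufreq/interactive/); with per-policy
 * instances it appears under each policy's own directory instead
 * (e.g. /sys/devices/system/cpu/cpuN/cpufreq/interactive/).
 */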

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
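
/*
 * Only IDLE_END matters here: cpufreq_interactive_idle_end() (defined
 * earlier in this file) re-arms the sampling timer when a CPU leaves
 * idle, so load is re-evaluated promptly after a wakeup.
 */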

static void save_tunables(struct cpufreq_policy *policy,
			  struct cpufreq_interactive_tunables *tunables)
{
	int cpu;
	struct cpufreq_interactive_cpuinfo *pcpu;

	if (have_governor_per_policy())
		cpu = cpumask_first(policy->related_cpus);
	else
		cpu = 0;

	pcpu = &per_cpu(cpuinfo, cpu);
	WARN_ON(pcpu->cached_tunables && pcpu->cached_tunables != tunables);
	pcpu->cached_tunables = tunables;
}
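
/*
 * Caching the pointer in the per-CPU slot of the policy's first CPU (or
 * CPU 0 in the system-wide case) is what lets tunables survive a governor
 * stop/start cycle: restore_tunables() below reads the same slot back,
 * and the memory is only released in cpufreq_interactive_exit().
 */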

static struct cpufreq_interactive_tunables *alloc_tunable(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return ERR_PTR(-ENOMEM);

	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
	tunables->align_windows = true;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	save_tunables(policy, tunables);
	return tunables;
}

static struct cpufreq_interactive_tunables *restore_tunables(
		struct cpufreq_policy *policy)
{
	int cpu;

	if (have_governor_per_policy())
		cpu = cpumask_first(policy->related_cpus);
	else
		cpu = 0;

	return per_cpu(cpuinfo, cpu).cached_tunables;
}
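
/*
 * restore_tunables() returns NULL the first time a policy sees this
 * governor; CPUFREQ_GOV_POLICY_INIT below treats that as the cue to
 * allocate a fresh set of defaults via alloc_tunable().
 */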

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;
	int first_cpu;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		first_cpu = cpumask_first(policy->related_cpus);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpuinfo, j).first_cpu = first_cpu;

		tunables = restore_tunables(policy);
		if (!tunables) {
			tunables = alloc_tunable(policy);
			if (IS_ERR(tunables))
				return PTR_ERR(tunables);
		}

		tunables->usage_count = 1;
		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			common_tunables = NULL;
		}

		policy->governor_data = NULL;

		if (tunables->use_sched_load)
			cpufreq_interactive_disable_sched_input(tunables);

		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->pol_floor_val_time =
				ktime_to_us(ktime_get());
			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			pcpu->last_evaluated_jiffy = get_jiffies_64();
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}
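
/*
 * Lifecycle summary (the cpufreq core drives these events):
 *   POLICY_INIT - find or allocate tunables, publish the sysfs group,
 *                 register idle/transition notifiers on first use
 *   GOV_START   - seed per-CPU state and arm the sampling timers
 *   GOV_LIMITS  - clamp the current and per-CPU target frequencies into
 *                 the new [policy->min, policy->max] range
 *   GOV_STOP    - disarm timers under enable_sem write protection
 *   POLICY_EXIT - tear down sysfs/notifiers on the last user; the
 *                 tunables themselves stay cached for a later re-init
 */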

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
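
/*
 * The slack timer's handler is intentionally empty: cpu_timer is
 * deferrable and so will not fire on an idle CPU, which could leave that
 * CPU parked at a raised speed. The non-deferrable slack timer exists
 * only to wake the CPU; the idle-exit path then re-evaluates the load.
 */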

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	mutex_init(&sched_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
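
/*
 * When interactive is the default governor it must be registered before
 * any cpufreq driver brings up a policy, hence the early fs_initcall();
 * otherwise normal module_init() ordering suffices.
 */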

static void __exit cpufreq_interactive_exit(void)
{
	int cpu;
	struct cpufreq_interactive_cpuinfo *pcpu;

	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	for_each_possible_cpu(cpu) {
		pcpu = &per_cpu(cpuinfo, cpu);
		kfree(pcpu->cached_tunables);
		pcpu->cached_tunables = NULL;
	}
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");