/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 5 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	u64 max_freq_hyst_start_time;
	unsigned int min_freq;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	int first_cpu;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed on a load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;
};

/*
 * HACK: FIXME: Bring back the cpufreq_{get,put}_global_kobject()
 * definitions, removed by upstream commit 8eec1020f0c0 ("cpufreq:
 * create cpu/cpufreq at boot time"), to fix build failures.
 */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				   &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
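/*
 * Illustrative example (assumed values, not from this file): with HZ=100
 * and timer_rate=20000us, step is usecs_to_jiffies(20000) = 2 jiffies, so
 * jif=1001 rounds up to 1002, the start of the next aligned window.  When
 * windows are not aligned, the next expiry is simply one timer_rate
 * interval from now.
 */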
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
				usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		pcpu->time_in_idle =
			get_cpu_idle_time(smp_processor_id(),
					  &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
		del_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.expires = expires;
		add_timer_on(&pcpu->cpu_timer, cpu);
	}

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy, tunables);
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

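/*
 * above_hispeed_delay (like target_loads) is stored as an odd-length array
 * of the form {val_0, freq_1, val_1, freq_2, val_2, ...}: val_i applies at
 * frequencies from freq_i (inclusive) up to freq_i+1 (exclusive), with
 * val_0 covering everything below freq_1.  Illustrative example (assumed
 * values): "20000 1000000:50000" means a 20ms delay below 1.0 GHz and a
 * 50ms delay at or above it.
 */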
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
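/*
 * Worked example (illustrative numbers): loadadjfreq is load percent times
 * the current frequency in kHz, so 120% load at 1.0 GHz gives
 * loadadjfreq = 120000000.  With a target load of 90 the candidate
 * frequency is loadadjfreq / tl = 1333333 kHz, rounded to a table entry
 * with CPUFREQ_RELATION_L; the loop below then narrows [freqmin, freqmax]
 * until the same frequency is chosen twice in a row.
 */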
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

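/*
 * Accumulate frequency-weighted busy time: cputime_speedadj gains
 * (busy time since last sample) * (current frequency), so dividing by
 * elapsed wall time later yields an average load-times-frequency figure
 * that stays accurate across intermediate frequency changes.
 */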
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

#define MAX_LOCAL_LOAD 100
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;
	struct cpufreq_govinfo int_info;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->last_evaluated_jiffy = get_jiffies_64();
	now = update_load(data);
	if (tunables->use_sched_load) {
		/*
		 * Unlock early to avoid deadlock.
		 *
		 * load_change_callback() for thread migration already
		 * holds rq lock. Then it locks load_lock to avoid racing
		 * with cpufreq_interactive_timer_resched/start().
		 * sched_get_busy() will also acquire rq lock. Thus we
		 * can't hold load_lock when calling sched_get_busy().
		 *
		 * load_lock used in this function protects time
		 * and load information. These stats are not used when
		 * scheduler input is available. Thus unlocking load_lock
		 * early is perfectly OK.
		 */
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		cputime_speedadj = (u64)sched_get_busy(data) *
				pcpu->policy->cpuinfo.max_freq;
		do_div(cputime_speedadj, tunables->timer_rate);
	} else {
		delta_time = (unsigned int)
			(now - pcpu->cputime_speedadj_timestamp);
		cputime_speedadj = pcpu->cputime_speedadj;
		spin_unlock_irqrestore(&pcpu->load_lock, flags);
		if (WARN_ON_ONCE(!delta_time))
			goto rearm;
		do_div(cputime_speedadj, delta_time);
	}

	loadadjfreq = (unsigned int)cputime_speedadj * 100;

	int_info.cpu = data;
	int_info.load = loadadjfreq / pcpu->policy->max;
	int_info.sampling_rate_us = tunables->timer_rate;
	atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
				   CPUFREQ_LOAD_CHANGE, &int_info);

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq &&
		    cpu_load <= MAX_LOCAL_LOAD) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (cpu_load <= MAX_LOCAL_LOAD &&
	    pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq >= pcpu->policy->max
	    && new_freq < pcpu->target_freq
	    && now - pcpu->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis) {
		trace_cpufreq_interactive_notyet(data, cpu_load,
			pcpu->target_freq, pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(data, false);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(smp_processor_id(), false);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

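/*
 * The speedchange task services every CPU flagged in speedchange_cpumask:
 * for each one it picks the highest target_freq among the CPUs sharing the
 * policy, applies it with CPUFREQ_RELATION_H, and pushes the merged
 * floor/hispeed validation timestamps out to every CPU in the policy.
 */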
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL, fvt = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				fvt = max(fvt, pjcpu->loc_floor_val_time);
				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
				}
			}
			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);
				pjcpu->pol_floor_val_time = fvt;
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
							   pcpu->target_freq,
							   pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;

	if (speedchange_task == current)
		return 0;

	if (pcpu->reject_notification)
		return 0;

	if (!down_read_trylock(&pcpu->enable_sem))
		return 0;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return 0;
	}
	tunables = pcpu->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif) {
		up_read(&pcpu->enable_sem);
		return 0;
	}

	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&pcpu->cpu_timer);
	del_timer(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&pcpu->enable_sem);
	return 0;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

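/*
 * Parse a space/colon separated sysfs string into an array of unsigned
 * ints.  Illustrative example (assumed input): "85 1100000:90" yields
 * {85, 1100000, 90}.  An even token count is rejected because callers
 * expect value:freq pairs terminated by a final value.
 */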
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 806 | static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) |
| 807 | { |
| 808 | const char *cp; |
| 809 | int i; |
| 810 | int ntokens = 1; |
| 811 | unsigned int *tokenized_data; |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 812 | int err = -EINVAL; |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 813 | |
| 814 | cp = buf; |
| 815 | while ((cp = strpbrk(cp + 1, " :"))) |
| 816 | ntokens++; |
| 817 | |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 818 | if (!(ntokens & 0x1)) |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 819 | goto err; |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 820 | |
| 821 | tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); |
| 822 | if (!tokenized_data) { |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 823 | err = -ENOMEM; |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 824 | goto err; |
| 825 | } |
| 826 | |
| 827 | cp = buf; |
| 828 | i = 0; |
| 829 | while (i < ntokens) { |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 830 | if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 831 | goto err_kfree; |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 832 | |
| 833 | cp = strpbrk(cp, " :"); |
| 834 | if (!cp) |
| 835 | break; |
| 836 | cp++; |
| 837 | } |
| 838 | |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 839 | if (i != ntokens) |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 840 | goto err_kfree; |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 841 | |
| 842 | *num_tokens = ntokens; |
| 843 | return tokenized_data; |
| 844 | |
| 845 | err_kfree: |
| 846 | kfree(tokenized_data); |
| 847 | err: |
Todd Poynor | 233dfa0 | 2013-03-20 15:40:46 -0700 | [diff] [blame] | 848 | return ERR_PTR(err); |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 849 | } |
| 850 | |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 851 | static ssize_t show_target_loads( |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 852 | struct cpufreq_interactive_tunables *tunables, |
| 853 | char *buf) |
Todd Poynor | 8d2d93f | 2012-11-28 17:58:17 -0800 | [diff] [blame] | 854 | { |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 855 | int i; |
| 856 | ssize_t ret = 0; |
Todd Poynor | df673d1 | 2013-01-02 13:14:00 -0800 | [diff] [blame] | 857 | unsigned long flags; |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 858 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 859 | spin_lock_irqsave(&tunables->target_loads_lock, flags); |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 860 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 861 | for (i = 0; i < tunables->ntarget_loads; i++) |
| 862 | ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i], |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 863 | i & 0x1 ? ":" : " "); |
| 864 | |
Chih-Wei Huang | 8d9e530 | 2013-12-24 17:51:55 +0800 | [diff] [blame] | 865 | sprintf(buf + ret - 1, "\n"); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 866 | spin_unlock_irqrestore(&tunables->target_loads_lock, flags); |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 867 | return ret; |
Todd Poynor | 8d2d93f | 2012-11-28 17:58:17 -0800 | [diff] [blame] | 868 | } |
| 869 | |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 870 | static ssize_t store_target_loads( |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 871 | struct cpufreq_interactive_tunables *tunables, |
| 872 | const char *buf, size_t count) |
Todd Poynor | 8d2d93f | 2012-11-28 17:58:17 -0800 | [diff] [blame] | 873 | { |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 874 | int ntokens; |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 875 | unsigned int *new_target_loads = NULL; |
Todd Poynor | df673d1 | 2013-01-02 13:14:00 -0800 | [diff] [blame] | 876 | unsigned long flags; |
Todd Poynor | 8d2d93f | 2012-11-28 17:58:17 -0800 | [diff] [blame] | 877 | |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 878 | new_target_loads = get_tokenized_data(buf, &ntokens); |
| 879 | if (IS_ERR(new_target_loads)) |
| 880 | return PTR_RET(new_target_loads); |
Todd Poynor | e9c6074 | 2012-11-14 11:41:21 -0800 | [diff] [blame] | 881 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 882 | spin_lock_irqsave(&tunables->target_loads_lock, flags); |
| 883 | if (tunables->target_loads != default_target_loads) |
| 884 | kfree(tunables->target_loads); |
| 885 | tunables->target_loads = new_target_loads; |
| 886 | tunables->ntarget_loads = ntokens; |
| 887 | spin_unlock_irqrestore(&tunables->target_loads_lock, flags); |
Todd Poynor | 8d2d93f | 2012-11-28 17:58:17 -0800 | [diff] [blame] | 888 | return count; |
| 889 | } |
| 890 | |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 891 | static ssize_t show_above_hispeed_delay( |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 892 | struct cpufreq_interactive_tunables *tunables, char *buf) |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 893 | { |
| 894 | int i; |
| 895 | ssize_t ret = 0; |
| 896 | unsigned long flags; |
| 897 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 898 | spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags); |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 899 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 900 | for (i = 0; i < tunables->nabove_hispeed_delay; i++) |
| 901 | ret += sprintf(buf + ret, "%u%s", |
| 902 | tunables->above_hispeed_delay[i], |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 903 | i & 0x1 ? ":" : " "); |
| 904 | |
Chih-Wei Huang | 8d9e530 | 2013-12-24 17:51:55 +0800 | [diff] [blame] | 905 | sprintf(buf + ret - 1, "\n"); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 906 | spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags); |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 907 | return ret; |
| 908 | } |
| 909 | |
| 910 | static ssize_t store_above_hispeed_delay( |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 911 | struct cpufreq_interactive_tunables *tunables, |
| 912 | const char *buf, size_t count) |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 913 | { |
| 914 | int ntokens; |
| 915 | unsigned int *new_above_hispeed_delay = NULL; |
| 916 | unsigned long flags; |
| 917 | |
| 918 | new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); |
| 919 | if (IS_ERR(new_above_hispeed_delay)) |
| 920 | return PTR_RET(new_above_hispeed_delay); |
| 921 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 922 | spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags); |
| 923 | if (tunables->above_hispeed_delay != default_above_hispeed_delay) |
| 924 | kfree(tunables->above_hispeed_delay); |
| 925 | tunables->above_hispeed_delay = new_above_hispeed_delay; |
| 926 | tunables->nabove_hispeed_delay = ntokens; |
| 927 | spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags); |
Minsung Kim | 9c1f83a | 2013-02-25 23:48:04 +0900 | [diff] [blame] | 928 | return count; |
 | 930 | }
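/*
 * Usage sketch (illustrative values): above_hispeed_delay uses the same
 * token format, with delays in usecs, e.g.
 *
 *   echo "20000 1500000:80000" > above_hispeed_delay
 *
 * i.e. once at or above hispeed_freq, wait 20 ms before raising speed
 * further, and 80 ms once running at 1.5 GHz or more.
 */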
| 931 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 932 | static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables, |
| 933 | char *buf) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 934 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 935 | return sprintf(buf, "%u\n", tunables->hispeed_freq); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 936 | } |
| 937 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 938 | static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables, |
| 939 | const char *buf, size_t count) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 940 | { |
| 941 | int ret; |
Todd Poynor | 3b7b5f8 | 2012-10-03 00:39:56 -0700 | [diff] [blame] | 942 | unsigned long val;
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 943 | |
Amit Pundir | cf07640 | 2015-11-03 20:53:29 +0530 | [diff] [blame] | 944 | ret = kstrtoul(buf, 0, &val); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 945 | if (ret < 0) |
| 946 | return ret; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 947 | tunables->hispeed_freq = val; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 948 | return count; |
| 949 | } |
| 950 | |
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 951 | #define show_store_one(file_name) \ |
| 952 | static ssize_t show_##file_name( \ |
| 953 | struct cpufreq_interactive_tunables *tunables, char *buf) \ |
| 954 | { \ |
| 955 | return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name); \ |
| 956 | } \ |
| 957 | static ssize_t store_##file_name( \ |
| 958 | struct cpufreq_interactive_tunables *tunables, \ |
| 959 | const char *buf, size_t count) \ |
| 960 | { \ |
| 961 | int ret; \ |
 | 962 | unsigned long val; \
| 963 | \ |
| 964 | ret = kstrtoul(buf, 0, &val); \ |
| 965 | if (ret < 0) \ |
| 966 | return ret; \ |
| 967 | tunables->file_name = val; \ |
| 968 | return count; \ |
| 969 | } |
| 970 | show_store_one(max_freq_hysteresis); |
Junjie Wu | 7ca999f | 2014-08-29 18:55:45 -0700 | [diff] [blame] | 971 | show_store_one(align_windows); |
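/*
 * Expansion sketch: show_store_one(max_freq_hysteresis) above generates
 * the equivalent of
 *
 *   static ssize_t show_max_freq_hysteresis(
 *           struct cpufreq_interactive_tunables *tunables, char *buf)
 *   {
 *           return snprintf(buf, PAGE_SIZE, "%u\n",
 *                           tunables->max_freq_hysteresis);
 *   }
 *
 * plus the matching kstrtoul()-based store handler.
 */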
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 972 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 973 | static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables |
| 974 | *tunables, char *buf) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 975 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 976 | return sprintf(buf, "%lu\n", tunables->go_hispeed_load); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 977 | } |
| 978 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 979 | static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables |
| 980 | *tunables, const char *buf, size_t count) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 981 | { |
| 982 | int ret; |
| 983 | unsigned long val; |
| 984 | |
Amit Pundir | cf07640 | 2015-11-03 20:53:29 +0530 | [diff] [blame] | 985 | ret = kstrtoul(buf, 0, &val); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 986 | if (ret < 0) |
| 987 | return ret; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 988 | tunables->go_hispeed_load = val; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 989 | return count; |
| 990 | } |
| 991 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 992 | static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables |
| 993 | *tunables, char *buf) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 994 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 995 | return sprintf(buf, "%lu\n", tunables->min_sample_time); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 996 | } |
| 997 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 998 | static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables |
| 999 | *tunables, const char *buf, size_t count) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1000 | { |
| 1001 | int ret; |
| 1002 | unsigned long val; |
| 1003 | |
Amit Pundir | cf07640 | 2015-11-03 20:53:29 +0530 | [diff] [blame] | 1004 | ret = kstrtoul(buf, 0, &val); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1005 | if (ret < 0) |
| 1006 | return ret; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1007 | tunables->min_sample_time = val; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1008 | return count; |
| 1009 | } |
| 1010 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1011 | static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables, |
| 1012 | char *buf) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1013 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1014 | return sprintf(buf, "%lu\n", tunables->timer_rate); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1015 | } |
| 1016 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1017 | static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables, |
| 1018 | const char *buf, size_t count) |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1019 | { |
| 1020 | int ret; |
Junjie Wu | 847796e | 2014-08-15 16:34:37 -0700 | [diff] [blame] | 1021 | unsigned long val, val_round; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1022 | struct cpufreq_interactive_tunables *t; |
| 1023 | int cpu; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1024 | |
Amit Pundir | cf07640 | 2015-11-03 20:53:29 +0530 | [diff] [blame] | 1025 | ret = kstrtoul(buf, 0, &val); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1026 | if (ret < 0) |
| 1027 | return ret; |
Junjie Wu | 847796e | 2014-08-15 16:34:37 -0700 | [diff] [blame] | 1028 | |
| 1029 | val_round = jiffies_to_usecs(usecs_to_jiffies(val)); |
| 1030 | if (val != val_round) |
| 1031 | pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n", |
| 1032 | val_round); |
Junjie Wu | 847796e | 2014-08-15 16:34:37 -0700 | [diff] [blame] | 1033 | tunables->timer_rate = val_round; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1034 | |
| 1035 | if (!tunables->use_sched_load) |
| 1036 | return count; |
| 1037 | |
| 1038 | for_each_possible_cpu(cpu) { |
| 1039 | t = per_cpu(cpuinfo, cpu).cached_tunables; |
| 1040 | if (t && t->use_sched_load) |
| 1041 | t->timer_rate = val_round; |
| 1042 | } |
| 1043 | set_window_helper(tunables); |
| 1044 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1045 | return count; |
| 1046 | } |
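/*
 * Worked example of the alignment above, assuming HZ=100 (10 ms per
 * jiffy): writing 25000 gives usecs_to_jiffies(25000) == 3, so
 * val_round == jiffies_to_usecs(3) == 30000 and the warning fires;
 * writing 20000 maps to exactly 2 jiffies and is stored unchanged.
 */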
| 1047 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1048 | static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables, |
| 1049 | char *buf) |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1050 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1051 | return sprintf(buf, "%d\n", tunables->timer_slack_val); |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1052 | } |
| 1053 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1054 | static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables, |
| 1055 | const char *buf, size_t count) |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1056 | { |
| 1057 | int ret; |
 | 1058 | long val;
| 1059 | |
| 1060 | ret = kstrtol(buf, 10, &val); |
| 1061 | if (ret < 0) |
| 1062 | return ret; |
| 1063 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1064 | tunables->timer_slack_val = val; |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1065 | return count; |
| 1066 | } |
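/*
 * timer_slack is deliberately parsed signed (kstrtol): a negative value
 * (commonly -1) is treated by the slack-timer arming code earlier in
 * this file as "do not schedule the slack timer".
 */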
| 1067 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1068 | static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables, |
Todd Poynor | 15a9ea0 | 2012-04-23 20:42:41 -0700 | [diff] [blame] | 1069 | char *buf) |
| 1070 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1071 | return sprintf(buf, "%d\n", tunables->boost_val); |
Todd Poynor | 15a9ea0 | 2012-04-23 20:42:41 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1074 | static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables, |
Todd Poynor | 15a9ea0 | 2012-04-23 20:42:41 -0700 | [diff] [blame] | 1075 | const char *buf, size_t count) |
| 1076 | { |
| 1077 | int ret; |
| 1078 | unsigned long val; |
| 1079 | |
| 1080 | ret = kstrtoul(buf, 0, &val); |
| 1081 | if (ret < 0) |
| 1082 | return ret; |
| 1083 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1084 | tunables->boost_val = val; |
Todd Poynor | 15a9ea0 | 2012-04-23 20:42:41 -0700 | [diff] [blame] | 1085 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1086 | if (tunables->boost_val) { |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1087 | trace_cpufreq_interactive_boost("on"); |
Lianwei Wang | 2277e3f | 2014-12-02 17:20:50 -0800 | [diff] [blame] | 1088 | if (!tunables->boosted) |
| 1089 | cpufreq_interactive_boost(tunables); |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1090 | } else { |
Ruchi Kandoi | 296d791 | 2014-04-09 16:47:59 -0700 | [diff] [blame] | 1091 | tunables->boostpulse_endtime = ktime_to_us(ktime_get()); |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1092 | trace_cpufreq_interactive_unboost("off"); |
| 1093 | } |
Todd Poynor | 15a9ea0 | 2012-04-23 20:42:41 -0700 | [diff] [blame] | 1094 | |
| 1095 | return count; |
| 1096 | } |
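/*
 * Usage sketch: writing 1 latches the boost, holding CPUs governed by
 * these tunables at or above hispeed_freq; writing 0 releases it and
 * also ends any in-flight boostpulse by pulling boostpulse_endtime back
 * to "now".
 */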
| 1097 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1098 | static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables, |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1099 | const char *buf, size_t count) |
| 1100 | { |
| 1101 | int ret; |
| 1102 | unsigned long val; |
| 1103 | |
| 1104 | ret = kstrtoul(buf, 0, &val); |
| 1105 | if (ret < 0) |
| 1106 | return ret; |
| 1107 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1108 | tunables->boostpulse_endtime = ktime_to_us(ktime_get()) + |
| 1109 | tunables->boostpulse_duration_val; |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1110 | trace_cpufreq_interactive_boost("pulse"); |
Lianwei Wang | 2277e3f | 2014-12-02 17:20:50 -0800 | [diff] [blame] | 1111 | if (!tunables->boosted) |
| 1112 | cpufreq_interactive_boost(tunables); |
Todd Poynor | 442a312 | 2012-05-03 00:16:55 -0700 | [diff] [blame] | 1113 | return count; |
| 1114 | } |
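/*
 * Note: the parsed value is only used to validate the write; any
 * integer (e.g. "echo 1 > boostpulse") triggers a pulse lasting
 * boostpulse_duration_val usecs from now.
 */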
| 1115 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1116 | static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables |
| 1117 | *tunables, char *buf) |
Todd Poynor | e16d592 | 2012-12-14 17:31:19 -0800 | [diff] [blame] | 1118 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1119 | return sprintf(buf, "%d\n", tunables->boostpulse_duration_val); |
Todd Poynor | e16d592 | 2012-12-14 17:31:19 -0800 | [diff] [blame] | 1120 | } |
| 1121 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1122 | static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables |
| 1123 | *tunables, const char *buf, size_t count) |
Todd Poynor | e16d592 | 2012-12-14 17:31:19 -0800 | [diff] [blame] | 1124 | { |
| 1125 | int ret; |
| 1126 | unsigned long val; |
| 1127 | |
| 1128 | ret = kstrtoul(buf, 0, &val); |
| 1129 | if (ret < 0) |
| 1130 | return ret; |
| 1131 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1132 | tunables->boostpulse_duration_val = val; |
Todd Poynor | e16d592 | 2012-12-14 17:31:19 -0800 | [diff] [blame] | 1133 | return count; |
| 1134 | } |
| 1135 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1136 | static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables, |
| 1137 | char *buf) |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1138 | { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1139 | return sprintf(buf, "%u\n", tunables->io_is_busy); |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1140 | } |
| 1141 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1142 | static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables, |
| 1143 | const char *buf, size_t count) |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1144 | { |
| 1145 | int ret; |
| 1146 | unsigned long val; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1147 | struct cpufreq_interactive_tunables *t; |
| 1148 | int cpu; |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1149 | |
| 1150 | ret = kstrtoul(buf, 0, &val); |
| 1151 | if (ret < 0) |
| 1152 | return ret; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1153 | tunables->io_is_busy = val; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1154 | |
| 1155 | if (!tunables->use_sched_load) |
| 1156 | return count; |
| 1157 | |
| 1158 | for_each_possible_cpu(cpu) { |
| 1159 | t = per_cpu(cpuinfo, cpu).cached_tunables; |
| 1160 | if (t && t->use_sched_load) |
| 1161 | t->io_is_busy = val; |
| 1162 | } |
| 1163 | sched_set_io_is_busy(val); |
| 1164 | |
| 1165 | return count; |
| 1166 | } |
| 1167 | |
| 1168 | static int cpufreq_interactive_enable_sched_input( |
| 1169 | struct cpufreq_interactive_tunables *tunables) |
| 1170 | { |
| 1171 | int rc = 0, j; |
| 1172 | struct cpufreq_interactive_tunables *t; |
| 1173 | |
| 1174 | mutex_lock(&sched_lock); |
| 1175 | |
| 1176 | set_window_count++; |
Junjie Wu | e627d70 | 2014-12-15 16:51:08 -0800 | [diff] [blame] | 1177 | if (set_window_count > 1) { |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1178 | for_each_possible_cpu(j) { |
| 1179 | t = per_cpu(cpuinfo, j).cached_tunables; |
| 1180 | if (t && t->use_sched_load) { |
| 1181 | tunables->timer_rate = t->timer_rate; |
| 1182 | tunables->io_is_busy = t->io_is_busy; |
| 1183 | break; |
| 1184 | } |
| 1185 | } |
Junjie Wu | e627d70 | 2014-12-15 16:51:08 -0800 | [diff] [blame] | 1186 | } else { |
| 1187 | rc = set_window_helper(tunables); |
| 1188 | if (rc) { |
| 1189 | pr_err("%s: Failed to set sched window\n", __func__); |
| 1190 | set_window_count--; |
| 1191 | goto out; |
| 1192 | } |
| 1193 | sched_set_io_is_busy(tunables->io_is_busy); |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1194 | } |
| 1195 | |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1196 | if (!tunables->use_migration_notif) |
| 1197 | goto out; |
| 1198 | |
| 1199 | migration_register_count++; |
Junjie Wu | e627d70 | 2014-12-15 16:51:08 -0800 | [diff] [blame] | 1200 | if (migration_register_count == 1)
 | 1201 | atomic_notifier_chain_register(&load_alert_notifier_head,
 | 1202 | &load_notifier_block);
| 1205 | out: |
| 1206 | mutex_unlock(&sched_lock); |
| 1207 | return rc; |
| 1208 | } |
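/*
 * set_window_count reference-counts governor instances that consume
 * scheduler-reported load: only the first one programs the sched window
 * (set_window_helper()) and io_is_busy; later instances inherit
 * timer_rate/io_is_busy from an existing user so all stay consistent.
 * migration_register_count reference-counts the load_alert notifier in
 * the same way.
 */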
| 1209 | |
| 1210 | static int cpufreq_interactive_disable_sched_input( |
| 1211 | struct cpufreq_interactive_tunables *tunables) |
| 1212 | { |
| 1213 | mutex_lock(&sched_lock); |
| 1214 | |
| 1215 | if (tunables->use_migration_notif) { |
| 1216 | migration_register_count--; |
Junjie Wu | e627d70 | 2014-12-15 16:51:08 -0800 | [diff] [blame] | 1217 | if (migration_register_count < 1) |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1218 | atomic_notifier_chain_unregister( |
| 1219 | &load_alert_notifier_head, |
| 1220 | &load_notifier_block); |
| 1221 | } |
| 1222 | set_window_count--; |
| 1223 | |
| 1224 | mutex_unlock(&sched_lock); |
| 1225 | return 0; |
| 1226 | } |
| 1227 | |
| 1228 | static ssize_t show_use_sched_load( |
| 1229 | struct cpufreq_interactive_tunables *tunables, char *buf) |
| 1230 | { |
| 1231 | return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load); |
| 1232 | } |
| 1233 | |
| 1234 | static ssize_t store_use_sched_load( |
| 1235 | struct cpufreq_interactive_tunables *tunables, |
| 1236 | const char *buf, size_t count) |
| 1237 | { |
| 1238 | int ret; |
| 1239 | unsigned long val; |
| 1240 | |
| 1241 | ret = kstrtoul(buf, 0, &val); |
| 1242 | if (ret < 0) |
| 1243 | return ret; |
| 1244 | |
| 1245 | if (tunables->use_sched_load == (bool) val) |
| 1246 | return count; |
| 1247 | if (val) |
| 1248 | ret = cpufreq_interactive_enable_sched_input(tunables); |
| 1249 | else |
| 1250 | ret = cpufreq_interactive_disable_sched_input(tunables); |
| 1251 | |
| 1252 | if (ret) |
| 1253 | return ret; |
| 1254 | |
| 1255 | tunables->use_sched_load = val; |
| 1256 | return count; |
| 1257 | } |
| 1258 | |
| 1259 | static ssize_t show_use_migration_notif( |
| 1260 | struct cpufreq_interactive_tunables *tunables, char *buf) |
| 1261 | { |
| 1262 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 1263 | tunables->use_migration_notif); |
| 1264 | } |
| 1265 | |
| 1266 | static ssize_t store_use_migration_notif( |
| 1267 | struct cpufreq_interactive_tunables *tunables, |
| 1268 | const char *buf, size_t count) |
| 1269 | { |
| 1270 | int ret; |
| 1271 | unsigned long val; |
| 1272 | |
| 1273 | ret = kstrtoul(buf, 0, &val); |
| 1274 | if (ret < 0) |
| 1275 | return ret; |
| 1276 | |
| 1277 | if (tunables->use_migration_notif == (bool) val) |
| 1278 | return count; |
| 1279 | tunables->use_migration_notif = val; |
| 1280 | |
| 1281 | if (!tunables->use_sched_load) |
| 1282 | return count; |
| 1283 | |
| 1284 | mutex_lock(&sched_lock); |
| 1285 | if (val) { |
| 1286 | migration_register_count++; |
| 1287 | if (migration_register_count == 1) |
| 1288 | atomic_notifier_chain_register( |
| 1289 | &load_alert_notifier_head, |
| 1290 | &load_notifier_block); |
| 1291 | } else { |
| 1292 | migration_register_count--; |
| 1293 | if (!migration_register_count) |
| 1294 | atomic_notifier_chain_unregister( |
| 1295 | &load_alert_notifier_head, |
| 1296 | &load_notifier_block); |
| 1297 | } |
| 1298 | mutex_unlock(&sched_lock); |
| 1299 | |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1300 | return count; |
| 1301 | } |
| 1302 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1303 | /* |
| 1304 | * Create show/store routines |
| 1305 | * - sys: One governor instance for complete SYSTEM |
| 1306 | * - pol: One governor instance per struct cpufreq_policy |
| 1307 | */ |
| 1308 | #define show_gov_pol_sys(file_name) \ |
| 1309 | static ssize_t show_##file_name##_gov_sys \ |
| 1310 | (struct kobject *kobj, struct attribute *attr, char *buf) \ |
| 1311 | { \ |
| 1312 | return show_##file_name(common_tunables, buf); \ |
| 1313 | } \ |
| 1314 | \ |
| 1315 | static ssize_t show_##file_name##_gov_pol \ |
| 1316 | (struct cpufreq_policy *policy, char *buf) \ |
| 1317 | { \ |
| 1318 | return show_##file_name(policy->governor_data, buf); \ |
| 1319 | } |
Lianwei Wang | 72e4057 | 2013-02-22 11:39:18 +0800 | [diff] [blame] | 1320 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1321 | #define store_gov_pol_sys(file_name) \ |
| 1322 | static ssize_t store_##file_name##_gov_sys \ |
| 1323 | (struct kobject *kobj, struct attribute *attr, const char *buf, \ |
| 1324 | size_t count) \ |
| 1325 | { \ |
| 1326 | return store_##file_name(common_tunables, buf, count); \ |
| 1327 | } \ |
| 1328 | \ |
| 1329 | static ssize_t store_##file_name##_gov_pol \ |
| 1330 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ |
| 1331 | { \ |
| 1332 | return store_##file_name(policy->governor_data, buf, count); \ |
| 1333 | } |
| 1334 | |
| 1335 | #define show_store_gov_pol_sys(file_name) \ |
| 1336 | show_gov_pol_sys(file_name); \ |
| 1337 | store_gov_pol_sys(file_name) |
| 1338 | |
| 1339 | show_store_gov_pol_sys(target_loads); |
| 1340 | show_store_gov_pol_sys(above_hispeed_delay); |
| 1341 | show_store_gov_pol_sys(hispeed_freq); |
| 1342 | show_store_gov_pol_sys(go_hispeed_load); |
| 1343 | show_store_gov_pol_sys(min_sample_time); |
| 1344 | show_store_gov_pol_sys(timer_rate); |
| 1345 | show_store_gov_pol_sys(timer_slack); |
| 1346 | show_store_gov_pol_sys(boost); |
| 1347 | store_gov_pol_sys(boostpulse); |
| 1348 | show_store_gov_pol_sys(boostpulse_duration); |
| 1349 | show_store_gov_pol_sys(io_is_busy); |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1350 | show_store_gov_pol_sys(use_sched_load); |
| 1351 | show_store_gov_pol_sys(use_migration_notif); |
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 1352 | show_store_gov_pol_sys(max_freq_hysteresis); |
Junjie Wu | 7ca999f | 2014-08-29 18:55:45 -0700 | [diff] [blame] | 1353 | show_store_gov_pol_sys(align_windows); |
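/*
 * Expansion sketch: show_store_gov_pol_sys(hispeed_freq) above
 * generates four thin wrappers, e.g.
 *
 *   static ssize_t show_hispeed_freq_gov_pol(
 *           struct cpufreq_policy *policy, char *buf)
 *   {
 *           return show_hispeed_freq(policy->governor_data, buf);
 *   }
 *
 * so the same tunable handlers back both the system-wide kobject and
 * the per-policy sysfs trees.
 */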
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1354 | |
| 1355 | #define gov_sys_attr_rw(_name) \ |
| 1356 | static struct global_attr _name##_gov_sys = \ |
| 1357 | __ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys) |
| 1358 | |
| 1359 | #define gov_pol_attr_rw(_name) \ |
| 1360 | static struct freq_attr _name##_gov_pol = \ |
| 1361 | __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol) |
| 1362 | |
| 1363 | #define gov_sys_pol_attr_rw(_name) \ |
| 1364 | gov_sys_attr_rw(_name); \ |
| 1365 | gov_pol_attr_rw(_name) |
| 1366 | |
| 1367 | gov_sys_pol_attr_rw(target_loads); |
| 1368 | gov_sys_pol_attr_rw(above_hispeed_delay); |
| 1369 | gov_sys_pol_attr_rw(hispeed_freq); |
| 1370 | gov_sys_pol_attr_rw(go_hispeed_load); |
| 1371 | gov_sys_pol_attr_rw(min_sample_time); |
| 1372 | gov_sys_pol_attr_rw(timer_rate); |
| 1373 | gov_sys_pol_attr_rw(timer_slack); |
| 1374 | gov_sys_pol_attr_rw(boost); |
| 1375 | gov_sys_pol_attr_rw(boostpulse_duration); |
| 1376 | gov_sys_pol_attr_rw(io_is_busy); |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1377 | gov_sys_pol_attr_rw(use_sched_load); |
| 1378 | gov_sys_pol_attr_rw(use_migration_notif); |
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 1379 | gov_sys_pol_attr_rw(max_freq_hysteresis); |
Junjie Wu | 7ca999f | 2014-08-29 18:55:45 -0700 | [diff] [blame] | 1380 | gov_sys_pol_attr_rw(align_windows); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1381 | |
| 1382 | static struct global_attr boostpulse_gov_sys = |
| 1383 | __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys); |
| 1384 | |
| 1385 | static struct freq_attr boostpulse_gov_pol = |
| 1386 | __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol); |
| 1387 | |
 | 1388 | /* One governor instance for the entire system */
| 1389 | static struct attribute *interactive_attributes_gov_sys[] = { |
| 1390 | &target_loads_gov_sys.attr, |
| 1391 | &above_hispeed_delay_gov_sys.attr, |
| 1392 | &hispeed_freq_gov_sys.attr, |
| 1393 | &go_hispeed_load_gov_sys.attr, |
| 1394 | &min_sample_time_gov_sys.attr, |
| 1395 | &timer_rate_gov_sys.attr, |
| 1396 | &timer_slack_gov_sys.attr, |
| 1397 | &boost_gov_sys.attr, |
| 1398 | &boostpulse_gov_sys.attr, |
| 1399 | &boostpulse_duration_gov_sys.attr, |
| 1400 | &io_is_busy_gov_sys.attr, |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1401 | &use_sched_load_gov_sys.attr, |
| 1402 | &use_migration_notif_gov_sys.attr, |
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 1403 | &max_freq_hysteresis_gov_sys.attr, |
Junjie Wu | 7ca999f | 2014-08-29 18:55:45 -0700 | [diff] [blame] | 1404 | &align_windows_gov_sys.attr, |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1405 | NULL, |
| 1406 | }; |
| 1407 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1408 | static struct attribute_group interactive_attr_group_gov_sys = { |
| 1409 | .attrs = interactive_attributes_gov_sys, |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1410 | .name = "interactive", |
| 1411 | }; |
| 1412 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1413 | /* Per-policy governor instance */
| 1414 | static struct attribute *interactive_attributes_gov_pol[] = { |
| 1415 | &target_loads_gov_pol.attr, |
| 1416 | &above_hispeed_delay_gov_pol.attr, |
| 1417 | &hispeed_freq_gov_pol.attr, |
| 1418 | &go_hispeed_load_gov_pol.attr, |
| 1419 | &min_sample_time_gov_pol.attr, |
| 1420 | &timer_rate_gov_pol.attr, |
| 1421 | &timer_slack_gov_pol.attr, |
| 1422 | &boost_gov_pol.attr, |
| 1423 | &boostpulse_gov_pol.attr, |
| 1424 | &boostpulse_duration_gov_pol.attr, |
| 1425 | &io_is_busy_gov_pol.attr, |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1426 | &use_sched_load_gov_pol.attr, |
| 1427 | &use_migration_notif_gov_pol.attr, |
Junjie Wu | e05d74e | 2014-08-29 14:12:52 -0700 | [diff] [blame] | 1428 | &max_freq_hysteresis_gov_pol.attr, |
Junjie Wu | 7ca999f | 2014-08-29 18:55:45 -0700 | [diff] [blame] | 1429 | &align_windows_gov_pol.attr, |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1430 | NULL, |
| 1431 | }; |
| 1432 | |
| 1433 | static struct attribute_group interactive_attr_group_gov_pol = { |
| 1434 | .attrs = interactive_attributes_gov_pol, |
| 1435 | .name = "interactive", |
| 1436 | }; |
| 1437 | |
| 1438 | static struct attribute_group *get_sysfs_attr(void) |
| 1439 | { |
| 1440 | if (have_governor_per_policy()) |
| 1441 | return &interactive_attr_group_gov_pol; |
| 1442 | else |
| 1443 | return &interactive_attr_group_gov_sys; |
| 1444 | } |
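/*
 * Depending on have_governor_per_policy(), the attribute group above is
 * exposed once globally (typically
 * /sys/devices/system/cpu/cpufreq/interactive/) or once per policy
 * (typically /sys/devices/system/cpu/cpuN/cpufreq/interactive/); exact
 * paths are the conventional cpufreq locations and may vary by
 * platform.
 */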
| 1445 | |
Sam Leffler | 3ab7c2b | 2012-06-27 10:12:04 -0700 | [diff] [blame] | 1446 | static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, |
| 1447 | unsigned long val, |
| 1448 | void *data) |
| 1449 | { |
Rohit Gupta | 189c222 | 2015-03-06 18:46:04 -0800 | [diff] [blame] | 1450 | if (val == IDLE_END) |
Sam Leffler | 3ab7c2b | 2012-06-27 10:12:04 -0700 | [diff] [blame] | 1451 | cpufreq_interactive_idle_end(); |
Sam Leffler | 3ab7c2b | 2012-06-27 10:12:04 -0700 | [diff] [blame] | 1452 | |
| 1453 | return 0; |
| 1454 | } |
| 1455 | |
| 1456 | static struct notifier_block cpufreq_interactive_idle_nb = { |
| 1457 | .notifier_call = cpufreq_interactive_idle_notifier, |
| 1458 | }; |
| 1459 | |
Junjie Wu | 53f83f8 | 2014-08-18 16:35:09 -0700 | [diff] [blame] | 1460 | static void save_tunables(struct cpufreq_policy *policy, |
| 1461 | struct cpufreq_interactive_tunables *tunables) |
| 1462 | { |
| 1463 | int cpu; |
| 1464 | struct cpufreq_interactive_cpuinfo *pcpu; |
| 1465 | |
| 1466 | if (have_governor_per_policy()) |
| 1467 | cpu = cpumask_first(policy->related_cpus); |
| 1468 | else |
| 1469 | cpu = 0; |
| 1470 | |
| 1471 | pcpu = &per_cpu(cpuinfo, cpu); |
| 1472 | WARN_ON(pcpu->cached_tunables && pcpu->cached_tunables != tunables); |
| 1473 | pcpu->cached_tunables = tunables; |
| 1474 | } |
| 1475 | |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1476 | static struct cpufreq_interactive_tunables *alloc_tunable( |
| 1477 | struct cpufreq_policy *policy) |
| 1478 | { |
| 1479 | struct cpufreq_interactive_tunables *tunables; |
| 1480 | |
| 1481 | tunables = kzalloc(sizeof(*tunables), GFP_KERNEL); |
| 1482 | if (!tunables) |
| 1483 | return ERR_PTR(-ENOMEM); |
| 1484 | |
| 1485 | tunables->above_hispeed_delay = default_above_hispeed_delay; |
| 1486 | tunables->nabove_hispeed_delay = |
| 1487 | ARRAY_SIZE(default_above_hispeed_delay); |
| 1488 | tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; |
| 1489 | tunables->target_loads = default_target_loads; |
| 1490 | tunables->ntarget_loads = ARRAY_SIZE(default_target_loads); |
| 1491 | tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME; |
| 1492 | tunables->timer_rate = DEFAULT_TIMER_RATE; |
| 1493 | tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; |
| 1494 | tunables->timer_slack_val = DEFAULT_TIMER_SLACK; |
| 1495 | |
| 1496 | spin_lock_init(&tunables->target_loads_lock); |
| 1497 | spin_lock_init(&tunables->above_hispeed_delay_lock); |
| 1498 | |
Junjie Wu | 53f83f8 | 2014-08-18 16:35:09 -0700 | [diff] [blame] | 1499 | save_tunables(policy, tunables); |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1500 | return tunables; |
| 1501 | } |
| 1502 | |
Saravana Kannan | 07c2aa6 | 2014-07-22 15:42:51 -0700 | [diff] [blame] | 1503 | static struct cpufreq_interactive_tunables *restore_tunables( |
| 1504 | struct cpufreq_policy *policy) |
| 1505 | { |
| 1506 | int cpu; |
| 1507 | |
| 1508 | if (have_governor_per_policy()) |
| 1509 | cpu = cpumask_first(policy->related_cpus); |
| 1510 | else |
| 1511 | cpu = 0; |
| 1512 | |
Junjie Wu | 13c6a76 | 2014-08-07 18:04:13 -0700 | [diff] [blame] | 1513 | return per_cpu(cpuinfo, cpu).cached_tunables; |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1514 | } |
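/*
 * Tunables are cached per policy's first CPU rather than freed on
 * CPUFREQ_GOV_POLICY_EXIT, so configured values survive governor
 * restarts and CPU hotplug; alloc_tunable() only runs the first time a
 * policy sees this governor, and the cache is reclaimed in
 * cpufreq_interactive_exit().
 */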
| 1515 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1516 | static int cpufreq_governor_interactive(struct cpufreq_policy *policy, |
| 1517 | unsigned int event) |
| 1518 | { |
| 1519 | int rc; |
| 1520 | unsigned int j; |
| 1521 | struct cpufreq_interactive_cpuinfo *pcpu; |
| 1522 | struct cpufreq_frequency_table *freq_table; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1523 | struct cpufreq_interactive_tunables *tunables; |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1524 | unsigned long flags; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1525 | int first_cpu; |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1526 | |
| 1527 | if (have_governor_per_policy()) |
| 1528 | tunables = policy->governor_data; |
| 1529 | else |
| 1530 | tunables = common_tunables; |
| 1531 | |
| 1532 | WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT)); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1533 | |
| 1534 | switch (event) { |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1535 | case CPUFREQ_GOV_POLICY_INIT: |
| 1536 | if (have_governor_per_policy()) { |
| 1537 | WARN_ON(tunables); |
| 1538 | } else if (tunables) { |
| 1539 | tunables->usage_count++; |
| 1540 | policy->governor_data = tunables; |
| 1541 | return 0; |
| 1542 | } |
| 1543 | |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1544 | first_cpu = cpumask_first(policy->related_cpus); |
| 1545 | for_each_cpu(j, policy->related_cpus) |
| 1546 | per_cpu(cpuinfo, j).first_cpu = first_cpu; |
| 1547 | |
Saravana Kannan | 07c2aa6 | 2014-07-22 15:42:51 -0700 | [diff] [blame] | 1548 | tunables = restore_tunables(policy); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1549 | if (!tunables) { |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1550 | tunables = alloc_tunable(policy); |
| 1551 | if (IS_ERR(tunables)) |
| 1552 | return PTR_ERR(tunables); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1553 | } |
| 1554 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1555 | tunables->usage_count = 1; |
Minsung Kim | 82cc6a9 | 2014-01-19 14:32:42 +0900 | [diff] [blame] | 1556 | policy->governor_data = tunables; |
Junjie Wu | ab1db0a | 2015-11-18 14:49:29 -0800 | [diff] [blame] | 1557 | if (!have_governor_per_policy()) |
Minsung Kim | 82cc6a9 | 2014-01-19 14:32:42 +0900 | [diff] [blame] | 1558 | common_tunables = tunables; |
| 1559 | |
| 1560 | rc = sysfs_create_group(get_governor_parent_kobj(policy), |
| 1561 | get_sysfs_attr()); |
| 1562 | if (rc) { |
| 1563 | kfree(tunables); |
| 1564 | policy->governor_data = NULL; |
Junjie Wu | ab1db0a | 2015-11-18 14:49:29 -0800 | [diff] [blame] | 1565 | if (!have_governor_per_policy()) |
Minsung Kim | 82cc6a9 | 2014-01-19 14:32:42 +0900 | [diff] [blame] | 1566 | common_tunables = NULL; |
| 1567 | return rc; |
| 1568 | } |
| 1569 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1570 | if (!policy->governor->initialized) { |
| 1571 | idle_notifier_register(&cpufreq_interactive_idle_nb); |
| 1572 | cpufreq_register_notifier(&cpufreq_notifier_block, |
| 1573 | CPUFREQ_TRANSITION_NOTIFIER); |
| 1574 | } |
| 1575 | |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1576 | if (tunables->use_sched_load) |
| 1577 | cpufreq_interactive_enable_sched_input(tunables); |
| 1578 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1579 | break; |
| 1580 | |
| 1581 | case CPUFREQ_GOV_POLICY_EXIT: |
| 1582 | if (!--tunables->usage_count) { |
| 1583 | if (policy->governor->initialized == 1) { |
| 1584 | cpufreq_unregister_notifier(&cpufreq_notifier_block, |
| 1585 | CPUFREQ_TRANSITION_NOTIFIER); |
| 1586 | idle_notifier_unregister(&cpufreq_interactive_idle_nb); |
| 1587 | } |
| 1588 | |
| 1589 | sysfs_remove_group(get_governor_parent_kobj(policy), |
| 1590 | get_sysfs_attr()); |
Greg Hackmann | 6bc30c3 | 2014-12-08 10:08:35 -0800 | [diff] [blame] | 1591 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1592 | common_tunables = NULL; |
| 1593 | } |
| 1594 | |
| 1595 | policy->governor_data = NULL; |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1596 | |
| 1597 | if (tunables->use_sched_load) |
| 1598 | cpufreq_interactive_disable_sched_input(tunables); |
| 1599 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1600 | break; |
| 1601 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1602 | case CPUFREQ_GOV_START: |
Lianwei Wang | 1d4f9a7 | 2013-01-07 14:15:51 +0800 | [diff] [blame] | 1603 | mutex_lock(&gov_lock); |
| 1604 | |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1605 | freq_table = cpufreq_frequency_get_table(policy->cpu); |
| 1606 | if (!tunables->hispeed_freq) |
| 1607 | tunables->hispeed_freq = policy->max; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1608 | |
| 1609 | for_each_cpu(j, policy->cpus) { |
| 1610 | pcpu = &per_cpu(cpuinfo, j); |
| 1611 | pcpu->policy = policy; |
| 1612 | pcpu->target_freq = policy->cur; |
| 1613 | pcpu->freq_table = freq_table; |
Todd Poynor | 6d15fa3 | 2012-04-26 21:41:40 -0700 | [diff] [blame] | 1614 | pcpu->floor_freq = pcpu->target_freq; |
Junjie Wu | d5ac8ee | 2015-03-24 15:51:10 -0700 | [diff] [blame] | 1615 | pcpu->pol_floor_val_time = |
Todd Poynor | 31817c9 | 2012-12-07 20:08:45 -0800 | [diff] [blame] | 1616 | ktime_to_us(ktime_get()); |
Junjie Wu | d5ac8ee | 2015-03-24 15:51:10 -0700 | [diff] [blame] | 1617 | pcpu->loc_floor_val_time = pcpu->pol_floor_val_time; |
| 1618 | pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time; |
| 1619 | pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time; |
Junjie Wu | 1d86895 | 2015-03-27 11:44:21 -0700 | [diff] [blame^] | 1620 | pcpu->min_freq = policy->min; |
Junjie Wu | 82f0803 | 2014-12-09 13:20:26 -0800 | [diff] [blame] | 1621 | pcpu->reject_notification = true; |
Todd Poynor | 3951206 | 2012-12-20 15:51:00 -0800 | [diff] [blame] | 1622 | down_write(&pcpu->enable_sem); |
Shridhar Rasal | 2907f84 | 2013-09-09 19:17:14 +0530 | [diff] [blame] | 1623 | del_timer_sync(&pcpu->cpu_timer); |
| 1624 | del_timer_sync(&pcpu->cpu_slack_timer); |
Junjie Wu | 6b974ed | 2014-04-28 15:11:47 -0700 | [diff] [blame] | 1625 | pcpu->last_evaluated_jiffy = get_jiffies_64(); |
Viresh Kumar | 17d15c4 | 2013-05-16 14:58:54 +0530 | [diff] [blame] | 1626 | cpufreq_interactive_timer_start(tunables, j); |
Todd Poynor | 3951206 | 2012-12-20 15:51:00 -0800 | [diff] [blame] | 1627 | pcpu->governor_enabled = 1; |
| 1628 | up_write(&pcpu->enable_sem); |
Junjie Wu | 82f0803 | 2014-12-09 13:20:26 -0800 | [diff] [blame] | 1629 | pcpu->reject_notification = false; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1630 | } |
| 1631 | |
Lianwei Wang | 1d4f9a7 | 2013-01-07 14:15:51 +0800 | [diff] [blame] | 1632 | mutex_unlock(&gov_lock); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1633 | break; |
| 1634 | |
| 1635 | case CPUFREQ_GOV_STOP: |
Lianwei Wang | 1d4f9a7 | 2013-01-07 14:15:51 +0800 | [diff] [blame] | 1636 | mutex_lock(&gov_lock); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1637 | for_each_cpu(j, policy->cpus) { |
| 1638 | pcpu = &per_cpu(cpuinfo, j); |
Junjie Wu | 82f0803 | 2014-12-09 13:20:26 -0800 | [diff] [blame] | 1639 | pcpu->reject_notification = true; |
Todd Poynor | 5cad609 | 2012-12-18 17:50:44 -0800 | [diff] [blame] | 1640 | down_write(&pcpu->enable_sem); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1641 | pcpu->governor_enabled = 0; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1642 | del_timer_sync(&pcpu->cpu_timer); |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1643 | del_timer_sync(&pcpu->cpu_slack_timer); |
Todd Poynor | 5cad609 | 2012-12-18 17:50:44 -0800 | [diff] [blame] | 1644 | up_write(&pcpu->enable_sem); |
Junjie Wu | 82f0803 | 2014-12-09 13:20:26 -0800 | [diff] [blame] | 1645 | pcpu->reject_notification = false; |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1646 | } |
| 1647 | |
Lianwei Wang | 1d4f9a7 | 2013-01-07 14:15:51 +0800 | [diff] [blame] | 1648 | mutex_unlock(&gov_lock); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1649 | break; |
| 1650 | |
| 1651 | case CPUFREQ_GOV_LIMITS: |
| 1652 | if (policy->max < policy->cur) |
| 1653 | __cpufreq_driver_target(policy, |
| 1654 | policy->max, CPUFREQ_RELATION_H); |
| 1655 | else if (policy->min > policy->cur) |
| 1656 | __cpufreq_driver_target(policy, |
| 1657 | policy->min, CPUFREQ_RELATION_L); |
Lianwei Wang | 90c6c15 | 2013-04-26 13:30:51 +0800 | [diff] [blame] | 1658 | for_each_cpu(j, policy->cpus) { |
| 1659 | pcpu = &per_cpu(cpuinfo, j); |
| 1660 | |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1661 | down_read(&pcpu->enable_sem); |
Lianwei Wang | 90c6c15 | 2013-04-26 13:30:51 +0800 | [diff] [blame] | 1662 | if (pcpu->governor_enabled == 0) { |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1663 | up_read(&pcpu->enable_sem); |
Lianwei Wang | 90c6c15 | 2013-04-26 13:30:51 +0800 | [diff] [blame] | 1664 | continue; |
| 1665 | } |
| 1666 | |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1667 | spin_lock_irqsave(&pcpu->target_freq_lock, flags); |
Lianwei Wang | 90c6c15 | 2013-04-26 13:30:51 +0800 | [diff] [blame] | 1668 | if (policy->max < pcpu->target_freq) |
| 1669 | pcpu->target_freq = policy->max; |
| 1670 | else if (policy->min > pcpu->target_freq) |
| 1671 | pcpu->target_freq = policy->min; |
| 1672 | |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1673 | spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); |
Junjie Wu | 1d86895 | 2015-03-27 11:44:21 -0700 | [diff] [blame^] | 1674 | |
| 1675 | if (policy->min < pcpu->min_freq) |
| 1676 | cpufreq_interactive_timer_resched(j, true); |
| 1677 | pcpu->min_freq = policy->min; |
| 1678 | |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1679 | up_read(&pcpu->enable_sem); |
Lianwei Wang | 90c6c15 | 2013-04-26 13:30:51 +0800 | [diff] [blame] | 1680 | } |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1681 | break; |
| 1682 | } |
| 1683 | return 0; |
| 1684 | } |
| 1685 | |
Viresh Kumar | c7f826b | 2013-05-16 14:58:53 +0530 | [diff] [blame] | 1686 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE |
| 1687 | static |
| 1688 | #endif |
| 1689 | struct cpufreq_governor cpufreq_gov_interactive = { |
| 1690 | .name = "interactive", |
| 1691 | .governor = cpufreq_governor_interactive, |
| 1692 | .max_transition_latency = 10000000, |
| 1693 | .owner = THIS_MODULE, |
| 1694 | }; |
| 1695 | |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1696 | static void cpufreq_interactive_nop_timer(unsigned long data) |
| 1697 | { |
| 1698 | } |
| 1699 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1700 | static int __init cpufreq_interactive_init(void) |
| 1701 | { |
| 1702 | unsigned int i; |
| 1703 | struct cpufreq_interactive_cpuinfo *pcpu; |
| 1704 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
| 1705 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1706 | /* Initialize per-cpu timers */
| 1707 | for_each_possible_cpu(i) { |
| 1708 | pcpu = &per_cpu(cpuinfo, i); |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1709 | init_timer_deferrable(&pcpu->cpu_timer); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1710 | pcpu->cpu_timer.function = cpufreq_interactive_timer; |
| 1711 | pcpu->cpu_timer.data = i; |
Todd Poynor | 4add259 | 2012-12-18 17:50:10 -0800 | [diff] [blame] | 1712 | init_timer(&pcpu->cpu_slack_timer); |
| 1713 | pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; |
Todd Poynor | 0e58da2 | 2012-12-11 16:05:03 -0800 | [diff] [blame] | 1714 | spin_lock_init(&pcpu->load_lock); |
Badhri Jagan Sridharan | ef1eddd | 2014-04-07 18:26:30 -0700 | [diff] [blame] | 1715 | spin_lock_init(&pcpu->target_freq_lock); |
Todd Poynor | 5cad609 | 2012-12-18 17:50:44 -0800 | [diff] [blame] | 1716 | init_rwsem(&pcpu->enable_sem); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1717 | } |
| 1718 | |
Todd Poynor | 0f1920b | 2012-07-16 17:07:15 -0700 | [diff] [blame] | 1719 | spin_lock_init(&speedchange_cpumask_lock); |
Lianwei Wang | 1d4f9a7 | 2013-01-07 14:15:51 +0800 | [diff] [blame] | 1720 | mutex_init(&gov_lock); |
Junjie Wu | 4344ea3 | 2014-04-28 16:22:24 -0700 | [diff] [blame] | 1721 | mutex_init(&sched_lock); |
Todd Poynor | 0f1920b | 2012-07-16 17:07:15 -0700 | [diff] [blame] | 1722 | speedchange_task = |
| 1723 | kthread_create(cpufreq_interactive_speedchange_task, NULL, |
| 1724 | "cfinteractive"); |
| 1725 | if (IS_ERR(speedchange_task)) |
| 1726 | return PTR_ERR(speedchange_task); |
Sam Leffler | 5c9b827 | 2012-06-27 12:55:56 -0700 | [diff] [blame] | 1727 | |
Todd Poynor | 0f1920b | 2012-07-16 17:07:15 -0700 | [diff] [blame] | 1728 | sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, ¶m); |
| 1729 | get_task_struct(speedchange_task); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1730 | |
Sam Leffler | 5c9b827 | 2012-06-27 12:55:56 -0700 | [diff] [blame] | 1731 | /* NB: wake up so the thread does not look hung to the freezer */ |
Todd Poynor | 0f1920b | 2012-07-16 17:07:15 -0700 | [diff] [blame] | 1732 | wake_up_process(speedchange_task); |
Sam Leffler | 5c9b827 | 2012-06-27 12:55:56 -0700 | [diff] [blame] | 1733 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1734 | return cpufreq_register_governor(&cpufreq_gov_interactive); |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1735 | } |
| 1736 | |
| 1737 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE |
| 1738 | fs_initcall(cpufreq_interactive_init); |
| 1739 | #else |
| 1740 | module_init(cpufreq_interactive_init); |
| 1741 | #endif |
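/*
 * When built in as the default governor, register early via
 * fs_initcall() so the governor exists before cpufreq drivers start
 * probing; otherwise normal module_init() ordering suffices.
 */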
| 1742 | |
| 1743 | static void __exit cpufreq_interactive_exit(void) |
| 1744 | { |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1745 | int cpu; |
Junjie Wu | 13c6a76 | 2014-08-07 18:04:13 -0700 | [diff] [blame] | 1746 | struct cpufreq_interactive_cpuinfo *pcpu; |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1747 | |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1748 | cpufreq_unregister_governor(&cpufreq_gov_interactive); |
Todd Poynor | 0f1920b | 2012-07-16 17:07:15 -0700 | [diff] [blame] | 1749 | kthread_stop(speedchange_task); |
| 1750 | put_task_struct(speedchange_task); |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1751 | |
| 1752 | for_each_possible_cpu(cpu) { |
Junjie Wu | 13c6a76 | 2014-08-07 18:04:13 -0700 | [diff] [blame] | 1753 | pcpu = &per_cpu(cpuinfo, cpu); |
| 1754 | kfree(pcpu->cached_tunables); |
| 1755 | pcpu->cached_tunables = NULL; |
Junjie Wu | c5a97d9 | 2014-05-23 12:22:59 -0700 | [diff] [blame] | 1756 | } |
Mike Chan | ef96969 | 2010-06-22 11:26:45 -0700 | [diff] [blame] | 1757 | } |
| 1758 | |
| 1759 | module_exit(cpufreq_interactive_exit); |
| 1760 | |
| 1761 | MODULE_AUTHOR("Mike Chan <mike@android.com>"); |
 | 1762 | MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
 | 1763 | "latency-sensitive workloads");
| 1764 | MODULE_LICENSE("GPL"); |