blob: e67f12b47905779ed227413c2ac72b9c93420a0a [file] [log] [blame]
Saravana Kannane53d0b22013-07-13 01:49:09 -07001/*
Amir Vajidce53a572017-03-20 11:32:48 -07002 * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
Saravana Kannane53d0b22013-07-13 01:49:09 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "cpu-boost: " fmt
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/cpufreq.h>
Rohit Guptaa88da622013-11-26 18:20:57 -080019#include <linux/cpu.h>
Saravana Kannane53d0b22013-07-13 01:49:09 -070020#include <linux/sched.h>
21#include <linux/moduleparam.h>
Rohit Guptaa88da622013-11-26 18:20:57 -080022#include <linux/slab.h>
23#include <linux/input.h>
24#include <linux/time.h>
Saravana Kannane53d0b22013-07-13 01:49:09 -070025
/* Per-CPU input-boost bookkeeping. */
struct cpu_sync {
	int cpu;
	unsigned int input_boost_min;	/* floor currently imposed on this CPU's policy min (kHz); 0 = no boost active */
	unsigned int input_boost_freq;	/* user-configured boost frequency for this CPU (kHz); 0 = boosting disabled */
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

/* True while at least one CPU has a non-zero input_boost_freq. */
static bool input_boost_enabled;

/* How long (ms) the boosted minimum frequency is held after an input event. */
static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

/* Scheduler boost level requested on input; 0 disables sched boosting. */
static unsigned int sched_boost_on_input;
module_param(sched_boost_on_input, uint, 0644);

/* Tracks whether a scheduler boost taken by this driver is still active. */
static bool sched_boost_active;

static struct delayed_work input_boost_rem;
/* Timestamp (us) of the last input event that triggered a boost. */
static u64 last_input_time;
/* Input events arriving within this window of the last boost are ignored. */
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
Saravana Kannane53d0b22013-07-13 01:49:09 -070050
Junjie Wud225b212014-05-28 16:36:25 -070051static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
52{
53 int i, ntokens = 0;
54 unsigned int val, cpu;
55 const char *cp = buf;
56 bool enabled = false;
57
58 while ((cp = strpbrk(cp + 1, " :")))
59 ntokens++;
60
61 /* single number: apply to all CPUs */
62 if (!ntokens) {
63 if (sscanf(buf, "%u\n", &val) != 1)
64 return -EINVAL;
65 for_each_possible_cpu(i)
66 per_cpu(sync_info, i).input_boost_freq = val;
67 goto check_enable;
68 }
69
70 /* CPU:value pair */
71 if (!(ntokens % 2))
72 return -EINVAL;
73
74 cp = buf;
75 for (i = 0; i < ntokens; i += 2) {
76 if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
77 return -EINVAL;
78 if (cpu > num_possible_cpus())
79 return -EINVAL;
80
81 per_cpu(sync_info, cpu).input_boost_freq = val;
82 cp = strchr(cp, ' ');
83 cp++;
84 }
85
86check_enable:
87 for_each_possible_cpu(i) {
88 if (per_cpu(sync_info, i).input_boost_freq) {
89 enabled = true;
90 break;
91 }
92 }
93 input_boost_enabled = enabled;
94
95 return 0;
96}
97
98static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
99{
100 int cnt = 0, cpu;
101 struct cpu_sync *s;
102
103 for_each_possible_cpu(cpu) {
104 s = &per_cpu(sync_info, cpu);
105 cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
106 "%d:%u ", cpu, s->input_boost_freq);
107 }
108 cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
109 return cnt;
110}
111
/* Custom get/set ops so input_boost_freq supports per-CPU "cpu:freq" syntax. */
static const struct kernel_param_ops param_ops_input_boost_freq = {
	.set = set_input_boost_freq,
	.get = get_input_boost_freq,
};
module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);
117
Saravana Kannane53d0b22013-07-13 01:49:09 -0700118/*
119 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
120 * make sure policy min >= boost_min. The cpufreq framework then does the job
121 * of enforcing the new policy.
122 */
123static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
124 void *data)
125{
126 struct cpufreq_policy *policy = data;
127 unsigned int cpu = policy->cpu;
128 struct cpu_sync *s = &per_cpu(sync_info, cpu);
Rohit Guptaa88da622013-11-26 18:20:57 -0800129 unsigned int ib_min = s->input_boost_min;
Saravana Kannane53d0b22013-07-13 01:49:09 -0700130
Rohit Guptaa88da622013-11-26 18:20:57 -0800131 switch (val) {
132 case CPUFREQ_ADJUST:
133 if (!ib_min)
134 break;
Saravana Kannane53d0b22013-07-13 01:49:09 -0700135
Rohit Guptaa88da622013-11-26 18:20:57 -0800136 pr_debug("CPU%u policy min before boost: %u kHz\n",
137 cpu, policy->min);
138 pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min);
Saravana Kannane53d0b22013-07-13 01:49:09 -0700139
Rohit Guptaa88da622013-11-26 18:20:57 -0800140 cpufreq_verify_within_limits(policy, ib_min, UINT_MAX);
Saravana Kannane53d0b22013-07-13 01:49:09 -0700141
Rohit Guptaa88da622013-11-26 18:20:57 -0800142 pr_debug("CPU%u policy min after boost: %u kHz\n",
143 cpu, policy->min);
144 break;
145 }
Saravana Kannane53d0b22013-07-13 01:49:09 -0700146
147 return NOTIFY_OK;
148}
149
/* Registered with the cpufreq core as a POLICY notifier in cpu_boost_init(). */
static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};
153
Rohit Guptaa88da622013-11-26 18:20:57 -0800154static void update_policy_online(void)
155{
156 unsigned int i;
157
158 /* Re-evaluate policy to trigger adjust notifier for online CPUs */
159 get_online_cpus();
160 for_each_online_cpu(i) {
161 pr_debug("Updating policy for CPU%d\n", i);
162 cpufreq_update_policy(i);
163 }
164 put_online_cpus();
165}
166
167static void do_input_boost_rem(struct work_struct *work)
168{
Rohit Gupta1c042402014-06-12 17:32:08 -0700169 unsigned int i, ret;
Rohit Guptaa88da622013-11-26 18:20:57 -0800170 struct cpu_sync *i_sync_info;
171
172 /* Reset the input_boost_min for all CPUs in the system */
173 pr_debug("Resetting input boost min for all CPUs\n");
174 for_each_possible_cpu(i) {
175 i_sync_info = &per_cpu(sync_info, i);
176 i_sync_info->input_boost_min = 0;
177 }
178
179 /* Update policies for all online CPUs */
180 update_policy_online();
Rohit Gupta1c042402014-06-12 17:32:08 -0700181
182 if (sched_boost_active) {
183 ret = sched_set_boost(0);
184 if (ret)
185 pr_err("cpu-boost: HMP boost disable failed\n");
186 sched_boost_active = false;
187 }
Rohit Guptaa88da622013-11-26 18:20:57 -0800188}
189
190static void do_input_boost(struct work_struct *work)
191{
Rohit Gupta1c042402014-06-12 17:32:08 -0700192 unsigned int i, ret;
Rohit Guptaa88da622013-11-26 18:20:57 -0800193 struct cpu_sync *i_sync_info;
194
195 cancel_delayed_work_sync(&input_boost_rem);
Rohit Gupta1c042402014-06-12 17:32:08 -0700196 if (sched_boost_active) {
197 sched_set_boost(0);
198 sched_boost_active = false;
199 }
Rohit Guptaa88da622013-11-26 18:20:57 -0800200
201 /* Set the input_boost_min for all CPUs in the system */
202 pr_debug("Setting input boost min for all CPUs\n");
203 for_each_possible_cpu(i) {
204 i_sync_info = &per_cpu(sync_info, i);
Junjie Wud225b212014-05-28 16:36:25 -0700205 i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
Rohit Guptaa88da622013-11-26 18:20:57 -0800206 }
207
208 /* Update policies for all online CPUs */
209 update_policy_online();
210
Rohit Gupta1c042402014-06-12 17:32:08 -0700211 /* Enable scheduler boost to migrate tasks to big cluster */
Amir Vajidce53a572017-03-20 11:32:48 -0700212 if (sched_boost_on_input > 0) {
213 ret = sched_set_boost(sched_boost_on_input);
Rohit Gupta1c042402014-06-12 17:32:08 -0700214 if (ret)
215 pr_err("cpu-boost: HMP boost enable failed\n");
216 else
217 sched_boost_active = true;
218 }
219
Rohit Guptaa88da622013-11-26 18:20:57 -0800220 queue_delayed_work(cpu_boost_wq, &input_boost_rem,
221 msecs_to_jiffies(input_boost_ms));
222}
223
224static void cpuboost_input_event(struct input_handle *handle,
225 unsigned int type, unsigned int code, int value)
226{
227 u64 now;
228
Junjie Wud225b212014-05-28 16:36:25 -0700229 if (!input_boost_enabled)
Rohit Guptaa88da622013-11-26 18:20:57 -0800230 return;
231
232 now = ktime_to_us(ktime_get());
233 if (now - last_input_time < MIN_INPUT_INTERVAL)
234 return;
235
236 if (work_pending(&input_boost_work))
237 return;
238
239 queue_work(cpu_boost_wq, &input_boost_work);
240 last_input_time = ktime_to_us(ktime_get());
241}
242
243static int cpuboost_input_connect(struct input_handler *handler,
244 struct input_dev *dev, const struct input_device_id *id)
245{
246 struct input_handle *handle;
247 int error;
248
249 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
250 if (!handle)
251 return -ENOMEM;
252
253 handle->dev = dev;
254 handle->handler = handler;
255 handle->name = "cpufreq";
256
257 error = input_register_handle(handle);
258 if (error)
259 goto err2;
260
261 error = input_open_device(handle);
262 if (error)
263 goto err1;
264
265 return 0;
266err1:
267 input_unregister_handle(handle);
268err2:
269 kfree(handle);
270 return error;
271}
272
/* Input-core disconnect callback: undo cpuboost_input_connect() in reverse. */
static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
279
/*
 * Devices whose input events trigger a boost: multi-touch touchscreens,
 * single-touch touchpads, and anything emitting key events.
 */
static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },	/* terminating entry */
};
305
/* Handler registered with the input core in cpu_boost_init(). */
static struct input_handler cpuboost_input_handler = {
	.event          = cpuboost_input_event,
	.connect        = cpuboost_input_connect,
	.disconnect     = cpuboost_input_disconnect,
	.name           = "cpu-boost",
	.id_table       = cpuboost_ids,
};
313
Saravana Kannane53d0b22013-07-13 01:49:09 -0700314static int cpu_boost_init(void)
315{
Rohit Guptaa88da622013-11-26 18:20:57 -0800316 int cpu, ret;
Saravana Kannane53d0b22013-07-13 01:49:09 -0700317 struct cpu_sync *s;
318
Rohit Guptaa88da622013-11-26 18:20:57 -0800319 cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
320 if (!cpu_boost_wq)
Saravana Kannane53d0b22013-07-13 01:49:09 -0700321 return -EFAULT;
322
Rohit Guptaa88da622013-11-26 18:20:57 -0800323 INIT_WORK(&input_boost_work, do_input_boost);
324 INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
325
Saravana Kannane53d0b22013-07-13 01:49:09 -0700326 for_each_possible_cpu(cpu) {
327 s = &per_cpu(sync_info, cpu);
328 s->cpu = cpu;
329 }
330 cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
331
Rohit Guptaa88da622013-11-26 18:20:57 -0800332 ret = input_register_handler(&cpuboost_input_handler);
Saravana Kannane53d0b22013-07-13 01:49:09 -0700333 return 0;
334}
335late_initcall(cpu_boost_init);