/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>

/* Per-CPU state shared by the migration boost and the input boost. */
struct cpu_sync {
	struct task_struct *thread;		/* per-CPU migration sync thread */
	wait_queue_head_t sync_wq;		/* wakes up the sync thread */
	struct delayed_work boost_rem;		/* removes the migration boost */
	struct delayed_work input_boost_rem;	/* removes the input boost */
	int cpu;
	spinlock_t lock;			/* protects pending and src_cpu */
	bool pending;				/* migration boost request pending */
	atomic_t being_woken;			/* guards against recursive wakeups */
	int src_cpu;				/* CPU the task migrated from */
	unsigned int boost_min;			/* migration boost freq floor (kHz) */
	unsigned int input_boost_min;		/* input boost freq floor (kHz) */
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

static unsigned int boost_ms;
module_param(boost_ms, uint, 0644);

static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

static unsigned int input_boost_freq;
module_param(input_boost_freq, uint, 0644);

static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

static u64 last_input_time;
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)

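/*
 * All four knobs above are runtime-tunable module parameters. A minimal
 * usage sketch, assuming the standard /sys/module parameter layout (the
 * frequency value is illustrative, not a recommendation):
 *
 *	echo 20      > /sys/module/cpu_boost/parameters/boost_ms
 *	echo 1026000 > /sys/module/cpu_boost/parameters/input_boost_freq
 *	echo 40      > /sys/module/cpu_boost/parameters/input_boost_ms
 *
 * Input events closer together than MIN_INPUT_INTERVAL (150 ms) are
 * ignored, so at most one input boost is started per interval.
 */
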
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wakeup code. Achieve this by binding the thread to the respective CPU.
 * But a CPU going offline unbinds threads from that CPU. So, set the
 * affinity up again each time the CPU comes back up. We can use
 * CPUFREQ_START to figure out that a CPU is coming online instead of
 * registering for hotplug notifiers.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	switch (val) {
	case CPUFREQ_ADJUST:
		if (!b_min && !ib_min)
			break;

		/* Enforce the higher of the two boost floors. */
		min = max(b_min, ib_min);

		pr_debug("CPU%u policy min before boost: %u kHz\n",
			 cpu, policy->min);
		pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

		cpufreq_verify_within_limits(policy, min, UINT_MAX);

		pr_debug("CPU%u policy min after boost: %u kHz\n",
			 cpu, policy->min);
		break;

	case CPUFREQ_START:
		/* Re-bind the sync thread when the CPU comes online. */
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
		break;
	}

	return NOTIFY_OK;
}
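
/*
 * Worked example: if the governor requests policy->min = 300000 kHz while
 * boost_min = 1026000 kHz and input_boost_min = 0, the ADJUST callback
 * clamps the policy so that policy->min becomes 1026000 kHz. Once both
 * floors are back at zero, the next policy update restores the governor's
 * own minimum. (Frequencies are illustrative only.)
 */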

static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};

static void do_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  boost_rem.work);

	pr_debug("Removing boost for CPU%d\n", s->cpu);
	s->boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}

static void do_input_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  input_boost_rem.work);

	pr_debug("Removing input boost for CPU%d\n", s->cpu);
	s->input_boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}

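/*
 * One of these threads runs per CPU, bound to that CPU. It sleeps until
 * boost_migration_notify() flags a pending migration, then raises the
 * destination CPU's frequency floor toward the source CPU's current
 * frequency (capped at sync_threshold, if set) and queues delayed work
 * to drop the floor again after boost_ms.
 */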
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (long)data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(s->sync_wq, s->pending ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%ukHz >= CPU%d@%ukHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		/*
		 * Boost to the source CPU's current frequency, capped at
		 * sync_threshold when one is set.
		 */
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}

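/*
 * Called via an atomic notifier chain when a task migrates between CPUs,
 * so this must not sleep. It only records the request and wakes the
 * destination CPU's sync thread; the actual cpufreq work happens in
 * boost_mig_sync_thread().
 */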
static int boost_migration_notify(struct notifier_block *nb,
				  unsigned long dest_cpu, void *arg)
{
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

	if (!boost_ms)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n", (int)arg, (int)dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = (int)arg;
	spin_unlock_irqrestore(&s->lock, flags);
	/*
	 * Avoid issuing a recursive wakeup call, as the sync thread itself
	 * could be seen as migrating, triggering this notification. Note
	 * that the sync thread of a CPU could be running for a short while
	 * with its affinity broken because of CPU hotplug.
	 */
	if (!atomic_cmpxchg(&s->being_woken, 0, 1)) {
		wake_up(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};

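/*
 * Raise the frequency floor of every online CPU to input_boost_freq and
 * queue per-CPU delayed work to drop the floor again after input_boost_ms.
 * Runs on cpu_boost_wq in response to input events.
 */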
static void do_input_boost(struct work_struct *work)
{
	unsigned int i;
	int ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
	for_each_online_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
				      &i_sync_info->input_boost_rem,
				      msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}

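/*
 * Input event hook: rate-limited so that at most one boost is triggered
 * per MIN_INPUT_INTERVAL, and skipped while a previously queued
 * input_boost_work is still pending.
 */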
static void cpuboost_input_event(struct input_handle *handle,
				 unsigned int type, unsigned int code,
				 int value)
{
	u64 now;

	if (!input_boost_freq)
		return;

	now = ktime_to_us(ktime_get());
	if (now - last_input_time < MIN_INPUT_INTERVAL)
		return;

	if (work_pending(&input_boost_work))
		return;

	queue_work(cpu_boost_wq, &input_boost_work);
	last_input_time = ktime_to_us(ktime_get());
}

static int cpuboost_input_connect(struct input_handler *handler,
				  struct input_dev *dev,
				  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

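/*
 * Devices that can trigger an input boost: multi-touch touchscreens,
 * touchpads, and keypads.
 */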
static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			    BIT_MASK(ABS_MT_POSITION_X) |
			    BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			    BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

static struct input_handler cpuboost_input_handler = {
	.event = cpuboost_input_event,
	.connect = cpuboost_input_connect,
	.disconnect = cpuboost_input_disconnect,
	.name = "cpu-boost",
	.id_table = cpuboost_ids,
};

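/*
 * Create the boost workqueue, start one CPU-bound sync kthread per
 * possible CPU, then hook into cpufreq policy notifications, the
 * migration notifier chain, and the input subsystem.
 */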
static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread,
					(void *)(long)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
	}
	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
	atomic_notifier_chain_register(&migration_notifier_head,
				       &boost_migration_nb);
	ret = input_register_handler(&cpuboost_input_handler);
	if (ret)
		pr_err("Failed to register input handler: %d\n", ret);

	return 0;
}
late_initcall(cpu_boost_init);