/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>

struct cpu_sync {
	struct task_struct *thread;
	wait_queue_head_t sync_wq;
	struct delayed_work boost_rem;
	struct delayed_work input_boost_rem;
	int cpu;
	spinlock_t lock;
	bool pending;
	int src_cpu;
	unsigned int boost_min;
	unsigned int input_boost_min;
};

static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

static unsigned int boost_ms;
module_param(boost_ms, uint, 0644);

static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

static unsigned int input_boost_freq;
module_param(input_boost_freq, uint, 0644);

static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

static u64 last_input_time;
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
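
/*
 * All of the tunables above are plain module parameters. A hypothetical
 * usage example from userspace (the "cpu_boost" module name is assumed
 * from the source file name; the kHz values are board-specific examples):
 *
 *   echo 1190400 > /sys/module/cpu_boost/parameters/input_boost_freq
 *   echo 40 > /sys/module/cpu_boost/parameters/input_boost_ms
 *   echo 20 > /sys/module/cpu_boost/parameters/boost_ms
 *   echo 960000 > /sys/module/cpu_boost/parameters/sync_threshold
 */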

/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wake-up code. Achieve this by binding the thread to the respective
 * CPU. But a CPU going offline unbinds threads from that CPU. So, set the
 * affinity up again each time the CPU comes back up. We can use CPUFREQ_START
 * to figure out when a CPU is coming online, instead of registering for
 * hotplug notifiers.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	switch (val) {
	case CPUFREQ_ADJUST:
		if (!b_min && !ib_min)
			break;

		min = max(b_min, ib_min);

		pr_debug("CPU%u policy min before boost: %u kHz\n",
			 cpu, policy->min);
		pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

		cpufreq_verify_within_limits(policy, min, UINT_MAX);

		pr_debug("CPU%u policy min after boost: %u kHz\n",
			 cpu, policy->min);
		break;

	case CPUFREQ_START:
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};
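
/*
 * Delayed-work handlers that clear the respective boost floor and force a
 * policy re-evaluation so the adjust notifier can drop the min back down.
 */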
static void do_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  boost_rem.work);

	pr_debug("Removing boost for CPU%d\n", s->cpu);
	s->boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}

static void do_input_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  input_boost_rem.work);

	pr_debug("Removing input boost for CPU%d\n", s->cpu);
	s->input_boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}
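
/*
 * Per-CPU kthread that services migration boost requests: when a task
 * migrates to this CPU from a faster one, temporarily raise this CPU's
 * policy min to the source CPU's current frequency (capped at
 * sync_threshold, if set) for boost_ms milliseconds.
 */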
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%ukHz >= CPU%d@%ukHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
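
/*
 * Runs on the atomic migration notifier chain, so just record the request
 * and wake the destination CPU's sync thread; the boost itself can sleep
 * and is therefore done in the kthread.
 */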
static int boost_migration_notify(struct notifier_block *nb,
				unsigned long dest_cpu, void *arg)
{
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

	if (!boost_ms)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n", (int) arg, (int) dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = (int) arg;
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->sync_wq);

	return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};
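
/*
 * Raise the policy min of every online CPU to input_boost_freq and queue
 * delayed work to drop the boost again after input_boost_ms milliseconds.
 */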
static void do_input_boost(struct work_struct *work)
{
	unsigned int i;
	int ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
	for_each_online_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}
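
/*
 * Input event callback. Boosts are rate-limited to one per
 * MIN_INPUT_INTERVAL and skipped while a previous boost request is still
 * pending.
 */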
static void cpuboost_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
	u64 now;

	if (!input_boost_freq)
		return;

	now = ktime_to_us(ktime_get());
	if (now - last_input_time < MIN_INPUT_INTERVAL)
		return;

	if (work_pending(&input_boost_work))
		return;

	queue_work(cpu_boost_wq, &input_boost_work);
	last_input_time = ktime_to_us(ktime_get());
}
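
/*
 * Standard input-handler connect/disconnect pair: open a handle on each
 * matching device so cpuboost_input_event() receives its events.
 */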
static int cpuboost_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
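
/*
 * Device types that count as user activity for boosting purposes:
 * multi-touch touchscreens, touchpads and keypads.
 */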
static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

static struct input_handler cpuboost_input_handler = {
	.event = cpuboost_input_event,
	.connect = cpuboost_input_connect,
	.disconnect = cpuboost_input_disconnect,
	.name = "cpu-boost",
	.id_table = cpuboost_ids,
};
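
/*
 * Set up per-CPU state and the migration sync threads, then register for
 * cpufreq policy events, migration notifications and input events.
 */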
static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
	}
	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);
	ret = input_register_handler(&cpuboost_input_handler);

	return ret;
}
late_initcall(cpu_boost_init);