blob: f66cf3843585c742405577044bd5129859848be5 [file] [log] [blame]
Saravana Kannanbd1b66e2013-07-13 01:49:09 -07001/*
2 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "cpu-boost: " fmt
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/notifier.h>
19#include <linux/cpufreq.h>
20#include <linux/sched.h>
21#include <linux/jiffies.h>
22#include <linux/kthread.h>
23#include <linux/moduleparam.h>
Rohit Gupta865e53b2013-11-26 18:20:57 -080024#include <linux/slab.h>
25#include <linux/input.h>
26#include <linux/time.h>
Saravana Kannanbd1b66e2013-07-13 01:49:09 -070027
/*
 * Per-CPU state shared by the migration sync boost and the input-event
 * boost.  One instance per CPU lives in the sync_info per-CPU variable.
 */
struct cpu_sync {
	struct task_struct *thread;	/* boost_mig_sync_thread for this CPU */
	wait_queue_head_t sync_wq;	/* wakes the sync thread on migration */
	struct delayed_work boost_rem;	/* clears boost_min after boost_ms */
	struct delayed_work input_boost_rem; /* clears input_boost_min after input_boost_ms */
	int cpu;
	spinlock_t lock;		/* protects pending and src_cpu */
	bool pending;			/* a migration boost request awaits the thread */
	int src_cpu;			/* CPU the task migrated from */
	unsigned int boost_min;		/* min freq (kHz) enforced by migration boost */
	unsigned int input_boost_min;	/* min freq (kHz) enforced by input boost */
};
40
static DEFINE_PER_CPU(struct cpu_sync, sync_info);
/* High-priority workqueue shared by boost-removal and input-boost work. */
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

/* Duration (ms) of a migration sync boost; 0 disables migration boosting. */
static unsigned int boost_ms = 50;
module_param(boost_ms, uint, 0644);

/* Frequency (kHz) cap for migration boosts; 0 means no cap. */
static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

/* Frequency (kHz) to boost online CPUs to on input events; 0 disables. */
static unsigned int input_boost_freq;
module_param(input_boost_freq, uint, 0644);

/* Duration (ms) of an input-event boost. */
static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

/* Timestamp (us) of the last input event that triggered a boost. */
static u64 last_input_time;
/* Minimum interval between two input-triggered boosts. */
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
60
Saravana Kannanbd1b66e2013-07-13 01:49:09 -070061/*
62 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
63 * make sure policy min >= boost_min. The cpufreq framework then does the job
64 * of enforcing the new policy.
65 */
66static int boost_adjust_notify(struct notifier_block *nb, unsigned long val, void *data)
67{
68 struct cpufreq_policy *policy = data;
69 unsigned int cpu = policy->cpu;
70 struct cpu_sync *s = &per_cpu(sync_info, cpu);
Rohit Gupta865e53b2013-11-26 18:20:57 -080071 unsigned int b_min = s->boost_min;
72 unsigned int ib_min = s->input_boost_min;
73 unsigned int min;
Saravana Kannanbd1b66e2013-07-13 01:49:09 -070074
75 if (val != CPUFREQ_ADJUST)
76 return NOTIFY_OK;
77
Rohit Gupta865e53b2013-11-26 18:20:57 -080078 if (!b_min && !ib_min)
Saravana Kannanbd1b66e2013-07-13 01:49:09 -070079 return NOTIFY_OK;
80
Rohit Gupta865e53b2013-11-26 18:20:57 -080081 min = max(b_min, ib_min);
82
Saravana Kannanbd1b66e2013-07-13 01:49:09 -070083 pr_debug("CPU%u policy min before boost: %u kHz\n",
84 cpu, policy->min);
85 pr_debug("CPU%u boost min: %u kHz\n", cpu, min);
86
87 cpufreq_verify_within_limits(policy, min, UINT_MAX);
88
89 pr_debug("CPU%u policy min after boost: %u kHz\n",
90 cpu, policy->min);
91
92 return NOTIFY_OK;
93}
94
/* Registered as a cpufreq policy notifier in cpu_boost_init(). */
static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};
98
99static void do_boost_rem(struct work_struct *work)
100{
101 struct cpu_sync *s = container_of(work, struct cpu_sync,
102 boost_rem.work);
103
104 pr_debug("Removing boost for CPU%d\n", s->cpu);
105 s->boost_min = 0;
106 /* Force policy re-evaluation to trigger adjust notifier. */
107 cpufreq_update_policy(s->cpu);
108}
109
Rohit Gupta865e53b2013-11-26 18:20:57 -0800110static void do_input_boost_rem(struct work_struct *work)
111{
112 struct cpu_sync *s = container_of(work, struct cpu_sync,
113 input_boost_rem.work);
114
115 pr_debug("Removing input boost for CPU%d\n", s->cpu);
116 s->input_boost_min = 0;
117 /* Force policy re-evaluation to trigger adjust notifier. */
118 cpufreq_update_policy(s->cpu);
119}
120
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700121static int boost_mig_sync_thread(void *data)
122{
123 int dest_cpu = (int) data;
124 int src_cpu, ret;
125 struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
126 struct cpufreq_policy dest_policy;
127 struct cpufreq_policy src_policy;
128 unsigned long flags;
129
130 while(1) {
131 wait_event(s->sync_wq, s->pending || kthread_should_stop());
132
133 if (kthread_should_stop())
134 break;
135
136 spin_lock_irqsave(&s->lock, flags);
137 s->pending = false;
138 src_cpu = s->src_cpu;
139 spin_unlock_irqrestore(&s->lock, flags);
140
141 ret = cpufreq_get_policy(&src_policy, src_cpu);
142 if (ret)
143 continue;
144
145 ret = cpufreq_get_policy(&dest_policy, dest_cpu);
146 if (ret)
147 continue;
148
149 if (dest_policy.cur >= src_policy.cur ) {
150 pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
151 dest_cpu, dest_policy.cur, src_cpu, src_policy.cur);
152 continue;
153 }
154
Rohit Gupta161228f2013-11-21 14:51:07 -0800155 if (sync_threshold && (dest_policy.cur >= sync_threshold))
156 continue;
157
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700158 cancel_delayed_work_sync(&s->boost_rem);
Rohit Gupta161228f2013-11-21 14:51:07 -0800159 if (sync_threshold) {
160 if (src_policy.cur >= sync_threshold)
161 s->boost_min = sync_threshold;
162 else
163 s->boost_min = src_policy.cur;
164 } else {
165 s->boost_min = src_policy.cur;
166 }
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700167 /* Force policy re-evaluation to trigger adjust notifier. */
168 cpufreq_update_policy(dest_cpu);
Rohit Gupta865e53b2013-11-26 18:20:57 -0800169 queue_delayed_work_on(s->cpu, cpu_boost_wq,
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700170 &s->boost_rem, msecs_to_jiffies(boost_ms));
171 }
172
173 return 0;
174}
175
176static int boost_migration_notify(struct notifier_block *nb,
177 unsigned long dest_cpu, void *arg)
178{
179 unsigned long flags;
180 struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
181
182 if (!boost_ms)
183 return NOTIFY_OK;
184
185 pr_debug("Migration: CPU%d --> CPU%d\n", (int) arg, (int) dest_cpu);
186 spin_lock_irqsave(&s->lock, flags);
187 s->pending = true;
188 s->src_cpu = (int) arg;
189 spin_unlock_irqrestore(&s->lock, flags);
190 wake_up(&s->sync_wq);
191
192 return NOTIFY_OK;
193}
194
/* Registered on migration_notifier_head in cpu_boost_init(). */
static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};
198
Rohit Gupta865e53b2013-11-26 18:20:57 -0800199static void do_input_boost(struct work_struct *work)
200{
201 unsigned int i, ret;
202 struct cpu_sync *i_sync_info;
203 struct cpufreq_policy policy;
204
205 for_each_online_cpu(i) {
206
207 i_sync_info = &per_cpu(sync_info, i);
208 ret = cpufreq_get_policy(&policy, i);
209 if (ret)
210 continue;
211 if (policy.cur >= input_boost_freq)
212 continue;
213
214 cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
215 i_sync_info->input_boost_min = input_boost_freq;
216 cpufreq_update_policy(i);
217 queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
218 &i_sync_info->input_boost_rem,
219 msecs_to_jiffies(input_boost_ms));
220 }
221}
222
223static void cpuboost_input_event(struct input_handle *handle,
224 unsigned int type, unsigned int code, int value)
225{
226 u64 now;
227
228 if (!input_boost_freq)
229 return;
230
231 now = ktime_to_us(ktime_get());
232 if (now - last_input_time < MIN_INPUT_INTERVAL)
233 return;
234
235 if (work_pending(&input_boost_work))
236 return;
237
238 queue_work(cpu_boost_wq, &input_boost_work);
239 last_input_time = ktime_to_us(ktime_get());
240}
241
242static int cpuboost_input_connect(struct input_handler *handler,
243 struct input_dev *dev, const struct input_device_id *id)
244{
245 struct input_handle *handle;
246 int error;
247
248 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
249 if (!handle)
250 return -ENOMEM;
251
252 handle->dev = dev;
253 handle->handler = handler;
254 handle->name = "cpufreq";
255
256 error = input_register_handle(handle);
257 if (error)
258 goto err2;
259
260 error = input_open_device(handle);
261 if (error)
262 goto err1;
263
264 return 0;
265err1:
266 input_unregister_handle(handle);
267err2:
268 kfree(handle);
269 return error;
270}
271
/* Tear down an input handle created by cpuboost_input_connect(). */
static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
278
/* Input devices whose events trigger a boost. */
static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },	/* sentinel */
};
304
/* Registered with the input core in cpu_boost_init(). */
static struct input_handler cpuboost_input_handler = {
	.event = cpuboost_input_event,
	.connect = cpuboost_input_connect,
	.disconnect = cpuboost_input_disconnect,
	.name = "cpu-boost",
	.id_table = cpuboost_ids,
};
312
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700313static int cpu_boost_init(void)
314{
Rohit Gupta865e53b2013-11-26 18:20:57 -0800315 int cpu, ret;
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700316 struct cpu_sync *s;
317
318 cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
319
Rohit Gupta865e53b2013-11-26 18:20:57 -0800320 cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
321 if (!cpu_boost_wq)
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700322 return -EFAULT;
323
Rohit Gupta865e53b2013-11-26 18:20:57 -0800324 INIT_WORK(&input_boost_work, do_input_boost);
325
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700326 for_each_possible_cpu(cpu) {
327 s = &per_cpu(sync_info, cpu);
328 s->cpu = cpu;
329 init_waitqueue_head(&s->sync_wq);
330 spin_lock_init(&s->lock);
331 INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
Rohit Gupta865e53b2013-11-26 18:20:57 -0800332 INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700333 s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
334 "boost_sync/%d", cpu);
335 }
336 atomic_notifier_chain_register(&migration_notifier_head,
337 &boost_migration_nb);
338
Rohit Gupta865e53b2013-11-26 18:20:57 -0800339 ret = input_register_handler(&cpuboost_input_handler);
Saravana Kannanbd1b66e2013-07-13 01:49:09 -0700340 return 0;
341}
342late_initcall(cpu_boost_init);