blob: 78df5a74822eb5d875f7928920230204930f1ce0 [file] [log] [blame]
/*
 * Generic implementation of a polled input device
 *
 * Copyright (c) 2007 Dmitry Torokhov
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
10
Joe Perchesda0c4902010-11-29 23:33:07 -080011#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040013#include <linux/jiffies.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040015#include <linux/mutex.h>
Dmitry Torokhove490ebd2011-04-27 23:20:16 -070016#include <linux/workqueue.h>
Paul Gortmakerd2d84422011-07-03 13:53:48 -040017#include <linux/module.h>
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040018#include <linux/input-polldev.h>
19
/* Module metadata. */
MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Generic implementation of a polled input device");
MODULE_LICENSE("GPL v2");
Eric Piel36bd52a2007-05-22 23:28:03 -040023
Samu Onkalodad725d2009-11-13 21:13:22 -080024static void input_polldev_queue_work(struct input_polled_dev *dev)
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040025{
Stephen Hemminger374766b2007-11-21 14:03:37 -050026 unsigned long delay;
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040027
Stephen Hemminger374766b2007-11-21 14:03:37 -050028 delay = msecs_to_jiffies(dev->poll_interval);
29 if (delay >= HZ)
30 delay = round_jiffies_relative(delay);
31
Dmitry Torokhove490ebd2011-04-27 23:20:16 -070032 queue_delayed_work(system_freezable_wq, &dev->work, delay);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040033}
34
Samu Onkalodad725d2009-11-13 21:13:22 -080035static void input_polled_device_work(struct work_struct *work)
36{
37 struct input_polled_dev *dev =
38 container_of(work, struct input_polled_dev, work.work);
39
40 dev->poll(dev);
41 input_polldev_queue_work(dev);
42}
43
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040044static int input_open_polled_device(struct input_dev *input)
45{
Dmitry Torokhov3797fec2008-04-02 00:41:00 -040046 struct input_polled_dev *dev = input_get_drvdata(input);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040047
Samu Onkalob0aba1e2009-10-18 00:38:57 -070048 if (dev->open)
49 dev->open(dev);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040050
Samu Onkalo11bb4cc2009-11-23 10:01:33 -080051 /* Only start polling if polling is enabled */
Dmitry Torokhov5e3e4eb2011-08-02 22:22:46 -070052 if (dev->poll_interval > 0) {
53 dev->poll(dev);
54 input_polldev_queue_work(dev);
55 }
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040056
57 return 0;
58}
59
60static void input_close_polled_device(struct input_dev *input)
61{
Dmitry Torokhov3797fec2008-04-02 00:41:00 -040062 struct input_polled_dev *dev = input_get_drvdata(input);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040063
Stephen Hemminger374766b2007-11-21 14:03:37 -050064 cancel_delayed_work_sync(&dev->work);
Samu Onkalob0aba1e2009-10-18 00:38:57 -070065
66 if (dev->close)
67 dev->close(dev);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -040068}
69
Samu Onkalodad725d2009-11-13 21:13:22 -080070/* SYSFS interface */
71
72static ssize_t input_polldev_get_poll(struct device *dev,
73 struct device_attribute *attr, char *buf)
74{
75 struct input_polled_dev *polldev = dev_get_drvdata(dev);
76
77 return sprintf(buf, "%d\n", polldev->poll_interval);
78}
79
80static ssize_t input_polldev_set_poll(struct device *dev,
81 struct device_attribute *attr, const char *buf,
82 size_t count)
83{
84 struct input_polled_dev *polldev = dev_get_drvdata(dev);
85 struct input_dev *input = polldev->input;
JJ Ding76496e72011-11-09 10:20:14 -080086 unsigned int interval;
87 int err;
Samu Onkalodad725d2009-11-13 21:13:22 -080088
JJ Ding76496e72011-11-09 10:20:14 -080089 err = kstrtouint(buf, 0, &interval);
90 if (err)
91 return err;
Samu Onkalodad725d2009-11-13 21:13:22 -080092
93 if (interval < polldev->poll_interval_min)
94 return -EINVAL;
95
96 if (interval > polldev->poll_interval_max)
97 return -EINVAL;
98
99 mutex_lock(&input->mutex);
100
101 polldev->poll_interval = interval;
102
103 if (input->users) {
104 cancel_delayed_work_sync(&polldev->work);
105 if (polldev->poll_interval > 0)
106 input_polldev_queue_work(polldev);
107 }
108
109 mutex_unlock(&input->mutex);
110
111 return count;
112}
113
114static DEVICE_ATTR(poll, S_IRUGO | S_IWUSR, input_polldev_get_poll,
115 input_polldev_set_poll);
116
117
118static ssize_t input_polldev_get_max(struct device *dev,
119 struct device_attribute *attr, char *buf)
120{
121 struct input_polled_dev *polldev = dev_get_drvdata(dev);
122
123 return sprintf(buf, "%d\n", polldev->poll_interval_max);
124}
125
126static DEVICE_ATTR(max, S_IRUGO, input_polldev_get_max, NULL);
127
128static ssize_t input_polldev_get_min(struct device *dev,
129 struct device_attribute *attr, char *buf)
130{
131 struct input_polled_dev *polldev = dev_get_drvdata(dev);
132
133 return sprintf(buf, "%d\n", polldev->poll_interval_min);
134}
135
136static DEVICE_ATTR(min, S_IRUGO, input_polldev_get_min, NULL);
137
138static struct attribute *sysfs_attrs[] = {
139 &dev_attr_poll.attr,
140 &dev_attr_max.attr,
141 &dev_attr_min.attr,
142 NULL
143};
144
145static struct attribute_group input_polldev_attribute_group = {
146 .attrs = sysfs_attrs
147};
148
Dmitry Torokhovd1fefd52014-04-28 13:16:00 -0700149static const struct attribute_group *input_polldev_attribute_groups[] = {
150 &input_polldev_attribute_group,
151 NULL
152};
153
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400154/**
Dmitry Torokhov2546bcc2011-01-31 21:06:34 -0800155 * input_allocate_polled_device - allocate memory for polled device
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400156 *
157 * The function allocates memory for a polled device and also
158 * for an input device associated with this polled device.
159 */
160struct input_polled_dev *input_allocate_polled_device(void)
161{
162 struct input_polled_dev *dev;
163
164 dev = kzalloc(sizeof(struct input_polled_dev), GFP_KERNEL);
165 if (!dev)
166 return NULL;
167
168 dev->input = input_allocate_device();
169 if (!dev->input) {
170 kfree(dev);
171 return NULL;
172 }
173
174 return dev;
175}
176EXPORT_SYMBOL(input_allocate_polled_device);
177
/* devres payload: remembers which polled device a devres entry manages. */
struct input_polled_devres {
	struct input_polled_dev *polldev;
};
181
182static int devm_input_polldev_match(struct device *dev, void *res, void *data)
183{
184 struct input_polled_devres *devres = res;
185
186 return devres->polldev == data;
187}
188
189static void devm_input_polldev_release(struct device *dev, void *res)
190{
191 struct input_polled_devres *devres = res;
192 struct input_polled_dev *polldev = devres->polldev;
193
194 dev_dbg(dev, "%s: dropping reference/freeing %s\n",
195 __func__, dev_name(&polldev->input->dev));
196
197 input_put_device(polldev->input);
198 kfree(polldev);
199}
200
201static void devm_input_polldev_unregister(struct device *dev, void *res)
202{
203 struct input_polled_devres *devres = res;
204 struct input_polled_dev *polldev = devres->polldev;
205
206 dev_dbg(dev, "%s: unregistering device %s\n",
207 __func__, dev_name(&polldev->input->dev));
208 input_unregister_device(polldev->input);
209
210 /*
211 * Note that we are still holding extra reference to the input
212 * device so it will stick around until devm_input_polldev_release()
213 * is called.
214 */
215}
216
217/**
218 * devm_input_allocate_polled_device - allocate managed polled device
219 * @dev: device owning the polled device being created
220 *
221 * Returns prepared &struct input_polled_dev or %NULL.
222 *
223 * Managed polled input devices do not need to be explicitly unregistered
224 * or freed as it will be done automatically when owner device unbinds
225 * from * its driver (or binding fails). Once such managed polled device
226 * is allocated, it is ready to be set up and registered in the same
227 * fashion as regular polled input devices (using
228 * input_register_polled_device() function).
229 *
230 * If you want to manually unregister and free such managed polled devices,
231 * it can be still done by calling input_unregister_polled_device() and
232 * input_free_polled_device(), although it is rarely needed.
233 *
234 * NOTE: the owner device is set up as parent of input device and users
235 * should not override it.
236 */
237struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev)
238{
239 struct input_polled_dev *polldev;
240 struct input_polled_devres *devres;
241
242 devres = devres_alloc(devm_input_polldev_release, sizeof(*devres),
243 GFP_KERNEL);
244 if (!devres)
245 return NULL;
246
247 polldev = input_allocate_polled_device();
248 if (!polldev) {
249 devres_free(devres);
250 return NULL;
251 }
252
253 polldev->input->dev.parent = dev;
254 polldev->devres_managed = true;
255
256 devres->polldev = polldev;
257 devres_add(dev, devres);
258
259 return polldev;
260}
261EXPORT_SYMBOL(devm_input_allocate_polled_device);
262
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400263/**
264 * input_free_polled_device - free memory allocated for polled device
265 * @dev: device to free
266 *
267 * The function frees memory allocated for polling device and drops
Dmitry Torokhov36203c42009-12-04 10:22:23 -0800268 * reference to the associated input device.
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400269 */
270void input_free_polled_device(struct input_polled_dev *dev)
271{
272 if (dev) {
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700273 if (dev->devres_managed)
274 WARN_ON(devres_destroy(dev->input->dev.parent,
275 devm_input_polldev_release,
276 devm_input_polldev_match,
277 dev));
278 input_put_device(dev->input);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400279 kfree(dev);
280 }
281}
282EXPORT_SYMBOL(input_free_polled_device);
283
284/**
285 * input_register_polled_device - register polled device
286 * @dev: device to register
287 *
288 * The function registers previously initialized polled input device
289 * with input layer. The device should be allocated with call to
290 * input_allocate_polled_device(). Callers should also set up poll()
291 * method and set up capabilities (id, name, phys, bits) of the
Dmitry Torokhov2546bcc2011-01-31 21:06:34 -0800292 * corresponding input_dev structure.
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400293 */
294int input_register_polled_device(struct input_polled_dev *dev)
295{
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700296 struct input_polled_devres *devres = NULL;
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400297 struct input_dev *input = dev->input;
Samu Onkalodad725d2009-11-13 21:13:22 -0800298 int error;
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400299
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700300 if (dev->devres_managed) {
301 devres = devres_alloc(devm_input_polldev_unregister,
302 sizeof(*devres), GFP_KERNEL);
303 if (!devres)
304 return -ENOMEM;
305
306 devres->polldev = dev;
307 }
308
Dmitry Torokhov3797fec2008-04-02 00:41:00 -0400309 input_set_drvdata(input, dev);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400310 INIT_DELAYED_WORK(&dev->work, input_polled_device_work);
Dmitry Torokhovd1fefd52014-04-28 13:16:00 -0700311
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400312 if (!dev->poll_interval)
313 dev->poll_interval = 500;
Samu Onkalodad725d2009-11-13 21:13:22 -0800314 if (!dev->poll_interval_max)
315 dev->poll_interval_max = dev->poll_interval;
Dmitry Torokhovd1fefd52014-04-28 13:16:00 -0700316
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400317 input->open = input_open_polled_device;
318 input->close = input_close_polled_device;
319
Dmitry Torokhovd1fefd52014-04-28 13:16:00 -0700320 input->dev.groups = input_polldev_attribute_groups;
321
Samu Onkalodad725d2009-11-13 21:13:22 -0800322 error = input_register_device(input);
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700323 if (error) {
324 devres_free(devres);
Samu Onkalodad725d2009-11-13 21:13:22 -0800325 return error;
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700326 }
Samu Onkalodad725d2009-11-13 21:13:22 -0800327
Dmitry Torokhov36203c42009-12-04 10:22:23 -0800328 /*
329 * Take extra reference to the underlying input device so
330 * that it survives call to input_unregister_polled_device()
331 * and is deleted only after input_free_polled_device()
332 * has been invoked. This is needed to ease task of freeing
333 * sparse keymaps.
334 */
335 input_get_device(input);
336
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700337 if (dev->devres_managed) {
338 dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
339 __func__, dev_name(&input->dev));
340 devres_add(input->dev.parent, devres);
341 }
342
Samu Onkalodad725d2009-11-13 21:13:22 -0800343 return 0;
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400344}
345EXPORT_SYMBOL(input_register_polled_device);
346
347/**
348 * input_unregister_polled_device - unregister polled device
349 * @dev: device to unregister
350 *
351 * The function unregisters previously registered polled input
352 * device from input layer. Polling is stopped and device is
353 * ready to be freed with call to input_free_polled_device().
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400354 */
355void input_unregister_polled_device(struct input_polled_dev *dev)
356{
Dmitry Torokhovbf1de972014-04-28 10:49:51 -0700357 if (dev->devres_managed)
358 WARN_ON(devres_destroy(dev->input->dev.parent,
359 devm_input_polldev_unregister,
360 devm_input_polldev_match,
361 dev));
362
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400363 input_unregister_device(dev->input);
Dmitry Torokhov0dcd8072007-04-29 23:42:45 -0400364}
365EXPORT_SYMBOL(input_unregister_polled_device);