/*
 * The input core
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");

#define INPUT_MAX_CHAR_DEVICES		1024
#define INPUT_FIRST_DYNAMIC_DEV		256
static DEFINE_IDA(input_ida);

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive, which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

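/*
 * Smooth out jitter on absolute axes: a new value within fuzz/2 of the
 * previous one is dropped, and values within fuzz or 2*fuzz of it are
 * averaged towards the previous value with decreasing weight.
 */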
static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

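/*
 * Arm the software autorepeat timer for the given key. This only happens
 * when the device advertises EV_REP, has both a repeat delay and period
 * configured, and its autorepeat timer has been set up by the core
 * (dev->timer.data is non-zero).
 */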
static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (test_bit(EV_REP, dev->evbit) &&
	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
	    dev->timer.data) {
		dev->repeat_key = code;
		mod_timer(&dev->timer,
			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
	}
}

static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}

/*
 * Pass event first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static unsigned int input_to_handler(struct input_handle *handle,
			struct input_value *vals, unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	if (handler->filter) {
		for (v = vals; v != vals + count; v++) {
			if (handler->filter(handle, v->type, v->code, v->value))
				continue;
			if (end != v)
				*end = *v;
			end++;
		}
		count = end - vals;
	}

	if (!count)
		return 0;

	if (handler->events)
		handler->events(handle, vals, count);
	else if (handler->event)
		for (v = vals; v != vals + count; v++)
			handler->event(handle, v->type, v->code, v->value);

	return count;
}

/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	if (!count)
		return;

	rcu_read_lock();

	handle = rcu_dereference(dev->grab);
	if (handle) {
		count = input_to_handler(handle, vals, count);
	} else {
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open) {
				count = input_to_handler(handle, vals, count);
				if (!count)
					break;
			}
	}

	rcu_read_unlock();

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}

static void input_pass_event(struct input_dev *dev,
			     unsigned int type, unsigned int code, int value)
{
	struct input_value vals[] = { { type, code, value } };

	input_pass_values(dev, vals, ARRAY_SIZE(vals));
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event(),
 * which could otherwise cause keys to get "stuck".
 */
static void input_repeat_key(unsigned long data)
{
	struct input_dev *dev = (void *) data;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
		struct input_value vals[] = {
			{ EV_KEY, dev->repeat_key, 2 },
			input_value_sync
		};

		input_pass_values(dev, vals, ARRAY_SIZE(vals));

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

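/*
 * Event dispositions returned by input_get_disposition(): a bitmask telling
 * input_handle_event() whether to drop the event, queue it for the handlers,
 * pass it to the device's own ->event() callback, emit a staged ABS_MT_SLOT
 * value first, and/or flush the queued values to the handlers.
 */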
#define INPUT_IGNORE_EVENT	0
#define INPUT_PASS_TO_HANDLERS	1
#define INPUT_PASS_TO_DEVICE	2
#define INPUT_SLOT		4
#define INPUT_FLUSH		8
#define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)

static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
						dev->absinfo[code].fuzz);
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}

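/*
 * Decide what to do with a single event: update the device's internal
 * key/switch/LED/sound state as needed and return a bitmask of INPUT_*
 * disposition flags. For EV_ABS the value may be rewritten in place
 * (defuzzing, staged slot handling), which is why it is passed by pointer.
 */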
static int input_get_disposition(struct input_dev *dev,
			  unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;
		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}

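/*
 * Core event processing: compute the disposition, mix the event into the
 * entropy pool, call the device's ->event() hook if requested, buffer the
 * value in dev->vals, and pass the accumulated packet to the handlers on
 * SYN_REPORT (or earlier, if the buffer is about to overflow).
 */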
static void input_handle_event(struct input_dev *dev,
			       unsigned int type, unsigned int code, int value)
{
	int disposition = input_get_disposition(dev, type, code, &value);

	if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
		add_input_randomness(type, code, value);

	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (!dev->vals)
		return;

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after an input device has
 * been allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used to
 * 'seed' the initial state of a switch or the initial position of an
 * absolute axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

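/*
 * Illustrative use (not part of this file): a keyboard driver would report
 * a key press from its interrupt handler roughly as
 *
 *	input_event(dev, EV_KEY, KEY_A, 1);
 *	input_event(dev, EV_SYN, SYN_REPORT, 0);
 *
 * usually via the input_report_key() and input_sync() wrappers.
 */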
/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore the event if the device is
 * "grabbed" and the handle injecting the event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (!dev->absinfo)
		dev->absinfo = kcalloc(ABS_CNT, sizeof(struct input_absinfo),
					GFP_KERNEL);

	WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);
}
EXPORT_SYMBOL(input_set_abs_params);

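/*
 * Illustrative use (the values are made up): a touchscreen driver declares
 * its axes at probe time with e.g.
 *
 *	input_set_abs_params(input, ABS_X, 0, 1023, 4, 8);
 *	input_set_abs_params(input, ABS_Y, 0, 1023, 4, 8);
 *
 * which allocates dev->absinfo and sets EV_ABS and the axis bits as a side
 * effect.
 */
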
/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle, all events generated by
 * the device are delivered only to this handle. Also, events injected
 * by other input handles are ignored while the device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (!dev->users++ && dev->open)
		retval = dev->open(dev);

	if (retval) {
		dev->users--;
		if (!--handle->open) {
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
		}
	}

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!--dev->users && dev->close)
		dev->close(dev);

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_event()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static void input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_pass_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}

		if (need_sync)
			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);

		memset(dev->key, 0, sizeof(dev->key));
	}
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here, but they will
	 * not reach any handlers.
	 */
	input_dev_release_keys(dev);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert scancode stored in &struct
 * input_keymap_entry into scalar form understood by legacy keymap handling
 * methods. These methods expect scancodes to be represented as 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);

/*
 * Those routines handle the default case where no [gs]etkeycode() is
 * defined. In this case, an array indexed by the scancode is used.
 */

static unsigned int input_fetch_keycode(struct input_dev *dev,
					unsigned int index)
{
	switch (dev->keycodesize) {
	case 1:
		return ((u8 *)dev->keycode)[index];

	case 2:
		return ((u16 *)dev->keycode)[index];

	default:
		return ((u32 *)dev->keycode)[index];
	}
}

static int input_default_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	unsigned int index;
	int error;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	ke->keycode = input_fetch_keycode(dev, index);
	ke->index = index;
	ke->len = sizeof(index);
	memcpy(ke->scancode, &index, sizeof(index));

	return 0;
}

static int input_default_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	unsigned int index;
	int error;
	int i;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	if (dev->keycodesize < sizeof(ke->keycode) &&
			(ke->keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
	case 1: {
		u8 *k = (u8 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	case 2: {
		u16 *k = (u16 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	default: {
		u32 *k = (u32 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	}

	if (*old_keycode <= KEY_MAX) {
		__clear_bit(*old_keycode, dev->keybit);
		for (i = 0; i < dev->keycodemax; i++) {
			if (input_fetch_keycode(dev, i) == *old_keycode) {
				__set_bit(*old_keycode, dev->keybit);
				/* Setting the bit twice is useless, so break */
				break;
			}
		}
	}

	__set_bit(ke->keycode, dev->keybit);
	return 0;
}

/**
 * input_get_keycode - retrieve keycode currently mapped to a given scancode
 * @dev: input device which keymap is being queried
 * @ke: keymap entry
 *
 * This function should be called by anyone interested in retrieving the
 * current keymap. Presently evdev handlers use it.
 */
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->event_lock, flags);
	retval = dev->getkeycode(dev, ke);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_get_keycode);

/**
 * input_set_keycode - assign a keycode to a given scancode
 * @dev: input device which keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update the current
 * keymap. Presently keyboard and evdev handlers use it.
 */
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned long flags;
	unsigned int old_keycode;
	int retval;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	spin_lock_irqsave(&dev->event_lock, flags);

	retval = dev->setkeycode(dev, ke, &old_keycode);
	if (retval)
		goto out;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (old_keycode > KEY_MAX) {
		dev_warn(dev->dev.parent ?: &dev->dev,
			 "%s: got too big old keycode %#x\n",
			 __func__, old_keycode);
	} else if (test_bit(EV_KEY, dev->evbit) &&
		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
		   __test_and_clear_bit(old_keycode, dev->key)) {
		struct input_value vals[] = {
			{ EV_KEY, old_keycode, 0 },
			input_value_sync
		};

		input_pass_values(dev, vals, ARRAY_SIZE(vals));
	}

 out:
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_set_keycode);

static const struct input_device_id *input_match_device(struct input_handler *handler,
							struct input_dev *dev)
{
	const struct input_device_id *id;

	for (id = handler->id_table; id->flags || id->driver_info; id++) {

		if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
			if (id->bustype != dev->id.bustype)
				continue;

		if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
			if (id->vendor != dev->id.vendor)
				continue;

		if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
			if (id->product != dev->id.product)
				continue;

		if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
			if (id->version != dev->id.version)
				continue;

		if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
			continue;

		if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
			continue;

		if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
			continue;

		if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
			continue;

		if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
			continue;

		if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
			continue;

		if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
			continue;

		if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
			continue;

		if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
			continue;

		if (!handler->match || handler->match(handler, dev))
			return id;
	}

	return NULL;
}

static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);

	return error;
}

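/*
 * Printing helper for the /proc/bus/input/devices bitmaps below: a compat
 * (32-bit) reader gets the bitmap broken into 32-bit words, while a native
 * reader gets full unsigned longs.
 */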
#ifdef CONFIG_COMPAT

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	int len = 0;

	if (in_compat_syscall()) {
		u32 dword = bits >> 32;
		if (dword || !skip_empty)
			len += snprintf(buf, buf_size, "%x ", dword);

		dword = bits & 0xffffffffUL;
		if (dword || !skip_empty || len)
			len += snprintf(buf + len, max(buf_size - len, 0),
					"%x", dword);
	} else {
		if (bits || !skip_empty)
			len += snprintf(buf, buf_size, "%lx", bits);
	}

	return len;
}

#else /* !CONFIG_COMPAT */

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	return bits || !skip_empty ?
		snprintf(buf, buf_size, "%lx", bits) : 0;
}

#endif

Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | #ifdef CONFIG_PROC_FS |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1052 | |
| 1053 | static struct proc_dir_entry *proc_bus_input_dir; |
| 1054 | static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); |
| 1055 | static int input_devices_state; |
| 1056 | |
| 1057 | static inline void input_wakeup_procfs_readers(void) |
| 1058 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | input_devices_state++; |
| 1060 | wake_up(&input_devices_poll_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | } |
| 1062 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1063 | static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | { |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1065 | poll_wait(file, &input_devices_poll_wait, wait); |
Dmitry Torokhov | fa88661 | 2009-03-04 00:52:20 -0800 | [diff] [blame] | 1066 | if (file->f_version != input_devices_state) { |
| 1067 | file->f_version = input_devices_state; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1068 | return POLLIN | POLLRDNORM; |
Dmitry Torokhov | fa88661 | 2009-03-04 00:52:20 -0800 | [diff] [blame] | 1069 | } |
Dmitry Torokhov | 1e0afb2 | 2006-06-26 01:48:47 -0400 | [diff] [blame] | 1070 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1071 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1074 | union input_seq_state { |
| 1075 | struct { |
| 1076 | unsigned short pos; |
| 1077 | bool mutex_acquired; |
| 1078 | }; |
| 1079 | void *p; |
| 1080 | }; |
| 1081 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1082 | static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) |
| 1083 | { |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1084 | union input_seq_state *state = (union input_seq_state *)&seq->private; |
| 1085 | int error; |
| 1086 | |
| 1087 | /* We need to fit into seq->private pointer */ |
| 1088 | BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); |
| 1089 | |
| 1090 | error = mutex_lock_interruptible(&input_mutex); |
| 1091 | if (error) { |
| 1092 | state->mutex_acquired = false; |
| 1093 | return ERR_PTR(error); |
| 1094 | } |
| 1095 | |
| 1096 | state->mutex_acquired = true; |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1097 | |
Pavel Emelianov | ad5d972 | 2007-07-18 00:38:32 -0400 | [diff] [blame] | 1098 | return seq_list_start(&input_dev_list, *pos); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1099 | } |
| 1100 | |
| 1101 | static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 1102 | { |
Pavel Emelianov | ad5d972 | 2007-07-18 00:38:32 -0400 | [diff] [blame] | 1103 | return seq_list_next(v, &input_dev_list, pos); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1104 | } |
| 1105 | |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1106 | static void input_seq_stop(struct seq_file *seq, void *v) |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1107 | { |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1108 | union input_seq_state *state = (union input_seq_state *)&seq->private; |
| 1109 | |
| 1110 | if (state->mutex_acquired) |
| 1111 | mutex_unlock(&input_mutex); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1112 | } |
| 1113 | |
| 1114 | static void input_seq_print_bitmap(struct seq_file *seq, const char *name, |
| 1115 | unsigned long *bitmap, int max) |
| 1116 | { |
| 1117 | int i; |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1118 | bool skip_empty = true; |
| 1119 | char buf[18]; |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1120 | |
| 1121 | seq_printf(seq, "B: %s=", name); |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1122 | |
| 1123 | for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { |
| 1124 | if (input_bits_to_string(buf, sizeof(buf), |
| 1125 | bitmap[i], skip_empty)) { |
| 1126 | skip_empty = false; |
| 1127 | seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); |
| 1128 | } |
| 1129 | } |
| 1130 | |
| 1131 | /* |
| 1132 | * If no output was produced print a single 0. |
| 1133 | */ |
| 1134 | if (skip_empty) |
| 1135 | seq_puts(seq, "0"); |
| 1136 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1137 | seq_putc(seq, '\n'); |
| 1138 | } |
| 1139 | |
| 1140 | static int input_devices_seq_show(struct seq_file *seq, void *v) |
| 1141 | { |
| 1142 | struct input_dev *dev = container_of(v, struct input_dev, node); |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1143 | const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | struct input_handle *handle; |
| 1145 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1146 | seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", |
| 1147 | dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1149 | seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); |
| 1150 | seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); |
| 1151 | seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); |
Dmitry Torokhov | 15e03ae | 2007-03-07 23:20:17 -0500 | [diff] [blame] | 1152 | seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1153 | seq_printf(seq, "H: Handlers="); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1155 | list_for_each_entry(handle, &dev->h_list, d_node) |
| 1156 | seq_printf(seq, "%s ", handle->name); |
| 1157 | seq_putc(seq, '\n'); |
Dmitry Torokhov | 051b2fe | 2005-09-15 02:01:54 -0500 | [diff] [blame] | 1158 | |
Henrik Rydberg | 85b7720 | 2010-12-18 20:51:13 +0100 | [diff] [blame] | 1159 | input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX); |
| 1160 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1161 | input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); |
| 1162 | if (test_bit(EV_KEY, dev->evbit)) |
| 1163 | input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); |
| 1164 | if (test_bit(EV_REL, dev->evbit)) |
| 1165 | input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); |
| 1166 | if (test_bit(EV_ABS, dev->evbit)) |
| 1167 | input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); |
| 1168 | if (test_bit(EV_MSC, dev->evbit)) |
| 1169 | input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); |
| 1170 | if (test_bit(EV_LED, dev->evbit)) |
| 1171 | input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); |
| 1172 | if (test_bit(EV_SND, dev->evbit)) |
| 1173 | input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); |
| 1174 | if (test_bit(EV_FF, dev->evbit)) |
| 1175 | input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); |
| 1176 | if (test_bit(EV_SW, dev->evbit)) |
| 1177 | input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1179 | seq_putc(seq, '\n'); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1181 | kfree(path); |
| 1182 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | } |
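
/*
 * The resulting /proc/bus/input/devices record looks roughly like the
 * following hand-written sketch (all values are hypothetical; one "B:"
 * line appears per event type the device declares in its evbit):
 *
 *	I: Bus=0003 Vendor=046d Product=c31c Version=0111
 *	N: Name="Example USB Keyboard"
 *	P: Phys=usb-0000:00:14.0-2/input0
 *	S: Sysfs=/devices/pci0000:00/0000:00:14.0/usb1/1-2/1-2:1.0/input/input5
 *	U: Uniq=
 *	H: Handlers=sysrq kbd event5
 *	B: PROP=0
 *	B: EV=120013
 *	B: KEY=10000 7 ff9f207a c14057ff febeffdf ffefffff ffffffff fffffffe
 *	B: MSC=10
 *	B: LED=7
 */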
| 1184 | |
Jan Engelhardt | cec69c3 | 2008-01-31 00:43:32 -0500 | [diff] [blame] | 1185 | static const struct seq_operations input_devices_seq_ops = { |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1186 | .start = input_devices_seq_start, |
| 1187 | .next = input_devices_seq_next, |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1188 | .stop = input_seq_stop, |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1189 | .show = input_devices_seq_show, |
| 1190 | }; |
| 1191 | |
| 1192 | static int input_proc_devices_open(struct inode *inode, struct file *file) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | { |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1194 | return seq_open(file, &input_devices_seq_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | } |
| 1196 | |
Arjan van de Ven | 2b8693c | 2007-02-12 00:55:32 -0800 | [diff] [blame] | 1197 | static const struct file_operations input_devices_fileops = { |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1198 | .owner = THIS_MODULE, |
| 1199 | .open = input_proc_devices_open, |
| 1200 | .poll = input_proc_devices_poll, |
| 1201 | .read = seq_read, |
| 1202 | .llseek = seq_lseek, |
| 1203 | .release = seq_release, |
| 1204 | }; |
| 1205 | |
| 1206 | static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) |
| 1207 | { |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1208 | union input_seq_state *state = (union input_seq_state *)&seq->private; |
| 1209 | int error; |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 1210 | |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1211 | /* We need to fit into seq->private pointer */ |
| 1212 | BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); |
| 1213 | |
| 1214 | error = mutex_lock_interruptible(&input_mutex); |
| 1215 | if (error) { |
| 1216 | state->mutex_acquired = false; |
| 1217 | return ERR_PTR(error); |
| 1218 | } |
| 1219 | |
| 1220 | state->mutex_acquired = true; |
| 1221 | state->pos = *pos; |
| 1222 | |
Pavel Emelianov | ad5d972 | 2007-07-18 00:38:32 -0400 | [diff] [blame] | 1223 | return seq_list_start(&input_handler_list, *pos); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1224 | } |
| 1225 | |
| 1226 | static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 1227 | { |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1228 | union input_seq_state *state = (union input_seq_state *)&seq->private; |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1229 | |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1230 | state->pos = *pos + 1; |
| 1231 | return seq_list_next(v, &input_handler_list, pos); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1232 | } |
| 1233 | |
| 1234 | static int input_handlers_seq_show(struct seq_file *seq, void *v) |
| 1235 | { |
| 1236 | struct input_handler *handler = container_of(v, struct input_handler, node); |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1237 | union input_seq_state *state = (union input_seq_state *)&seq->private; |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1238 | |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1239 | seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); |
Dmitry Torokhov | ef7995f | 2010-01-29 23:59:12 -0800 | [diff] [blame] | 1240 | if (handler->filter) |
| 1241 | seq_puts(seq, " (filter)"); |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 1242 | if (handler->legacy_minors) |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1243 | seq_printf(seq, " Minor=%d", handler->minor); |
| 1244 | seq_putc(seq, '\n'); |
| 1245 | |
| 1246 | return 0; |
| 1247 | } |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1248 | |
Jan Engelhardt | cec69c3 | 2008-01-31 00:43:32 -0500 | [diff] [blame] | 1249 | static const struct seq_operations input_handlers_seq_ops = { |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1250 | .start = input_handlers_seq_start, |
| 1251 | .next = input_handlers_seq_next, |
Dmitry Torokhov | 1572ca2 | 2009-10-13 23:37:30 -0700 | [diff] [blame] | 1252 | .stop = input_seq_stop, |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1253 | .show = input_handlers_seq_show, |
| 1254 | }; |
| 1255 | |
| 1256 | static int input_proc_handlers_open(struct inode *inode, struct file *file) |
| 1257 | { |
| 1258 | return seq_open(file, &input_handlers_seq_ops); |
| 1259 | } |
| 1260 | |
Arjan van de Ven | 2b8693c | 2007-02-12 00:55:32 -0800 | [diff] [blame] | 1261 | static const struct file_operations input_handlers_fileops = { |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1262 | .owner = THIS_MODULE, |
| 1263 | .open = input_proc_handlers_open, |
| 1264 | .read = seq_read, |
| 1265 | .llseek = seq_lseek, |
| 1266 | .release = seq_release, |
| 1267 | }; |
Luke Kosewski | e334016fc | 2005-06-01 02:39:28 -0500 | [diff] [blame] | 1268 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | static int __init input_proc_init(void) |
| 1270 | { |
| 1271 | struct proc_dir_entry *entry; |
| 1272 | |
Alexey Dobriyan | 9c37066 | 2008-04-29 01:01:41 -0700 | [diff] [blame] | 1273 | proc_bus_input_dir = proc_mkdir("bus/input", NULL); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1274 | if (!proc_bus_input_dir) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | return -ENOMEM; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1276 | |
Denis V. Lunev | c7705f3 | 2008-04-29 01:02:35 -0700 | [diff] [blame] | 1277 | entry = proc_create("devices", 0, proc_bus_input_dir, |
| 1278 | &input_devices_fileops); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1279 | if (!entry) |
| 1280 | goto fail1; |
| 1281 | |
Denis V. Lunev | c7705f3 | 2008-04-29 01:02:35 -0700 | [diff] [blame] | 1282 | entry = proc_create("handlers", 0, proc_bus_input_dir, |
| 1283 | &input_handlers_fileops); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1284 | if (!entry) |
| 1285 | goto fail2; |
| 1286 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 | return 0; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1288 | |
| 1289 | fail2: remove_proc_entry("devices", proc_bus_input_dir); |
Alexey Dobriyan | 9c37066 | 2008-04-29 01:01:41 -0700 | [diff] [blame] | 1290 | fail1: remove_proc_entry("bus/input", NULL); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1291 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1293 | |
Andrew Morton | beffbdc | 2005-07-01 23:54:30 -0500 | [diff] [blame] | 1294 | static void input_proc_exit(void) |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1295 | { |
| 1296 | remove_proc_entry("devices", proc_bus_input_dir); |
| 1297 | remove_proc_entry("handlers", proc_bus_input_dir); |
Alexey Dobriyan | 9c37066 | 2008-04-29 01:01:41 -0700 | [diff] [blame] | 1298 | remove_proc_entry("bus/input", NULL); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1299 | } |
| 1300 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | #else /* !CONFIG_PROC_FS */ |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1302 | static inline void input_wakeup_procfs_readers(void) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | static inline int input_proc_init(void) { return 0; } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 1304 | static inline void input_proc_exit(void) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | #endif |
| 1306 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1307 | #define INPUT_DEV_STRING_ATTR_SHOW(name) \ |
| 1308 | static ssize_t input_dev_show_##name(struct device *dev, \ |
| 1309 | struct device_attribute *attr, \ |
| 1310 | char *buf) \ |
| 1311 | { \ |
| 1312 | struct input_dev *input_dev = to_input_dev(dev); \ |
| 1313 | \ |
| 1314 | return scnprintf(buf, PAGE_SIZE, "%s\n", \ |
| 1315 | input_dev->name ? input_dev->name : ""); \ |
| 1316 | } \ |
| 1317 | static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1318 | |
| 1319 | INPUT_DEV_STRING_ATTR_SHOW(name); |
| 1320 | INPUT_DEV_STRING_ATTR_SHOW(phys); |
| 1321 | INPUT_DEV_STRING_ATTR_SHOW(uniq); |
| 1322 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1323 | static int input_print_modalias_bits(char *buf, int size, |
| 1324 | char name, unsigned long *bm, |
| 1325 | unsigned int min_bit, unsigned int max_bit) |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1326 | { |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1327 | int len = 0, i; |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1328 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1329 | len += snprintf(buf, max(size, 0), "%c", name); |
| 1330 | for (i = min_bit; i < max_bit; i++) |
Jiri Slaby | 7b19ada | 2007-10-18 23:40:32 -0700 | [diff] [blame] | 1331 | if (bm[BIT_WORD(i)] & BIT_MASK(i)) |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1332 | len += snprintf(buf + len, max(size - len, 0), "%X,", i); |
Kay Sievers | bd37e5a | 2006-01-05 13:19:55 +0100 | [diff] [blame] | 1333 | return len; |
| 1334 | } |
| 1335 | |
Dmitry Torokhov | 2db6687 | 2006-04-02 00:09:26 -0500 | [diff] [blame] | 1336 | static int input_print_modalias(char *buf, int size, struct input_dev *id, |
| 1337 | int add_cr) |
Kay Sievers | bd37e5a | 2006-01-05 13:19:55 +0100 | [diff] [blame] | 1338 | { |
| 1339 | int len; |
| 1340 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1341 | len = snprintf(buf, max(size, 0), |
| 1342 | "input:b%04Xv%04Xp%04Xe%04X-", |
| 1343 | id->id.bustype, id->id.vendor, |
| 1344 | id->id.product, id->id.version); |
Kay Sievers | bd37e5a | 2006-01-05 13:19:55 +0100 | [diff] [blame] | 1345 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1346 | len += input_print_modalias_bits(buf + len, size - len, |
| 1347 | 'e', id->evbit, 0, EV_MAX); |
| 1348 | len += input_print_modalias_bits(buf + len, size - len, |
| 1349 | 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); |
| 1350 | len += input_print_modalias_bits(buf + len, size - len, |
| 1351 | 'r', id->relbit, 0, REL_MAX); |
| 1352 | len += input_print_modalias_bits(buf + len, size - len, |
| 1353 | 'a', id->absbit, 0, ABS_MAX); |
| 1354 | len += input_print_modalias_bits(buf + len, size - len, |
| 1355 | 'm', id->mscbit, 0, MSC_MAX); |
| 1356 | len += input_print_modalias_bits(buf + len, size - len, |
| 1357 | 'l', id->ledbit, 0, LED_MAX); |
| 1358 | len += input_print_modalias_bits(buf + len, size - len, |
| 1359 | 's', id->sndbit, 0, SND_MAX); |
| 1360 | len += input_print_modalias_bits(buf + len, size - len, |
| 1361 | 'f', id->ffbit, 0, FF_MAX); |
| 1362 | len += input_print_modalias_bits(buf + len, size - len, |
| 1363 | 'w', id->swbit, 0, SW_MAX); |
Dmitry Torokhov | 2db6687 | 2006-04-02 00:09:26 -0500 | [diff] [blame] | 1364 | |
| 1365 | if (add_cr) |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1366 | len += snprintf(buf + len, max(size - len, 0), "\n"); |
Dmitry Torokhov | 2db6687 | 2006-04-02 00:09:26 -0500 | [diff] [blame] | 1367 | |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1368 | return len; |
| 1369 | } |
| 1370 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1371 | static ssize_t input_dev_show_modalias(struct device *dev, |
| 1372 | struct device_attribute *attr, |
| 1373 | char *buf) |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1374 | { |
| 1375 | struct input_dev *id = to_input_dev(dev); |
Kay Sievers | bd37e5a | 2006-01-05 13:19:55 +0100 | [diff] [blame] | 1376 | ssize_t len; |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1377 | |
Dmitry Torokhov | 2db6687 | 2006-04-02 00:09:26 -0500 | [diff] [blame] | 1378 | len = input_print_modalias(buf, PAGE_SIZE, id, 1); |
| 1379 | |
Richard Purdie | 8a3cf45 | 2006-06-26 01:48:21 -0400 | [diff] [blame] | 1380 | return min_t(int, len, PAGE_SIZE); |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1381 | } |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1382 | static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); |
Rusty Russell | 1d8f430 | 2005-12-07 21:40:34 +0100 | [diff] [blame] | 1383 | |
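/*
 * For illustration only (bus/vendor/product values are hypothetical): a
 * USB keyboard could end up advertising a modalias string such as
 *
 *	input:b0003v046DpC31Ce0111-e0,1,4,11,14,k71,72,73,ram4,l0,1,2,sfw
 *
 * i.e. the b/v/p/e header followed by one letter per event type with the
 * hex codes of the set bits, exactly as assembled by input_print_modalias()
 * above. User space (udev/modprobe) matches this string against module
 * aliases to autoload input handlers.
 */
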
Henrik Rydberg | 85b7720 | 2010-12-18 20:51:13 +0100 | [diff] [blame] | 1384 | static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, |
| 1385 | int max, int add_cr); |
| 1386 | |
| 1387 | static ssize_t input_dev_show_properties(struct device *dev, |
| 1388 | struct device_attribute *attr, |
| 1389 | char *buf) |
| 1390 | { |
| 1391 | struct input_dev *input_dev = to_input_dev(dev); |
| 1392 | int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit, |
| 1393 | INPUT_PROP_MAX, true); |
| 1394 | return min_t(int, len, PAGE_SIZE); |
| 1395 | } |
| 1396 | static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL); |
| 1397 | |
Greg Kroah-Hartman | 629b77a | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 1398 | static struct attribute *input_dev_attrs[] = { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1399 | &dev_attr_name.attr, |
| 1400 | &dev_attr_phys.attr, |
| 1401 | &dev_attr_uniq.attr, |
| 1402 | &dev_attr_modalias.attr, |
Henrik Rydberg | 85b7720 | 2010-12-18 20:51:13 +0100 | [diff] [blame] | 1403 | &dev_attr_properties.attr, |
Greg Kroah-Hartman | 629b77a | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 1404 | NULL |
| 1405 | }; |
| 1406 | |
Dmitry Torokhov | bd0ef23 | 2005-11-20 00:56:31 -0500 | [diff] [blame] | 1407 | static struct attribute_group input_dev_attr_group = { |
Greg Kroah-Hartman | 629b77a | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 1408 | .attrs = input_dev_attrs, |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1409 | }; |
| 1410 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1411 | #define INPUT_DEV_ID_ATTR(name) \ |
| 1412 | static ssize_t input_dev_show_id_##name(struct device *dev, \ |
| 1413 | struct device_attribute *attr, \ |
| 1414 | char *buf) \ |
| 1415 | { \ |
| 1416 | struct input_dev *input_dev = to_input_dev(dev); \ |
| 1417 | return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \ |
| 1418 | } \ |
| 1419 | static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1420 | |
| 1421 | INPUT_DEV_ID_ATTR(bustype); |
| 1422 | INPUT_DEV_ID_ATTR(vendor); |
| 1423 | INPUT_DEV_ID_ATTR(product); |
| 1424 | INPUT_DEV_ID_ATTR(version); |
| 1425 | |
| 1426 | static struct attribute *input_dev_id_attrs[] = { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1427 | &dev_attr_bustype.attr, |
| 1428 | &dev_attr_vendor.attr, |
| 1429 | &dev_attr_product.attr, |
| 1430 | &dev_attr_version.attr, |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1431 | NULL |
| 1432 | }; |
| 1433 | |
| 1434 | static struct attribute_group input_dev_id_attr_group = { |
| 1435 | .name = "id", |
| 1436 | .attrs = input_dev_id_attrs, |
| 1437 | }; |
| 1438 | |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1439 | static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, |
| 1440 | int max, int add_cr) |
| 1441 | { |
| 1442 | int i; |
| 1443 | int len = 0; |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1444 | bool skip_empty = true; |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1445 | |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1446 | for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { |
| 1447 | len += input_bits_to_string(buf + len, max(buf_size - len, 0), |
| 1448 | bitmap[i], skip_empty); |
| 1449 | if (len) { |
| 1450 | skip_empty = false; |
| 1451 | if (i > 0) |
| 1452 | len += snprintf(buf + len, max(buf_size - len, 0), " "); |
| 1453 | } |
| 1454 | } |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1455 | |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1456 | /* |
| 1457 | * If no output was produced print a single 0. |
| 1458 | */ |
| 1459 | if (len == 0) |
| 1460 | len = snprintf(buf, buf_size, "%d", 0); |
Dmitry Torokhov | 969b21c | 2006-04-02 00:09:34 -0500 | [diff] [blame] | 1461 | |
| 1462 | if (add_cr) |
| 1463 | len += snprintf(buf + len, max(buf_size - len, 0), "\n"); |
| 1464 | |
| 1465 | return len; |
| 1466 | } |
| 1467 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1468 | #define INPUT_DEV_CAP_ATTR(ev, bm) \ |
| 1469 | static ssize_t input_dev_show_cap_##bm(struct device *dev, \ |
| 1470 | struct device_attribute *attr, \ |
| 1471 | char *buf) \ |
| 1472 | { \ |
| 1473 | struct input_dev *input_dev = to_input_dev(dev); \ |
| 1474 | int len = input_print_bitmap(buf, PAGE_SIZE, \ |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1475 | input_dev->bm##bit, ev##_MAX, \ |
| 1476 | true); \ |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1477 | return min_t(int, len, PAGE_SIZE); \ |
| 1478 | } \ |
| 1479 | static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1480 | |
| 1481 | INPUT_DEV_CAP_ATTR(EV, ev); |
| 1482 | INPUT_DEV_CAP_ATTR(KEY, key); |
| 1483 | INPUT_DEV_CAP_ATTR(REL, rel); |
| 1484 | INPUT_DEV_CAP_ATTR(ABS, abs); |
| 1485 | INPUT_DEV_CAP_ATTR(MSC, msc); |
| 1486 | INPUT_DEV_CAP_ATTR(LED, led); |
| 1487 | INPUT_DEV_CAP_ATTR(SND, snd); |
| 1488 | INPUT_DEV_CAP_ATTR(FF, ff); |
| 1489 | INPUT_DEV_CAP_ATTR(SW, sw); |
| 1490 | |
| 1491 | static struct attribute *input_dev_caps_attrs[] = { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1492 | &dev_attr_ev.attr, |
| 1493 | &dev_attr_key.attr, |
| 1494 | &dev_attr_rel.attr, |
| 1495 | &dev_attr_abs.attr, |
| 1496 | &dev_attr_msc.attr, |
| 1497 | &dev_attr_led.attr, |
| 1498 | &dev_attr_snd.attr, |
| 1499 | &dev_attr_ff.attr, |
| 1500 | &dev_attr_sw.attr, |
Dmitry Torokhov | 5c1e9a6 | 2005-09-15 02:01:55 -0500 | [diff] [blame] | 1501 | NULL |
| 1502 | }; |
| 1503 | |
| 1504 | static struct attribute_group input_dev_caps_attr_group = { |
| 1505 | .name = "capabilities", |
| 1506 | .attrs = input_dev_caps_attrs, |
| 1507 | }; |
| 1508 | |
David Brownell | a4dbd67 | 2009-06-24 10:06:31 -0700 | [diff] [blame] | 1509 | static const struct attribute_group *input_dev_attr_groups[] = { |
Dmitry Torokhov | cb9def4 | 2007-03-07 23:20:26 -0500 | [diff] [blame] | 1510 | &input_dev_attr_group, |
| 1511 | &input_dev_id_attr_group, |
| 1512 | &input_dev_caps_attr_group, |
| 1513 | NULL |
| 1514 | }; |
| 1515 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1516 | static void input_dev_release(struct device *device) |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1517 | { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1518 | struct input_dev *dev = to_input_dev(device); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1519 | |
Anssi Hannula | 509ca1a | 2006-07-19 01:40:22 -0400 | [diff] [blame] | 1520 | input_ff_destroy(dev); |
Henrik Rydberg | 40d007e | 2010-07-15 23:10:10 -0700 | [diff] [blame] | 1521 | input_mt_destroy_slots(dev); |
Daniel Mack | d31b286 | 2010-08-02 20:18:21 -0700 | [diff] [blame] | 1522 | kfree(dev->absinfo); |
Henrik Rydberg | 4369c64 | 2012-09-15 15:23:35 +0200 | [diff] [blame] | 1523 | kfree(dev->vals); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1524 | kfree(dev); |
Anssi Hannula | 509ca1a | 2006-07-19 01:40:22 -0400 | [diff] [blame] | 1525 | |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1526 | module_put(THIS_MODULE); |
| 1527 | } |
| 1528 | |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1529 | /* |
Kay Sievers | 312c004 | 2005-11-16 09:00:00 +0100 | [diff] [blame] | 1530 | * Input uevent interface - loading event handlers based on |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1531 | * device bitfields. |
| 1532 | */ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1533 | static int input_add_uevent_bm_var(struct kobj_uevent_env *env, |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1534 | const char *name, unsigned long *bitmap, int max) |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1535 | { |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1536 | int len; |
| 1537 | |
Henrik Rydberg | fcd3027 | 2010-12-18 20:28:26 +0100 | [diff] [blame] | 1538 | if (add_uevent_var(env, "%s", name)) |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1539 | return -ENOMEM; |
| 1540 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1541 | len = input_print_bitmap(&env->buf[env->buflen - 1], |
| 1542 | sizeof(env->buf) - env->buflen, |
Dmitry Torokhov | 15e184a | 2010-01-11 00:05:43 -0800 | [diff] [blame] | 1543 | bitmap, max, false); |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1544 | if (len >= (sizeof(env->buf) - env->buflen)) |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1545 | return -ENOMEM; |
| 1546 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1547 | env->buflen += len; |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1548 | return 0; |
| 1549 | } |
| 1550 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1551 | static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1552 | struct input_dev *dev) |
| 1553 | { |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1554 | int len; |
| 1555 | |
| 1556 | if (add_uevent_var(env, "MODALIAS=")) |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1557 | return -ENOMEM; |
| 1558 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1559 | len = input_print_modalias(&env->buf[env->buflen - 1], |
| 1560 | sizeof(env->buf) - env->buflen, |
| 1561 | dev, 0); |
| 1562 | if (len >= (sizeof(env->buf) - env->buflen)) |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1563 | return -ENOMEM; |
| 1564 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1565 | env->buflen += len; |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1566 | return 0; |
| 1567 | } |
| 1568 | |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1569 | #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ |
| 1570 | do { \ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1571 | int err = add_uevent_var(env, fmt, val); \ |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1572 | if (err) \ |
| 1573 | return err; \ |
| 1574 | } while (0) |
| 1575 | |
| 1576 | #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ |
| 1577 | do { \ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1578 | int err = input_add_uevent_bm_var(env, name, bm, max); \ |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1579 | if (err) \ |
| 1580 | return err; \ |
| 1581 | } while (0) |
| 1582 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1583 | #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ |
| 1584 | do { \ |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1585 | int err = input_add_uevent_modalias_var(env, dev); \ |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1586 | if (err) \ |
| 1587 | return err; \ |
| 1588 | } while (0) |
| 1589 | |
Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1590 | static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1591 | { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1592 | struct input_dev *dev = to_input_dev(device); |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1593 | |
| 1594 | INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", |
| 1595 | dev->id.bustype, dev->id.vendor, |
| 1596 | dev->id.product, dev->id.version); |
| 1597 | if (dev->name) |
| 1598 | INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); |
| 1599 | if (dev->phys) |
| 1600 | INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); |
Dmitry Torokhov | 08de1f0 | 2005-11-08 21:34:29 -0800 | [diff] [blame] | 1601 | if (dev->uniq) |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1602 | INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); |
| 1603 | |
Henrik Rydberg | 85b7720 | 2010-12-18 20:51:13 +0100 | [diff] [blame] | 1604 | INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX); |
| 1605 | |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1606 | INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); |
| 1607 | if (test_bit(EV_KEY, dev->evbit)) |
| 1608 | INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); |
| 1609 | if (test_bit(EV_REL, dev->evbit)) |
| 1610 | INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); |
| 1611 | if (test_bit(EV_ABS, dev->evbit)) |
| 1612 | INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); |
| 1613 | if (test_bit(EV_MSC, dev->evbit)) |
| 1614 | INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); |
| 1615 | if (test_bit(EV_LED, dev->evbit)) |
| 1616 | INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); |
| 1617 | if (test_bit(EV_SND, dev->evbit)) |
| 1618 | INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); |
| 1619 | if (test_bit(EV_FF, dev->evbit)) |
| 1620 | INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); |
| 1621 | if (test_bit(EV_SW, dev->evbit)) |
| 1622 | INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); |
| 1623 | |
Dmitry Torokhov | ac648a6 | 2006-04-02 00:09:51 -0500 | [diff] [blame] | 1624 | INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); |
Dmitry Torokhov | a7fadbe | 2005-09-15 02:01:57 -0500 | [diff] [blame] | 1625 | |
| 1626 | return 0; |
| 1627 | } |
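
/*
 * Sketch of the resulting uevent environment (values invented for the
 * example; the KEY and MODALIAS strings are abbreviated here):
 *
 *	PRODUCT=3/46d/c31c/111
 *	NAME="Example USB Keyboard"
 *	PHYS="usb-0000:00:14.0-2/input0"
 *	PROP=0
 *	EV=120013
 *	MODALIAS=input:b0003v046DpC31Ce0111-e0,1,4,11,14,k71,...
 */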
| 1628 | |
Dmitry Torokhov | 3cc9635 | 2009-11-12 23:19:05 -0800 | [diff] [blame] | 1629 | #define INPUT_DO_TOGGLE(dev, type, bits, on) \ |
| 1630 | do { \ |
| 1631 | int i; \ |
| 1632 | bool active; \ |
| 1633 | \ |
| 1634 | if (!test_bit(EV_##type, dev->evbit)) \ |
| 1635 | break; \ |
| 1636 | \ |
Anshul Garg | 3e2b03d | 2015-06-25 13:33:12 -0700 | [diff] [blame] | 1637 | for_each_set_bit(i, dev->bits##bit, type##_CNT) { \ |
Dmitry Torokhov | 3cc9635 | 2009-11-12 23:19:05 -0800 | [diff] [blame] | 1638 | active = test_bit(i, dev->bits); \ |
| 1639 | if (!active && !on) \ |
| 1640 | continue; \ |
| 1641 | \ |
| 1642 | dev->event(dev, EV_##type, i, on ? active : 0); \ |
| 1643 | } \ |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1644 | } while (0) |
| 1645 | |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1646 | static void input_dev_toggle(struct input_dev *dev, bool activate) |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1647 | { |
| 1648 | if (!dev->event) |
| 1649 | return; |
| 1650 | |
| 1651 | INPUT_DO_TOGGLE(dev, LED, led, activate); |
| 1652 | INPUT_DO_TOGGLE(dev, SND, snd, activate); |
| 1653 | |
| 1654 | if (activate && test_bit(EV_REP, dev->evbit)) { |
| 1655 | dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); |
| 1656 | dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); |
| 1657 | } |
| 1658 | } |
| 1659 | |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1660 | /** |
| 1661 | * input_reset_device() - reset/restore the state of input device |
| 1662 | * @dev: input device whose state needs to be reset |
| 1663 | * |
| 1664 | * This function tries to reset the state of an opened input device and |
| 1665 | * bring internal state and the state of the hardware in sync with each other.
| 1666 | * We mark all keys as released, restore LED state, repeat rate, etc. |
| 1667 | */ |
| 1668 | void input_reset_device(struct input_dev *dev) |
| 1669 | { |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1670 | unsigned long flags; |
| 1671 | |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1672 | mutex_lock(&dev->mutex); |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1673 | spin_lock_irqsave(&dev->event_lock, flags); |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1674 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1675 | input_dev_toggle(dev, true); |
| 1676 | input_dev_release_keys(dev); |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1677 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1678 | spin_unlock_irqrestore(&dev->event_lock, flags); |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1679 | mutex_unlock(&dev->mutex); |
| 1680 | } |
| 1681 | EXPORT_SYMBOL(input_reset_device); |
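
/*
 * Typical call site, sketched with hypothetical driver code: after an
 * operation that may have clobbered hardware state (a firmware reload,
 * for instance) a driver can ask the core to re-sync:
 *
 *	input_reset_device(priv->input);
 *
 * Keys the core believes are pressed get released, and LED, sound and
 * repeat settings are replayed to the device.
 */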
| 1682 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1683 | #ifdef CONFIG_PM_SLEEP |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1684 | static int input_dev_suspend(struct device *dev) |
| 1685 | { |
| 1686 | struct input_dev *input_dev = to_input_dev(dev); |
| 1687 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1688 | spin_lock_irq(&input_dev->event_lock); |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1689 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1690 | /* |
| 1691 | * Keys that are pressed now are unlikely to be |
| 1692 | * still pressed when we resume. |
| 1693 | */ |
| 1694 | input_dev_release_keys(input_dev); |
Dmitry Torokhov | b50b521 | 2010-11-03 11:02:31 -0700 | [diff] [blame] | 1695 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1696 | /* Turn off LEDs and sounds, if any are active. */ |
| 1697 | input_dev_toggle(input_dev, false); |
| 1698 | |
| 1699 | spin_unlock_irq(&input_dev->event_lock); |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1700 | |
| 1701 | return 0; |
| 1702 | } |
| 1703 | |
| 1704 | static int input_dev_resume(struct device *dev) |
| 1705 | { |
| 1706 | struct input_dev *input_dev = to_input_dev(dev); |
| 1707 | |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1708 | spin_lock_irq(&input_dev->event_lock); |
| 1709 | |
| 1710 | /* Restore state of LEDs and sounds, if any were active. */ |
| 1711 | input_dev_toggle(input_dev, true); |
| 1712 | |
| 1713 | spin_unlock_irq(&input_dev->event_lock); |
| 1714 | |
| 1715 | return 0; |
| 1716 | } |
| 1717 | |
| 1718 | static int input_dev_freeze(struct device *dev) |
| 1719 | { |
| 1720 | struct input_dev *input_dev = to_input_dev(dev); |
| 1721 | |
| 1722 | spin_lock_irq(&input_dev->event_lock); |
| 1723 | |
| 1724 | /* |
| 1725 | * Keys that are pressed now are unlikely to be |
| 1726 | * still pressed when we resume. |
| 1727 | */ |
| 1728 | input_dev_release_keys(input_dev); |
| 1729 | |
| 1730 | spin_unlock_irq(&input_dev->event_lock); |
| 1731 | |
| 1732 | return 0; |
| 1733 | } |
| 1734 | |
| 1735 | static int input_dev_poweroff(struct device *dev) |
| 1736 | { |
| 1737 | struct input_dev *input_dev = to_input_dev(dev); |
| 1738 | |
| 1739 | spin_lock_irq(&input_dev->event_lock); |
| 1740 | |
| 1741 | /* Turn off LEDs and sounds, if any are active. */ |
| 1742 | input_dev_toggle(input_dev, false); |
| 1743 | |
| 1744 | spin_unlock_irq(&input_dev->event_lock); |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1745 | |
| 1746 | return 0; |
| 1747 | } |
| 1748 | |
| 1749 | static const struct dev_pm_ops input_dev_pm_ops = { |
| 1750 | .suspend = input_dev_suspend, |
| 1751 | .resume = input_dev_resume, |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1752 | .freeze = input_dev_freeze, |
| 1753 | .poweroff = input_dev_poweroff, |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1754 | .restore = input_dev_resume, |
| 1755 | }; |
| 1756 | #endif /* CONFIG_PM_SLEEP */
| 1757 | |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1758 | static struct device_type input_dev_type = { |
| 1759 | .groups = input_dev_attr_groups, |
| 1760 | .release = input_dev_release, |
| 1761 | .uevent = input_dev_uevent, |
Aleksej Makarov | 768d9aa | 2013-11-23 10:20:36 -0800 | [diff] [blame] | 1762 | #ifdef CONFIG_PM_SLEEP |
Dmitry Torokhov | ffd0db9 | 2009-09-16 01:06:43 -0700 | [diff] [blame] | 1763 | .pm = &input_dev_pm_ops, |
| 1764 | #endif |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1765 | }; |
| 1766 | |
Al Viro | 2c9ede5 | 2011-07-23 20:24:48 -0400 | [diff] [blame] | 1767 | static char *input_devnode(struct device *dev, umode_t *mode) |
Kay Sievers | aa5ed63 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 1768 | { |
| 1769 | return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); |
| 1770 | } |
| 1771 | |
Greg Kroah-Hartman | ea9f240 | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 1772 | struct class input_class = { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1773 | .name = "input", |
Kay Sievers | e454cea | 2009-09-18 23:01:12 +0200 | [diff] [blame] | 1774 | .devnode = input_devnode, |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1775 | }; |
Dmitry Torokhov | ca56fe0 | 2006-06-26 01:49:21 -0400 | [diff] [blame] | 1776 | EXPORT_SYMBOL_GPL(input_class); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1777 | |
Dmitry Torokhov | 1447190 | 2006-11-02 23:26:55 -0500 | [diff] [blame] | 1778 | /** |
| 1779 | * input_allocate_device - allocate memory for new input device |
| 1780 | * |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1781 | * Returns prepared struct input_dev or %NULL. |
Dmitry Torokhov | 1447190 | 2006-11-02 23:26:55 -0500 | [diff] [blame] | 1782 | * |
| 1783 | * NOTE: Use input_free_device() to free devices that have not been |
| 1784 | * registered; input_unregister_device() should be used for already |
| 1785 | * registered devices. |
| 1786 | */ |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1787 | struct input_dev *input_allocate_device(void) |
| 1788 | { |
Aniroop Mathur | 9c7d66fa | 2014-12-02 15:22:28 -0800 | [diff] [blame] | 1789 | static atomic_t input_no = ATOMIC_INIT(-1); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1790 | struct input_dev *dev; |
| 1791 | |
| 1792 | dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); |
| 1793 | if (dev) { |
Dmitry Torokhov | 9657d75 | 2007-06-14 23:32:24 -0400 | [diff] [blame] | 1794 | dev->dev.type = &input_dev_type; |
| 1795 | dev->dev.class = &input_class; |
| 1796 | device_initialize(&dev->dev); |
Dmitry Torokhov | f60d2b1 | 2006-06-26 01:48:36 -0400 | [diff] [blame] | 1797 | mutex_init(&dev->mutex); |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 1798 | spin_lock_init(&dev->event_lock); |
David Herrmann | a60a71b | 2013-10-06 01:15:08 -0700 | [diff] [blame] | 1799 | init_timer(&dev->timer); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1800 | INIT_LIST_HEAD(&dev->h_list); |
| 1801 | INIT_LIST_HEAD(&dev->node); |
Dmitry Torokhov | 655816e | 2006-09-14 01:32:14 -0400 | [diff] [blame] | 1802 | |
Richard Leitner | bf1d50f | 2014-10-08 14:24:15 -0700 | [diff] [blame] | 1803 | dev_set_name(&dev->dev, "input%lu", |
Aniroop Mathur | 9c7d66fa | 2014-12-02 15:22:28 -0800 | [diff] [blame] | 1804 | (unsigned long)atomic_inc_return(&input_no)); |
David Herrmann | a60a71b | 2013-10-06 01:15:08 -0700 | [diff] [blame] | 1805 | |
Dmitry Torokhov | 655816e | 2006-09-14 01:32:14 -0400 | [diff] [blame] | 1806 | __module_get(THIS_MODULE); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1807 | } |
| 1808 | |
| 1809 | return dev; |
| 1810 | } |
Dmitry Torokhov | ca56fe0 | 2006-06-26 01:49:21 -0400 | [diff] [blame] | 1811 | EXPORT_SYMBOL(input_allocate_device); |
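
/*
 * Minimal allocation/registration sketch (hypothetical driver code,
 * error handling trimmed); see also input_register_device() below:
 *
 *	struct input_dev *idev;
 *	int err;
 *
 *	idev = input_allocate_device();
 *	if (!idev)
 *		return -ENOMEM;
 *
 *	idev->name = "Example Button";
 *	idev->id.bustype = BUS_HOST;
 *	input_set_capability(idev, EV_KEY, KEY_POWER);
 *
 *	err = input_register_device(idev);
 *	if (err) {
 *		input_free_device(idev);
 *		return err;
 *	}
 */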
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 1812 | |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1813 | struct input_devres { |
| 1814 | struct input_dev *input; |
| 1815 | }; |
| 1816 | |
| 1817 | static int devm_input_device_match(struct device *dev, void *res, void *data) |
| 1818 | { |
| 1819 | struct input_devres *devres = res; |
| 1820 | |
| 1821 | return devres->input == data; |
| 1822 | } |
| 1823 | |
| 1824 | static void devm_input_device_release(struct device *dev, void *res) |
| 1825 | { |
| 1826 | struct input_devres *devres = res; |
| 1827 | struct input_dev *input = devres->input; |
| 1828 | |
| 1829 | dev_dbg(dev, "%s: dropping reference to %s\n", |
| 1830 | __func__, dev_name(&input->dev)); |
| 1831 | input_put_device(input); |
| 1832 | } |
| 1833 | |
| 1834 | /** |
| 1835 | * devm_input_allocate_device - allocate managed input device |
| 1836 | * @dev: device owning the input device being created |
| 1837 | * |
| 1838 | * Returns prepared struct input_dev or %NULL. |
| 1839 | * |
| 1840 | * Managed input devices do not need to be explicitly unregistered or |
| 1841 | * freed as that is done automatically when the owner device unbinds from
| 1842 | * its driver (or binding fails). Once a managed input device is allocated,
| 1843 | * it is ready to be set up and registered in the same fashion as a regular
| 1844 | * input device. There are no special devm_input_device_[un]register()
Dmitry Torokhov | b666263 | 2013-01-08 09:10:31 -0800 | [diff] [blame] | 1845 | * variants; the regular ones work with both managed and unmanaged devices,
| 1846 | * should you need them. In most cases, however, a managed input device
| 1847 | * need not be explicitly unregistered or freed.
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1848 | * |
| 1849 | * NOTE: the owner device is set up as parent of input device and users |
| 1850 | * should not override it. |
| 1851 | */ |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1852 | struct input_dev *devm_input_allocate_device(struct device *dev) |
| 1853 | { |
| 1854 | struct input_dev *input; |
| 1855 | struct input_devres *devres; |
| 1856 | |
| 1857 | devres = devres_alloc(devm_input_device_release, |
| 1858 | sizeof(struct input_devres), GFP_KERNEL); |
| 1859 | if (!devres) |
| 1860 | return NULL; |
| 1861 | |
| 1862 | input = input_allocate_device(); |
| 1863 | if (!input) { |
| 1864 | devres_free(devres); |
| 1865 | return NULL; |
| 1866 | } |
| 1867 | |
| 1868 | input->dev.parent = dev; |
| 1869 | input->devres_managed = true; |
| 1870 | |
| 1871 | devres->input = input; |
| 1872 | devres_add(dev, devres); |
| 1873 | |
| 1874 | return input; |
| 1875 | } |
| 1876 | EXPORT_SYMBOL(devm_input_allocate_device); |
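
/*
 * Managed counterpart of the sketch above (again hypothetical driver
 * code): because the allocation is devres-backed, the error and remove
 * paths need no explicit input_free_device()/input_unregister_device():
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *idev;
 *
 *		idev = devm_input_allocate_device(&pdev->dev);
 *		if (!idev)
 *			return -ENOMEM;
 *
 *		idev->name = "Example Key";
 *		input_set_capability(idev, EV_KEY, KEY_WAKEUP);
 *
 *		return input_register_device(idev);
 *	}
 */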
| 1877 | |
Dmitry Torokhov | 1447190 | 2006-11-02 23:26:55 -0500 | [diff] [blame] | 1878 | /** |
| 1879 | * input_free_device - free memory occupied by input_dev structure |
| 1880 | * @dev: input device to free |
| 1881 | * |
| 1882 | * This function should only be used if input_register_device() |
| 1883 | * was not called yet or if it failed. Once the device has been registered,
| 1884 | * use input_unregister_device(); memory will be freed once the last
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 1885 | * reference to the device is dropped.
Dmitry Torokhov | 1447190 | 2006-11-02 23:26:55 -0500 | [diff] [blame] | 1886 | * |
| 1887 | * Device should be allocated by input_allocate_device(). |
| 1888 | * |
| 1889 | * NOTE: If there are references to the input device then memory |
| 1890 | * will not be freed until the last reference is dropped.
| 1891 | */ |
Dmitry Torokhov | f60d2b1 | 2006-06-26 01:48:36 -0400 | [diff] [blame] | 1892 | void input_free_device(struct input_dev *dev) |
| 1893 | { |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1894 | if (dev) { |
| 1895 | if (dev->devres_managed) |
| 1896 | WARN_ON(devres_destroy(dev->dev.parent, |
| 1897 | devm_input_device_release, |
| 1898 | devm_input_device_match, |
| 1899 | dev)); |
Dmitry Torokhov | f60d2b1 | 2006-06-26 01:48:36 -0400 | [diff] [blame] | 1900 | input_put_device(dev); |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 1901 | } |
Dmitry Torokhov | f60d2b1 | 2006-06-26 01:48:36 -0400 | [diff] [blame] | 1902 | } |
Dmitry Torokhov | ca56fe0 | 2006-06-26 01:49:21 -0400 | [diff] [blame] | 1903 | EXPORT_SYMBOL(input_free_device); |
Dmitry Torokhov | f60d2b1 | 2006-06-26 01:48:36 -0400 | [diff] [blame] | 1904 | |
Dmitry Torokhov | 534565f | 2007-04-25 00:53:18 -0400 | [diff] [blame] | 1905 | /** |
| 1906 | * input_set_capability - mark device as capable of a certain event |
| 1907 | * @dev: device that is capable of emitting or accepting event |
| 1908 | * @type: type of the event (EV_KEY, EV_REL, etc...) |
| 1909 | * @code: event code |
| 1910 | * |
| 1911 | * In addition to setting up the corresponding bit in the appropriate
| 1912 | * capability bitmap, the function also adjusts dev->evbit.
| 1913 | */ |
| 1914 | void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) |
| 1915 | { |
| 1916 | switch (type) { |
| 1917 | case EV_KEY: |
| 1918 | __set_bit(code, dev->keybit); |
| 1919 | break; |
| 1920 | |
| 1921 | case EV_REL: |
| 1922 | __set_bit(code, dev->relbit); |
| 1923 | break; |
| 1924 | |
| 1925 | case EV_ABS: |
Dmitry Torokhov | 28a2a2e | 2013-12-26 17:44:29 -0800 | [diff] [blame] | 1926 | input_alloc_absinfo(dev); |
| 1927 | if (!dev->absinfo) |
| 1928 | return; |
| 1929 | |
Dmitry Torokhov | 534565f | 2007-04-25 00:53:18 -0400 | [diff] [blame] | 1930 | __set_bit(code, dev->absbit); |
| 1931 | break; |
| 1932 | |
| 1933 | case EV_MSC: |
| 1934 | __set_bit(code, dev->mscbit); |
| 1935 | break; |
| 1936 | |
| 1937 | case EV_SW: |
| 1938 | __set_bit(code, dev->swbit); |
| 1939 | break; |
| 1940 | |
| 1941 | case EV_LED: |
| 1942 | __set_bit(code, dev->ledbit); |
| 1943 | break; |
| 1944 | |
| 1945 | case EV_SND: |
| 1946 | __set_bit(code, dev->sndbit); |
| 1947 | break; |
| 1948 | |
| 1949 | case EV_FF: |
| 1950 | __set_bit(code, dev->ffbit); |
| 1951 | break; |
| 1952 | |
Dmitry Baryshkov | 22d1c39 | 2007-12-14 01:21:03 -0500 | [diff] [blame] | 1953 | case EV_PWR: |
| 1954 | /* do nothing */ |
| 1955 | break; |
| 1956 | |
Dmitry Torokhov | 534565f | 2007-04-25 00:53:18 -0400 | [diff] [blame] | 1957 | default: |
Joe Perches | da0c490 | 2010-11-29 23:33:07 -0800 | [diff] [blame] | 1958 | pr_err("input_set_capability: unknown type %u (code %u)\n", |
| 1959 | type, code); |
Dmitry Torokhov | 534565f | 2007-04-25 00:53:18 -0400 | [diff] [blame] | 1960 | dump_stack(); |
| 1961 | return; |
| 1962 | } |
| 1963 | |
| 1964 | __set_bit(type, dev->evbit); |
| 1965 | } |
| 1966 | EXPORT_SYMBOL(input_set_capability); |
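
/*
 * Illustrative snippet (device and codes are arbitrary): declaring a
 * device that reports one button and relative X/Y motion only needs the
 * per-code calls, the matching EV_* bits in evbit are set implicitly:
 *
 *	input_set_capability(idev, EV_KEY, BTN_LEFT);
 *	input_set_capability(idev, EV_REL, REL_X);
 *	input_set_capability(idev, EV_REL, REL_Y);
 */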
| 1967 | |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1968 | static unsigned int input_estimate_events_per_packet(struct input_dev *dev) |
| 1969 | { |
| 1970 | int mt_slots; |
| 1971 | int i; |
| 1972 | unsigned int events; |
| 1973 | |
Henrik Rydberg | 8d18fba | 2012-09-15 15:15:58 +0200 | [diff] [blame] | 1974 | if (dev->mt) { |
| 1975 | mt_slots = dev->mt->num_slots; |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1976 | } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { |
| 1977 | mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - |
| 1978 | dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
Hans Petter Selasky | 8c127f0 | 2011-05-25 09:24:32 -0700 | [diff] [blame] | 1979 | mt_slots = clamp(mt_slots, 2, 32); |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1980 | } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { |
| 1981 | mt_slots = 2; |
| 1982 | } else { |
| 1983 | mt_slots = 0; |
| 1984 | } |
| 1985 | |
| 1986 | events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ |
| 1987 | |
Anshul Garg | 3e2b03d | 2015-06-25 13:33:12 -0700 | [diff] [blame] | 1988 | if (test_bit(EV_ABS, dev->evbit)) |
| 1989 | for_each_set_bit(i, dev->absbit, ABS_CNT) |
| 1990 | events += input_is_mt_axis(i) ? mt_slots : 1; |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1991 | |
Anshul Garg | 3e2b03d | 2015-06-25 13:33:12 -0700 | [diff] [blame] | 1992 | if (test_bit(EV_REL, dev->evbit)) |
| 1993 | events += bitmap_weight(dev->relbit, REL_CNT); |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1994 | |
Henrik Rydberg | 7c75bf9 | 2012-09-01 16:15:43 +0200 | [diff] [blame] | 1995 | /* Make room for KEY and MSC events */ |
| 1996 | events += 7; |
| 1997 | |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 1998 | return events; |
| 1999 | } |
| 2000 | |
Dmitry Torokhov | 92a3a58 | 2010-01-05 17:56:01 -0800 | [diff] [blame] | 2001 | #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ |
| 2002 | do { \ |
| 2003 | if (!test_bit(EV_##type, dev->evbit)) \ |
| 2004 | memset(dev->bits##bit, 0, \ |
| 2005 | sizeof(dev->bits##bit)); \ |
| 2006 | } while (0) |
| 2007 | |
| 2008 | static void input_cleanse_bitmasks(struct input_dev *dev) |
| 2009 | { |
| 2010 | INPUT_CLEANSE_BITMASK(dev, KEY, key); |
| 2011 | INPUT_CLEANSE_BITMASK(dev, REL, rel); |
| 2012 | INPUT_CLEANSE_BITMASK(dev, ABS, abs); |
| 2013 | INPUT_CLEANSE_BITMASK(dev, MSC, msc); |
| 2014 | INPUT_CLEANSE_BITMASK(dev, LED, led); |
| 2015 | INPUT_CLEANSE_BITMASK(dev, SND, snd); |
| 2016 | INPUT_CLEANSE_BITMASK(dev, FF, ff); |
| 2017 | INPUT_CLEANSE_BITMASK(dev, SW, sw); |
| 2018 | } |
| 2019 | |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2020 | static void __input_unregister_device(struct input_dev *dev) |
| 2021 | { |
| 2022 | struct input_handle *handle, *next; |
| 2023 | |
| 2024 | input_disconnect_device(dev); |
| 2025 | |
| 2026 | mutex_lock(&input_mutex); |
| 2027 | |
| 2028 | list_for_each_entry_safe(handle, next, &dev->h_list, d_node) |
| 2029 | handle->handler->disconnect(handle); |
| 2030 | WARN_ON(!list_empty(&dev->h_list)); |
| 2031 | |
| 2032 | del_timer_sync(&dev->timer); |
| 2033 | list_del_init(&dev->node); |
| 2034 | |
| 2035 | input_wakeup_procfs_readers(); |
| 2036 | |
| 2037 | mutex_unlock(&input_mutex); |
| 2038 | |
| 2039 | device_del(&dev->dev); |
| 2040 | } |
| 2041 | |
| 2042 | static void devm_input_device_unregister(struct device *dev, void *res) |
| 2043 | { |
| 2044 | struct input_devres *devres = res; |
| 2045 | struct input_dev *input = devres->input; |
| 2046 | |
| 2047 | dev_dbg(dev, "%s: unregistering device %s\n", |
| 2048 | __func__, dev_name(&input->dev)); |
| 2049 | __input_unregister_device(input); |
| 2050 | } |
| 2051 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2052 | /** |
Petri Gynther | 027c71b | 2015-10-13 23:13:55 -0700 | [diff] [blame] | 2053 | * input_enable_softrepeat - enable software autorepeat |
| 2054 | * @dev: input device |
| 2055 | * @delay: repeat delay in milliseconds |
| 2056 | * @period: repeat period in milliseconds |
| 2057 | * |
| 2058 | * Enable software autorepeat on the input device. |
| 2059 | */ |
| 2060 | void input_enable_softrepeat(struct input_dev *dev, int delay, int period) |
| 2061 | { |
| 2062 | dev->timer.data = (unsigned long) dev; |
| 2063 | dev->timer.function = input_repeat_key; |
| 2064 | dev->rep[REP_DELAY] = delay; |
| 2065 | dev->rep[REP_PERIOD] = period; |
| 2066 | } |
| 2067 | EXPORT_SYMBOL(input_enable_softrepeat); |
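/*
 * Example (illustrative sketch; the "foo" driver and timing values are
 * hypothetical): a driver that wants the core's software autorepeat but with
 * non-default timing can do:
 *
 *	__set_bit(EV_REP, foo->input->evbit);
 *
 *	error = input_register_device(foo->input);
 *	if (error)
 *		return error;
 *
 *	input_enable_softrepeat(foo->input, 200, 50);	// 200 ms delay, 50 ms period
 */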
| 2068 | |
| 2069 | /** |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2070 | * input_register_device - register device with input core |
| 2071 | * @dev: device to be registered |
| 2072 | * |
| 2073 | * This function registers the device with the input core. The device |
| 2074 | * must be allocated with input_allocate_device() and have all of its |
| 2075 | * capabilities set up before registering. |
| 2076 | * If the function fails, the device must be freed with input_free_device(). |
| 2077 | * Once the device has been successfully registered it can be unregistered |
| 2078 | * with input_unregister_device(); input_free_device() should not be |
| 2079 | * called in this case. |
Dmitry Torokhov | b666263 | 2013-01-08 09:10:31 -0800 | [diff] [blame] | 2080 | * |
| 2081 | * Note that this function is also used to register managed input devices |
| 2082 | * (ones allocated with devm_input_allocate_device()). Such managed input |
| 2083 | * devices need not be explicitly unregistered or freed; their tear-down |
| 2084 | * is controlled by the devres infrastructure. It is also worth noting |
| 2085 | * that tear-down of a managed input device is internally a 2-step process: |
| 2086 | * the registered device is first unregistered, but stays in |
| 2087 | * memory and can still handle input_event() calls (although events will |
| 2088 | * not be delivered anywhere). The managed input device is freed later, |
| 2089 | * when the devres stack is unwound to the point where the device |
| 2090 | * allocation was made. |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2091 | */ |
Dmitry Torokhov | 5f94548 | 2005-11-02 22:51:46 -0500 | [diff] [blame] | 2092 | int input_register_device(struct input_dev *dev) |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2093 | { |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2094 | struct input_devres *devres = NULL; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2095 | struct input_handler *handler; |
Henrik Rydberg | 7c75bf9 | 2012-09-01 16:15:43 +0200 | [diff] [blame] | 2096 | unsigned int packet_size; |
Dmitry Torokhov | bd0ef23 | 2005-11-20 00:56:31 -0500 | [diff] [blame] | 2097 | const char *path; |
| 2098 | int error; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2099 | |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2100 | if (dev->devres_managed) { |
| 2101 | devres = devres_alloc(devm_input_device_unregister, |
| 2102 | sizeof(struct input_devres), GFP_KERNEL); |
| 2103 | if (!devres) |
| 2104 | return -ENOMEM; |
| 2105 | |
| 2106 | devres->input = dev; |
| 2107 | } |
| 2108 | |
Dmitry Torokhov | 4f93df4 | 2010-01-05 17:56:00 -0800 | [diff] [blame] | 2109 | /* Every input device generates EV_SYN/SYN_REPORT events. */ |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2110 | __set_bit(EV_SYN, dev->evbit); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2111 | |
Dmitry Torokhov | 4f93df4 | 2010-01-05 17:56:00 -0800 | [diff] [blame] | 2112 | /* KEY_RESERVED is not supposed to be transmitted to userspace. */ |
| 2113 | __clear_bit(KEY_RESERVED, dev->keybit); |
| 2114 | |
Dmitry Torokhov | 92a3a58 | 2010-01-05 17:56:01 -0800 | [diff] [blame] | 2115 | /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ |
| 2116 | input_cleanse_bitmasks(dev); |
| 2117 | |
Henrik Rydberg | 7c75bf9 | 2012-09-01 16:15:43 +0200 | [diff] [blame] | 2118 | packet_size = input_estimate_events_per_packet(dev); |
| 2119 | if (dev->hint_events_per_packet < packet_size) |
| 2120 | dev->hint_events_per_packet = packet_size; |
Jeff Brown | 80b4895 | 2011-04-18 10:08:02 -0700 | [diff] [blame] | 2121 | |
Kang Hu | 95079b8 | 2013-10-31 00:47:53 -0700 | [diff] [blame] | 2122 | dev->max_vals = dev->hint_events_per_packet + 2; |
Henrik Rydberg | 4369c64 | 2012-09-15 15:23:35 +0200 | [diff] [blame] | 2123 | dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL); |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2124 | if (!dev->vals) { |
| 2125 | error = -ENOMEM; |
| 2126 | goto err_devres_free; |
| 2127 | } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2128 | |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2129 | /* |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2130 | * If delay and period are pre-set by the driver, then autorepeating |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2131 | * is handled by the driver itself and we don't do it in input.c. |
Dmitry Torokhov | 5f94548 | 2005-11-02 22:51:46 -0500 | [diff] [blame] | 2132 | */ |
Petri Gynther | 027c71b | 2015-10-13 23:13:55 -0700 | [diff] [blame] | 2133 | if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) |
| 2134 | input_enable_softrepeat(dev, 250, 33); |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2135 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2136 | if (!dev->getkeycode) |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2137 | dev->getkeycode = input_default_getkeycode; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2138 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2139 | if (!dev->setkeycode) |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2140 | dev->setkeycode = input_default_setkeycode; |
| 2141 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2142 | error = device_add(&dev->dev); |
| 2143 | if (error) |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2144 | goto err_free_vals; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2145 | |
| 2146 | path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); |
| 2147 | pr_info("%s as %s\n", |
| 2148 | dev->name ? dev->name : "Unspecified device", |
| 2149 | path ? path : "N/A"); |
| 2150 | kfree(path); |
| 2151 | |
| 2152 | error = mutex_lock_interruptible(&input_mutex); |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2153 | if (error) |
| 2154 | goto err_device_del; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2155 | |
| 2156 | list_add_tail(&dev->node, &input_dev_list); |
| 2157 | |
| 2158 | list_for_each_entry(handler, &input_handler_list, node) |
| 2159 | input_attach_handler(dev, handler); |
| 2160 | |
| 2161 | input_wakeup_procfs_readers(); |
| 2162 | |
| 2163 | mutex_unlock(&input_mutex); |
| 2164 | |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2165 | if (dev->devres_managed) { |
| 2166 | dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n", |
| 2167 | __func__, dev_name(&dev->dev)); |
| 2168 | devres_add(dev->dev.parent, devres); |
| 2169 | } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2170 | return 0; |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2171 | |
| 2172 | err_device_del: |
| 2173 | device_del(&dev->dev); |
| 2174 | err_free_vals: |
| 2175 | kfree(dev->vals); |
| 2176 | dev->vals = NULL; |
| 2177 | err_devres_free: |
| 2178 | devres_free(devres); |
| 2179 | return error; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2180 | } |
| 2181 | EXPORT_SYMBOL(input_register_device); |
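/*
 * Example (illustrative sketch): the usual registration sequence in a
 * driver's probe() routine. The "foo" platform device and the KEY_POWER
 * capability are hypothetical; because the device is devres-managed, no
 * explicit input_free_device()/input_unregister_device() is needed on the
 * error or removal paths.
 *
 *	#include <linux/input.h>
 *	#include <linux/platform_device.h>
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *		int error;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "foo-power-button";
 *		input->id.bustype = BUS_HOST;
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		error = input_register_device(input);
 *		if (error)
 *			return error;
 *
 *		return 0;
 *	}
 */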
| 2182 | |
| 2183 | /** |
| 2184 | * input_unregister_device - unregister previously registered device |
| 2185 | * @dev: device to be unregistered |
| 2186 | * |
| 2187 | * This function unregisters an input device. Once the device is |
| 2188 | * unregistered the caller must not access it, as it may be freed at any moment. |
| 2189 | */ |
| 2190 | void input_unregister_device(struct input_dev *dev) |
| 2191 | { |
Dmitry Torokhov | 2be975c | 2012-11-03 12:16:12 -0700 | [diff] [blame] | 2192 | if (dev->devres_managed) { |
| 2193 | WARN_ON(devres_destroy(dev->dev.parent, |
| 2194 | devm_input_device_unregister, |
| 2195 | devm_input_device_match, |
| 2196 | dev)); |
| 2197 | __input_unregister_device(dev); |
| 2198 | /* |
| 2199 | * We do not do input_put_device() here because it will be done |
| 2200 | * when the second devres entry fires. |
| 2201 | */ |
| 2202 | } else { |
| 2203 | __input_unregister_device(dev); |
| 2204 | input_put_device(dev); |
| 2205 | } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2206 | } |
| 2207 | EXPORT_SYMBOL(input_unregister_device); |
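/*
 * Example (illustrative sketch): the matching tear-down for a non-managed
 * device in a hypothetical driver's remove() path. input_free_device() must
 * not be called once unregistration has succeeded:
 *
 *	input_unregister_device(foo->input);	// also drops the core's reference
 */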
| 2208 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2209 | /** |
| 2210 | * input_register_handler - register a new input handler |
| 2211 | * @handler: handler to be registered |
| 2212 | * |
| 2213 | * This function registers a new input handler (interface) for input |
| 2214 | * devices in the system and attaches it to all input devices that |
| 2215 | * are compatible with the handler. |
| 2216 | */ |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2217 | int input_register_handler(struct input_handler *handler) |
| 2218 | { |
| 2219 | struct input_dev *dev; |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2220 | int error; |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2221 | |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2222 | error = mutex_lock_interruptible(&input_mutex); |
| 2223 | if (error) |
| 2224 | return error; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2225 | |
| 2226 | INIT_LIST_HEAD(&handler->h_list); |
| 2227 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2228 | list_add_tail(&handler->node, &input_handler_list); |
| 2229 | |
| 2230 | list_for_each_entry(dev, &input_dev_list, node) |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2231 | input_attach_handler(dev, handler); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2232 | |
| 2233 | input_wakeup_procfs_readers(); |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2234 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2235 | mutex_unlock(&input_mutex); |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2236 | return 0; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2237 | } |
| 2238 | EXPORT_SYMBOL(input_register_handler); |
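/*
 * Example (illustrative sketch): a skeleton of a hypothetical "foo" handler,
 * loosely modelled on the in-tree evbug handler. foo_connect() is sketched
 * after input_register_handle() below, and foo_disconnect() after
 * input_unregister_handle().
 *
 *	static void foo_event(struct input_handle *handle, unsigned int type,
 *			      unsigned int code, int value)
 *	{
 *		pr_debug("%s: type %u code %u value %d\n",
 *			 handle->dev->name, type, code, value);
 *	}
 *
 *	static const struct input_device_id foo_ids[] = {
 *		{ .driver_info = 1 },	// match every input device
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(input, foo_ids);
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return input_register_handler(&foo_handler);
 *	}
 */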
| 2239 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2240 | /** |
| 2241 | * input_unregister_handler - unregisters an input handler |
| 2242 | * @handler: handler to be unregistered |
| 2243 | * |
| 2244 | * This function disconnects a handler from its input devices and |
| 2245 | * removes it from the list of known handlers. |
| 2246 | */ |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2247 | void input_unregister_handler(struct input_handler *handler) |
| 2248 | { |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2249 | struct input_handle *handle, *next; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2250 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2251 | mutex_lock(&input_mutex); |
| 2252 | |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2253 | list_for_each_entry_safe(handle, next, &handler->h_list, h_node) |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2254 | handler->disconnect(handle); |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2255 | WARN_ON(!list_empty(&handler->h_list)); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2256 | |
| 2257 | list_del_init(&handler->node); |
| 2258 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2259 | input_wakeup_procfs_readers(); |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2260 | |
| 2261 | mutex_unlock(&input_mutex); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2262 | } |
Dmitry Torokhov | ca56fe0 | 2006-06-26 01:49:21 -0400 | [diff] [blame] | 2263 | EXPORT_SYMBOL(input_unregister_handler); |
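/*
 * Example (illustrative sketch): the counterpart to the registration sketch
 * above, in the hypothetical handler's module exit path:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		input_unregister_handler(&foo_handler);
 *	}
 */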
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2264 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2265 | /** |
Dmitry Torokhov | 66d2a59 | 2009-12-01 21:54:35 -0800 | [diff] [blame] | 2266 | * input_handler_for_each_handle - handle iterator |
| 2267 | * @handler: input handler to iterate |
| 2268 | * @data: data for the callback |
| 2269 | * @fn: function to be called for each handle |
| 2270 | * |
| 2271 | * Iterate over @handler's list of handles, and call @fn for each, passing |
| 2272 | * it @data, stopping when @fn returns a non-zero value. The function uses |
Shailendra Verma | ec8beff | 2015-05-18 10:44:33 -0700 | [diff] [blame] | 2273 | * RCU to traverse the list and therefore may be used in atomic |
Dmitry Torokhov | 66d2a59 | 2009-12-01 21:54:35 -0800 | [diff] [blame] | 2274 | * contexts. The @fn callback is invoked from an RCU critical section and |
| 2275 | * thus must not sleep. |
| 2276 | */ |
| 2277 | int input_handler_for_each_handle(struct input_handler *handler, void *data, |
| 2278 | int (*fn)(struct input_handle *, void *)) |
| 2279 | { |
| 2280 | struct input_handle *handle; |
| 2281 | int retval = 0; |
| 2282 | |
| 2283 | rcu_read_lock(); |
| 2284 | |
| 2285 | list_for_each_entry_rcu(handle, &handler->h_list, h_node) { |
| 2286 | retval = fn(handle, data); |
| 2287 | if (retval) |
| 2288 | break; |
| 2289 | } |
| 2290 | |
| 2291 | rcu_read_unlock(); |
| 2292 | |
| 2293 | return retval; |
| 2294 | } |
| 2295 | EXPORT_SYMBOL(input_handler_for_each_handle); |
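/*
 * Example (illustrative sketch): a callback for the iterator above; the
 * "foo" handler and the message are hypothetical. The callback runs under
 * rcu_read_lock() and therefore must not sleep.
 *
 *	static int foo_show_handle(struct input_handle *handle, void *data)
 *	{
 *		pr_info("%s bound to %s\n", handle->name,
 *			dev_name(&handle->dev->dev));
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	input_handler_for_each_handle(&foo_handler, NULL, foo_show_handle);
 */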
| 2296 | |
| 2297 | /** |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2298 | * input_register_handle - register a new input handle |
| 2299 | * @handle: handle to register |
| 2300 | * |
| 2301 | * This function puts a new input handle onto device's |
| 2302 | * and handler's lists so that events can flow through |
| 2303 | * it once it is opened using input_open_device(). |
| 2304 | * |
| 2305 | * This function is supposed to be called from handler's |
| 2306 | * connect() method. |
| 2307 | */ |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2308 | int input_register_handle(struct input_handle *handle) |
| 2309 | { |
| 2310 | struct input_handler *handler = handle->handler; |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2311 | struct input_dev *dev = handle->dev; |
| 2312 | int error; |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2313 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2314 | /* |
| 2315 | * We take dev->mutex here to prevent a race with |
| 2316 | * input_release_device(). |
| 2317 | */ |
| 2318 | error = mutex_lock_interruptible(&dev->mutex); |
| 2319 | if (error) |
| 2320 | return error; |
Dmitry Torokhov | ef7995f | 2010-01-29 23:59:12 -0800 | [diff] [blame] | 2321 | |
| 2322 | /* |
| 2323 | * Filters go to the head of the list, normal handlers |
| 2324 | * to the tail. |
| 2325 | */ |
| 2326 | if (handler->filter) |
| 2327 | list_add_rcu(&handle->d_node, &dev->h_list); |
| 2328 | else |
| 2329 | list_add_tail_rcu(&handle->d_node, &dev->h_list); |
| 2330 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2331 | mutex_unlock(&dev->mutex); |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2332 | |
| 2333 | /* |
| 2334 | * Since we are supposed to be called from ->connect(), |
| 2335 | * which is mutually exclusive with ->disconnect(), |
| 2336 | * we can't be racing with input_unregister_handle(), |
| 2337 | * so a separate lock is not needed here. |
| 2338 | */ |
Dmitry Torokhov | 66d2a59 | 2009-12-01 21:54:35 -0800 | [diff] [blame] | 2339 | list_add_tail_rcu(&handle->h_node, &handler->h_list); |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2340 | |
| 2341 | if (handler->start) |
| 2342 | handler->start(handle); |
| 2343 | |
| 2344 | return 0; |
| 2345 | } |
| 2346 | EXPORT_SYMBOL(input_register_handle); |
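/*
 * Example (illustrative sketch): a connect() method for the hypothetical
 * "foo" handler above, following the pattern used by in-tree handlers such
 * as evbug (kzalloc() needs <linux/slab.h>):
 *
 *	static int foo_connect(struct input_handler *handler,
 *			       struct input_dev *dev,
 *			       const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "foo";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	 err_unregister_handle:
 *		input_unregister_handle(handle);
 *	 err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */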
| 2347 | |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2348 | /** |
| 2349 | * input_unregister_handle - unregister an input handle |
| 2350 | * @handle: handle to unregister |
| 2351 | * |
| 2352 | * This function removes input handle from device's |
| 2353 | * and handler's lists. |
| 2354 | * |
| 2355 | * This function is supposed to be called from handler's |
| 2356 | * disconnect() method. |
| 2357 | */ |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2358 | void input_unregister_handle(struct input_handle *handle) |
| 2359 | { |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2360 | struct input_dev *dev = handle->dev; |
| 2361 | |
Dmitry Torokhov | 66d2a59 | 2009-12-01 21:54:35 -0800 | [diff] [blame] | 2362 | list_del_rcu(&handle->h_node); |
Dmitry Torokhov | 8006479 | 2007-08-30 00:22:11 -0400 | [diff] [blame] | 2363 | |
| 2364 | /* |
| 2365 | * Take dev->mutex to prevent a race with input_release_device(). |
| 2366 | */ |
| 2367 | mutex_lock(&dev->mutex); |
| 2368 | list_del_rcu(&handle->d_node); |
| 2369 | mutex_unlock(&dev->mutex); |
Dmitry Torokhov | 66d2a59 | 2009-12-01 21:54:35 -0800 | [diff] [blame] | 2370 | |
Dmitry Torokhov | 82ba56c | 2007-10-13 15:46:55 -0400 | [diff] [blame] | 2371 | synchronize_rcu(); |
Dmitry Torokhov | 5b2a082 | 2007-04-12 01:29:46 -0400 | [diff] [blame] | 2372 | } |
| 2373 | EXPORT_SYMBOL(input_unregister_handle); |
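/*
 * Example (illustrative sketch): the matching disconnect() for the connect()
 * sketch above; it undoes foo_connect() in reverse order:
 *
 *	static void foo_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */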
| 2374 | |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2375 | /** |
| 2376 | * input_get_new_minor - allocates a new input minor number |
| 2377 | * @legacy_base: beginning of the legacy range to be searched |
| 2378 | * @legacy_num: size of legacy range |
| 2379 | * @allow_dynamic: whether we can also take ID from the dynamic range |
| 2380 | * |
| 2381 | * This function allocates a new device minor from the input major namespace. |
| 2382 | * The caller can request a legacy minor by specifying the @legacy_base and |
| 2383 | * @legacy_num parameters, and whether an ID can be allocated from the dynamic |
| 2384 | * range if there are no free IDs in the legacy range. |
| 2385 | */ |
| 2386 | int input_get_new_minor(int legacy_base, unsigned int legacy_num, |
| 2387 | bool allow_dynamic) |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2388 | { |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2389 | /* |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2390 | * This function should be called from an input handler's ->connect() |
| 2391 | * method, which is serialized by input_mutex, so no additional |
| 2392 | * locking is needed here. |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2393 | */ |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2394 | if (legacy_base >= 0) { |
| 2395 | int minor = ida_simple_get(&input_ida, |
| 2396 | legacy_base, |
| 2397 | legacy_base + legacy_num, |
| 2398 | GFP_KERNEL); |
| 2399 | if (minor >= 0 || !allow_dynamic) |
| 2400 | return minor; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2401 | } |
Arnd Bergmann | 2f2177c | 2010-03-09 20:38:48 -0800 | [diff] [blame] | 2402 | |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2403 | return ida_simple_get(&input_ida, |
| 2404 | INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES, |
| 2405 | GFP_KERNEL); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2406 | } |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2407 | EXPORT_SYMBOL(input_get_new_minor); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2408 | |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2409 | /** |
| 2410 | * input_free_minor - release previously allocated minor |
| 2411 | * @minor: minor to be released |
| 2412 | * |
| 2413 | * This function releases previously allocated input minor so that it can be |
| 2414 | * reused later. |
| 2415 | */ |
| 2416 | void input_free_minor(unsigned int minor) |
| 2417 | { |
| 2418 | ida_simple_remove(&input_ida, minor); |
| 2419 | } |
| 2420 | EXPORT_SYMBOL(input_free_minor); |
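/*
 * Example (illustrative sketch): how a character-device handler might use
 * the two helpers above from its connect()/disconnect() paths.
 * FOO_MINOR_BASE and FOO_MINORS are hypothetical constants (evdev, for
 * comparison, reserves a legacy block of 32 minors starting at 64):
 *
 *	int minor;
 *
 *	minor = input_get_new_minor(FOO_MINOR_BASE, FOO_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *
 *	foo->devt = MKDEV(INPUT_MAJOR, minor);	// device node for this handle
 *	...
 *	input_free_minor(minor);		// on the disconnect path
 */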
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2421 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2422 | static int __init input_init(void) |
| 2423 | { |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2424 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2425 | |
Greg Kroah-Hartman | ea9f240 | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2426 | err = class_register(&input_class); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 2427 | if (err) { |
Joe Perches | da0c490 | 2010-11-29 23:33:07 -0800 | [diff] [blame] | 2428 | pr_err("unable to register input_dev class\n"); |
Dmitry Torokhov | d19fbe8 | 2005-09-15 02:01:39 -0500 | [diff] [blame] | 2429 | return err; |
| 2430 | } |
| 2431 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2432 | err = input_proc_init(); |
| 2433 | if (err) |
Greg Kroah-Hartman | b0fdfeb | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2434 | goto fail1; |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2435 | |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2436 | err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0), |
| 2437 | INPUT_MAX_CHAR_DEVICES, "input"); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2438 | if (err) { |
Joe Perches | da0c490 | 2010-11-29 23:33:07 -0800 | [diff] [blame] | 2439 | pr_err("unable to register char major %d\n", INPUT_MAJOR); |
Greg Kroah-Hartman | b0fdfeb | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2440 | goto fail2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2441 | } |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2442 | |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2443 | return 0; |
| 2444 | |
Greg Kroah-Hartman | b0fdfeb | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2445 | fail2: input_proc_exit(); |
Greg Kroah-Hartman | ea9f240 | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2446 | fail1: class_unregister(&input_class); |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2447 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 | } |
| 2449 | |
| 2450 | static void __exit input_exit(void) |
| 2451 | { |
Dmitry Torokhov | f96b434 | 2005-06-30 00:50:29 -0500 | [diff] [blame] | 2452 | input_proc_exit(); |
Dmitry Torokhov | 7f8d4ca | 2012-10-08 09:07:24 -0700 | [diff] [blame] | 2453 | unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0), |
| 2454 | INPUT_MAX_CHAR_DEVICES); |
Greg Kroah-Hartman | ea9f240 | 2005-10-27 22:25:43 -0700 | [diff] [blame] | 2455 | class_unregister(&input_class); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2456 | } |
| 2457 | |
| 2458 | subsys_initcall(input_init); |
| 2459 | module_exit(input_exit); |