/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

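/*
 * Rough user-space usage sketch (illustration only; "rdesc" and "rdesc_size"
 * are placeholders for a real HID report descriptor). One uhid_event is
 * exchanged per read()/write() on the character device:
 *
 *	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
 *	struct uhid_event ev = { .type = UHID_CREATE2 };
 *
 *	strcpy((char *)ev.u.create2.name, "example-uhid-device");
 *	memcpy(ev.u.create2.rd_data, rdesc, rdesc_size);
 *	ev.u.create2.rd_size = rdesc_size;
 *	ev.u.create2.bus = BUS_USB;
 *	write(fd, &ev, sizeof(ev));	// registers the device
 *
 *	read(fd, &ev, sizeof(ev));	// returns UHID_START, UHID_OPEN, ...
 */
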
#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>
#include <linux/uaccess.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;
	bool running;

	__u8 *rd_data;
	uint rd_size;

	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

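/*
 * hid_add_device() may trigger HID transfers (e.g. feature requests) during
 * probe, so device registration is deferred to this worker instead of running
 * directly in the write() path (see uhid_dev_create2()).
 */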
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		hid_destroy_device(uhid->hid);
		uhid->hid = NULL;
		uhid->running = false;
	}
}

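/* Must be called with uhid->qlock held; always consumes @ev (queued or freed). */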
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

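/*
 * ->start() callback: queue UHID_START for user-space, with dev_flags
 * describing which report types use numbered reports.
 */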
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

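/*
 * Matches a GET/SET_REPORT reply from user-space against the currently
 * pending request (by type and id) and wakes the waiter in
 * __uhid_report_queue_and_wait(); stale replies are dropped silently.
 */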
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

static struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

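/*
 * Handle UHID_CREATE2: duplicate the report descriptor, set up the
 * hid_device and schedule its registration via the worker.
 */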
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->running)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/*
	 * Adding of a HID device is done through a worker, to allow HID
	 * drivers which use feature requests during .probe to work;
	 * otherwise they would block on devlock, which is held by
	 * uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

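/*
 * Handle the legacy UHID_CREATE request: its report descriptor sits behind a
 * user-space pointer, so copy it into the event and reuse the CREATE2 path.
 */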
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

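/*
 * Reader side of the event queue: devlock serializes readers, while qlock
 * protects head/tail against concurrent uhid_queue() producers.
 */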
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

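/*
 * Writer side: each write() carries exactly one uhid_event, which is
 * dispatched by type while devlock is held.
 */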
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred() || uaccess_kernel()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return POLLIN | POLLRDNORM;

	return 0;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);