/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;
	bool running;

	__u8 *rd_data;
	uint rd_size;

	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
};

static struct miscdevice uhid_misc;

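/*
 * Enqueue @ev on the per-device output queue and wake up any reader.
 * Must be called with qlock held; always takes ownership of @ev and frees
 * it if the queue is full.
 */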
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

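/* Allocate an empty event of type @event and queue it for user-space. */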
static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

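/*
 * hid_ll_driver callbacks. Most of them simply forward the request to
 * user-space as a uhid event; ->parse() instead runs on the report
 * descriptor cached at device-creation time.
 */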
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

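/*
 * Complete a pending GET/SET_REPORT transaction: if @id and @ev->type still
 * match the outstanding request, copy the reply into report_buf and wake the
 * waiter; stale or unsolicited replies are dropped silently.
 */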
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

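/*
 * Synchronous GET_REPORT: queue a UHID_GET_REPORT event and block (for up to
 * five seconds) until user-space answers with UHID_GET_REPORT_REPLY, then
 * copy the returned data into @buf.
 */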
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

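/*
 * Synchronous SET_REPORT: send the report data to user-space as a
 * UHID_SET_REPORT event and wait for the matching UHID_SET_REPORT_REPLY.
 */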
static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

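/*
 * ->raw_request() entry point: map the HID report type to its UHID
 * counterpart and dispatch to the GET/SET_REPORT helpers above.
 */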
static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

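/*
 * Forward a report to user-space as a UHID_OUTPUT event (rtype says whether
 * it is an output or feature report). Fire-and-forget; no reply is expected.
 */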
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

static struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
};

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

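/*
 * UHID_CREATE2 handler: duplicate the report descriptor supplied by
 * user-space, allocate a hid_device bound to uhid_hid_driver and register
 * it with the HID core.
 */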
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->running)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	ret = hid_add_device(hid);
	if (ret) {
		hid_err(hid, "Cannot register HID device\n");
		goto err_hid;
	}

	return 0;

err_hid:
	hid_destroy_device(hid);
	uhid->hid = NULL;
	uhid->running = false;
err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

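/*
 * Legacy UHID_CREATE handler: the report descriptor is passed as a
 * user-space pointer, so copy it in and rewrite the request in-place as a
 * create2 event.
 */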
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

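/* Each open() of the character device gets its own independent uhid_device. */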
static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

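/*
 * read() hands out one queued event per call; it blocks unless O_NONBLOCK
 * is set, and devlock serializes concurrent readers.
 */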
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
				uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

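/*
 * write() accepts exactly one uhid_event per call and dispatches it by type;
 * devlock serializes writers against readers and against each other.
 */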
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return POLLIN | POLLRDNORM;

	return 0;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};

static int __init uhid_init(void)
{
	return misc_register(&uhid_misc);
}

static void __exit uhid_exit(void)
{
	misc_deregister(&uhid_misc);
}

module_init(uhid_init);
module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);