/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;
	bool running;

	__u8 *rd_data;
	uint rd_size;

	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	struct mutex report_lock;
	wait_queue_head_t report_wait;
	atomic_t report_done;
	atomic_t report_id;
	struct uhid_event report_buf;
};

static struct miscdevice uhid_misc;

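/*
 * The output queue is a fixed-size ring of events destined for user-space.
 * uhid_queue() must be called with qlock held; uhid_char_read() consumes
 * entries under devlock and only takes qlock to advance the tail index.
 * If the ring is full, the new event is dropped and freed.
 */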
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_START);
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_input(struct input_dev *input, unsigned int type,
			  unsigned int code, int value)
{
	struct hid_device *hid = input_get_drvdata(input);
	struct uhid_device *uhid = hid->driver_data;
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT_EV;
	ev->u.output_ev.type = type;
	ev->u.output_ev.code = code;
	ev->u.output_ev.value = value;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

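/*
 * Synchronous report request: queue a UHID_FEATURE event tagged with a
 * fresh report_id and sleep on report_wait until user-space replies with
 * a matching UHID_FEATURE_ANSWER (see uhid_dev_feature_answer()) or the
 * 5 second timeout expires. report_lock serializes concurrent requests.
 */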
static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
			    __u8 *buf, size_t count, unsigned char rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 report_type;
	struct uhid_event *ev;
	unsigned long flags;
	int ret;
	size_t uninitialized_var(len);
	struct uhid_feature_answer_req *req;

	if (!uhid->running)
		return -EIO;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		report_type = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		report_type = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		report_type = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret)
		return ret;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto unlock;
	}

	spin_lock_irqsave(&uhid->qlock, flags);
	ev->type = UHID_FEATURE;
	ev->u.feature.id = atomic_inc_return(&uhid->report_id);
	ev->u.feature.rnum = rnum;
	ev->u.feature.rtype = report_type;

	atomic_set(&uhid->report_done, 0);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				atomic_read(&uhid->report_done), 5 * HZ);

	/*
	 * Make sure "uhid->running" is cleared on shutdown before
	 * "uhid->report_done" is set.
	 */
	smp_rmb();
	if (!ret || !uhid->running) {
		ret = -EIO;
	} else if (ret < 0) {
		ret = -ERESTARTSYS;
	} else {
		spin_lock_irqsave(&uhid->qlock, flags);
		req = &uhid->report_buf.u.feature_answer;

		if (req->err) {
			ret = -EIO;
		} else {
			ret = 0;
			len = min(count,
				min_t(size_t, req->size, UHID_DATA_MAX));
			memcpy(buf, req->data, len);
		}

		spin_unlock_irqrestore(&uhid->qlock, flags);
	}

	atomic_set(&uhid->report_done, 1);

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret ? ret : len;
}

static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.hidinput_input_event = uhid_hid_input,
	.parse = uhid_hid_parse,
};

#ifdef CONFIG_COMPAT

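/*
 * struct uhid_create_req carries a user-space pointer to the report
 * descriptor, so its layout differs between 32-bit and 64-bit tasks.
 * The compat variant below mirrors the 32-bit layout with compat_uptr_t
 * so UHID_CREATE issued by 32-bit callers can be translated in place.
 */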
/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (is_compat_task()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kmalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

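/*
 * UHID_CREATE: copy the report descriptor from user-space, allocate and
 * populate a struct hid_device backed by uhid_hid_driver, and register it
 * with the HID core. On failure everything is torn down again so the char
 * device can retry with another UHID_CREATE.
 */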
static int uhid_dev_create(struct uhid_device *uhid,
			   const struct uhid_event *ev)
{
	struct hid_device *hid;
	int ret;

	if (uhid->running)
		return -EALREADY;

	uhid->rd_size = ev->u.create.rd_size;
	if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
	if (!uhid->rd_data)
		return -ENOMEM;

	if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
			   uhid->rd_size)) {
		ret = -EFAULT;
		goto err_free;
	}

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	strncpy(hid->name, ev->u.create.name, 127);
	hid->name[127] = 0;
	strncpy(hid->phys, ev->u.create.phys, 63);
	hid->phys[63] = 0;
	strncpy(hid->uniq, ev->u.create.uniq, 63);
	hid->uniq[63] = 0;

	hid->ll_driver = &uhid_hid_driver;
	hid->hid_get_raw_report = uhid_hid_get_raw;
	hid->hid_output_raw_report = uhid_hid_output_raw;
	hid->bus = ev->u.create.bus;
	hid->vendor = ev->u.create.vendor;
	hid->product = ev->u.create.product;
	hid->version = ev->u.create.version;
	hid->country = ev->u.create.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	ret = hid_add_device(hid);
	if (ret) {
		hid_err(hid, "Cannot register HID device\n");
		goto err_hid;
	}

	return 0;

err_hid:
	hid_destroy_device(hid);
	uhid->hid = NULL;
	uhid->running = false;
err_free:
	kfree(uhid->rd_data);
	return ret;
}

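/*
 * UHID_DESTROY: mark the device as stopped and wake any report waiter.
 * The smp_wmb() pairs with the smp_rmb() in uhid_hid_get_raw() so that a
 * waiter woken via report_done always observes running == false.
 */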
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	/* clear "running" before setting "report_done" */
	uhid->running = false;
	smp_wmb();
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

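/*
 * UHID_FEATURE_ANSWER: complete a pending report request. Answers whose id
 * does not match the most recent report_id, or that arrive after the
 * request already completed or timed out, are dropped silently.
 */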
static int uhid_dev_feature_answer(struct uhid_device *uhid,
				   struct uhid_event *ev)
{
	unsigned long flags;

	if (!uhid->running)
		return -EINVAL;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
		goto unlock;
	if (atomic_read(&uhid->report_done))
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
	return 0;
}

static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	atomic_set(&uhid->report_done, 1);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

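/*
 * read() hands the oldest queued event to user-space. devlock keeps
 * concurrent readers from racing on the tail slot; if another reader
 * emptied the queue while we slept, we go back to waiting.
 */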
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_FEATURE_ANSWER:
		ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return POLLIN | POLLRDNORM;

	return 0;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= UHID_NAME,
};

static int __init uhid_init(void)
{
	return misc_register(&uhid_misc);
}

static void __exit uhid_exit(void)
{
	misc_deregister(&uhid_misc);
}

module_init(uhid_init);
module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");