blob: 2c61a2613475237ad92821a555984e00e4c404ab [file] [log] [blame]
Mike Lockwoodba83b012010-04-16 10:39:22 -04001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/kthread.h>
29#include <linux/freezer.h>
30
31#include <linux/types.h>
32#include <linux/file.h>
33#include <linux/device.h>
34#include <linux/miscdevice.h>
35
36#include <linux/usb.h>
37#include <linux/usb_usual.h>
38#include <linux/usb/ch9.h>
39#include <linux/usb/android_composite.h>
40#include <linux/usb/f_mtp.h>
41
42#define BULK_BUFFER_SIZE 16384
Mike Lockwood1de4d4d2010-07-06 19:27:52 -040043#define INTR_BUFFER_SIZE 28
Mike Lockwoodba83b012010-04-16 10:39:22 -040044
45/* String IDs */
46#define INTERFACE_STRING_INDEX 0
47
48/* values for mtp_dev.state */
49#define STATE_OFFLINE 0 /* initial state, disconnected */
50#define STATE_READY 1 /* ready for userspace calls */
51#define STATE_BUSY 2 /* processing userspace calls */
52#define STATE_CANCELED 3 /* transaction canceled by host */
53#define STATE_ERROR 4 /* error from completion routine */
54
55/* number of tx and rx requests to allocate */
56#define TX_REQ_MAX 4
57#define RX_REQ_MAX 2
58
59/* IO Thread commands */
60#define ANDROID_THREAD_QUIT 1
61#define ANDROID_THREAD_SEND_FILE 2
62#define ANDROID_THREAD_RECEIVE_FILE 3
63
64/* ID for Microsoft MTP OS String */
65#define MTP_OS_STRING_ID 0xEE
66
67/* MTP class reqeusts */
68#define MTP_REQ_CANCEL 0x64
69#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
70#define MTP_REQ_RESET 0x66
71#define MTP_REQ_GET_DEVICE_STATUS 0x67
72
73/* constants for device status */
74#define MTP_RESPONSE_OK 0x2001
75#define MTP_RESPONSE_DEVICE_BUSY 0x2019
76
77static const char shortname[] = "mtp_usb";
78
/* Per-gadget state for the MTP/PTP function.  A single instance is
 * allocated and published through the module-global _mtp_dev. */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	/* protects state transitions and the tx_idle list */
	spinlock_t lock;

	/* appear as MTP or PTP when enumerating */
	int interface_mode;

	struct usb_ep *ep_in;	/* bulk IN: device -> host */
	struct usb_ep *ep_out;	/* bulk OUT: host -> device */
	struct usb_ep *ep_intr;	/* interrupt IN: MTP events */

	/* one of the STATE_* values defined above */
	int state;

	/* synchronize access to our device file */
	atomic_t open_excl;

	/* idle bulk-IN requests available for the next write */
	struct list_head tx_idle;

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	wait_queue_head_t intr_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	struct usb_request *intr_req;
	/* set by mtp_complete_out() when a bulk-OUT request finishes */
	int rx_done;

	/* synchronize access to interrupt endpoint */
	struct mutex intr_mutex;
	/* true if interrupt endpoint is busy */
	int intr_busy;

	/* for our file IO thread */
	struct task_struct *thread;
	/* current command for IO thread (or zero for none) */
	int thread_command;
	struct file *thread_file;
	loff_t thread_file_offset;
	size_t thread_file_length;
	/* used to wait for thread to complete current command */
	struct completion thread_wait;
	/* result from current command */
	int thread_result;
};
122
/* Interface descriptor used when enumerating in MTP mode
 * (vendor-specific class triple). */
static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol     = 0,
};

/* Interface descriptor used when enumerating in PTP mode
 * (still-image class 6/1/1). */
static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass     = 1,
	.bInterfaceProtocol     = 1,
};
142
/* Bulk endpoint descriptors.  The high-speed variants fix a 512-byte
 * max packet size; the full-speed ones leave wMaxPacketSize zero so
 * usb_ep_autoconfig()/the controller can choose it. */
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

/* Interrupt IN endpoint for MTP event notifications. */
static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval              = 6,	/* polling interval exponent */
};
181
/* Descriptor lists for the four mode/speed combinations.  MTP and PTP
 * share endpoints and differ only in the interface descriptor; the
 * active pair is selected via the MTP_SET_INTERFACE_MODE ioctl. */
static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};
213
static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s	= "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language		= 0x0409,	/* en-US */
	.strings		= mtp_string_defs,
};

/* NULL-terminated list handed to the composite framework. */
static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};
229
230/* Microsoft MTP OS String */
/* Microsoft MTP OS String
 * Served at string index MTP_OS_STRING_ID (0xEE); the "MSFT100"
 * signature plus vendor code tells Windows it may issue the vendor
 * request handled in mtp_function_setup(). */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" (UTF-16LE) */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};
241
242/* Microsoft Extended Configuration Descriptor Header Section */
/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__u16	bcdVersion;	/* NOTE(review): initialized little-endian; arguably should be __le16 */
	__le16	wIndex;
	__u8	bCount;		/* number of function sections that follow */
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};
259
260/* MTP Extended Configuration Descriptor */
261struct {
262 struct mtp_ext_config_desc_header header;
263 struct mtp_ext_config_desc_function function;
264} mtp_ext_config_desc = {
265 .header = {
266 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
267 .bcdVersion = __constant_cpu_to_le16(0x0100),
268 .wIndex = __constant_cpu_to_le16(4),
269 .bCount = __constant_cpu_to_le16(1),
270 },
271 .function = {
272 .bFirstInterfaceNumber = 0,
273 .bInterfaceCount = 1,
274 .compatibleID = { 'M', 'T', 'P' },
275 },
276};
277
/* Payload for MTP_REQ_GET_DEVICE_STATUS: length + response code,
 * both little-endian on the wire. */
struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

/* Map a usb_function pointer back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_dev(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
290
291static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
292{
293 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
294 if (!req)
295 return NULL;
296
297 /* now allocate buffers for the requests */
298 req->buf = kmalloc(buffer_size, GFP_KERNEL);
299 if (!req->buf) {
300 usb_ep_free_request(ep, req);
301 return NULL;
302 }
303
304 return req;
305}
306
307static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
308{
309 if (req) {
310 kfree(req->buf);
311 usb_ep_free_request(ep, req);
312 }
313}
314
315static inline int _lock(atomic_t *excl)
316{
317 if (atomic_inc_return(excl) == 1) {
318 return 0;
319 } else {
320 atomic_dec(excl);
321 return -1;
322 }
323}
324
/* Release the exclusive-open counter taken by _lock(). */
static inline void _unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
329
330/* add a request to the tail of a list */
331static void req_put(struct mtp_dev *dev, struct list_head *head,
332 struct usb_request *req)
333{
334 unsigned long flags;
335
336 spin_lock_irqsave(&dev->lock, flags);
337 list_add_tail(&req->list, head);
338 spin_unlock_irqrestore(&dev->lock, flags);
339}
340
341/* remove a request from the head of a list */
342static struct usb_request *req_get(struct mtp_dev *dev, struct list_head *head)
343{
344 unsigned long flags;
345 struct usb_request *req;
346
347 spin_lock_irqsave(&dev->lock, flags);
348 if (list_empty(head)) {
349 req = 0;
350 } else {
351 req = list_first_entry(head, struct usb_request, list);
352 list_del(&req->list);
353 }
354 spin_unlock_irqrestore(&dev->lock, flags);
355 return req;
356}
357
/* Bulk-IN completion callback: recycle the request onto tx_idle and
 * wake any writer.  May run in interrupt context. */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}
369
/* Bulk-OUT completion callback: flag rx_done and wake the reader.
 * May run in interrupt context. */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}
380
/* Interrupt-IN completion callback: mark the single event request free
 * again and wake mtp_send_event().  May run in interrupt context. */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n", req->status, req->actual);
	dev->intr_busy = 0;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->intr_wq);
}
392
Mike Lockwoodba83b012010-04-16 10:39:22 -0400393static int __init create_bulk_endpoints(struct mtp_dev *dev,
394 struct usb_endpoint_descriptor *in_desc,
395 struct usb_endpoint_descriptor *out_desc,
396 struct usb_endpoint_descriptor *intr_desc)
397{
398 struct usb_composite_dev *cdev = dev->cdev;
399 struct usb_request *req;
400 struct usb_ep *ep;
401 int i;
402
403 DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
404
405 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
406 if (!ep) {
407 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
408 return -ENODEV;
409 }
410 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
411 ep->driver_data = dev; /* claim the endpoint */
412 dev->ep_in = ep;
413
414 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
415 if (!ep) {
416 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
417 return -ENODEV;
418 }
419 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
420 ep->driver_data = dev; /* claim the endpoint */
421 dev->ep_out = ep;
422
423 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
424 if (!ep) {
425 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
426 return -ENODEV;
427 }
428 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
429 ep->driver_data = dev; /* claim the endpoint */
430 dev->ep_out = ep;
431
432 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
433 if (!ep) {
434 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
435 return -ENODEV;
436 }
437 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
438 ep->driver_data = dev; /* claim the endpoint */
439 dev->ep_intr = ep;
440
441 /* now allocate requests for our endpoints */
442 for (i = 0; i < TX_REQ_MAX; i++) {
443 req = mtp_request_new(dev->ep_in, BULK_BUFFER_SIZE);
444 if (!req)
445 goto fail;
446 req->complete = mtp_complete_in;
447 req_put(dev, &dev->tx_idle, req);
448 }
449 for (i = 0; i < RX_REQ_MAX; i++) {
450 req = mtp_request_new(dev->ep_out, BULK_BUFFER_SIZE);
451 if (!req)
452 goto fail;
453 req->complete = mtp_complete_out;
454 dev->rx_req[i] = req;
455 }
Mike Lockwood1de4d4d2010-07-06 19:27:52 -0400456 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
457 if (!req)
458 goto fail;
459 req->complete = mtp_complete_intr;
460 dev->intr_req = req;
Mike Lockwoodba83b012010-04-16 10:39:22 -0400461
462 return 0;
463
464fail:
465 printk(KERN_ERR "mtp_bind() could not allocate requests\n");
466 return -1;
467}
468
/* read() handler for /dev/mtp_usb: receive up to @count bytes
 * (capped at BULK_BUFFER_SIZE) from the host over the bulk-OUT
 * endpoint.  Blocks until the gadget is online and a transfer
 * completes.  Returns bytes read, -ECANCELED if the host cancelled
 * the transaction, or another negative errno. */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%d)\n", count);

	/* a single rx request is used, so reads are bounded by its buffer */
	if (count > BULK_BUFFER_SIZE)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;	/* state changed under us (cancel/error/offline) */

done:
	/* report cancellation once, then return to READY unless offline */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
544
/* write() handler for /dev/mtp_usb: send @count bytes to the host in
 * BULK_BUFFER_SIZE chunks over the bulk-IN endpoint, recycling idle tx
 * requests.  Returns bytes accepted, -ECANCELED on host cancel, or a
 * negative errno. */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	int r = count, xfer;
	int ret;

	DBG(cdev, "mtp_write(%d)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	while (count > 0) {
		/* bail out if a cancel/error/disconnect happened mid-loop */
		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > BULK_BUFFER_SIZE)
			xfer = BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* return an unqueued request to the idle list */
	if (req)
		req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
624
625static int mtp_send_file(struct mtp_dev *dev, struct file *filp,
626 loff_t offset, size_t count)
627{
628 struct usb_composite_dev *cdev = dev->cdev;
629 struct usb_request *req = 0;
630 int r = count, xfer, ret;
631
632 DBG(cdev, "mtp_send_file(%lld %d)\n", offset, count);
633
634 while (count > 0) {
635 /* get an idle tx request to use */
636 req = 0;
637 ret = wait_event_interruptible(dev->write_wq,
638 (req = req_get(dev, &dev->tx_idle))
639 || dev->state != STATE_BUSY);
640 if (!req) {
641 r = ret;
642 break;
643 }
644
645 if (count > BULK_BUFFER_SIZE)
646 xfer = BULK_BUFFER_SIZE;
647 else
648 xfer = count;
649 ret = vfs_read(filp, req->buf, xfer, &offset);
650 if (ret < 0) {
651 r = ret;
652 break;
653 }
654 xfer = ret;
655
656 req->length = xfer;
Mike Lockwood1de4d4d2010-07-06 19:27:52 -0400657 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400658 if (ret < 0) {
659 DBG(cdev, "mtp_write: xfer error %d\n", ret);
660 dev->state = STATE_ERROR;
661 r = -EIO;
662 break;
663 }
664
665 count -= xfer;
666
667 /* zero this so we don't try to free it on error exit */
668 req = 0;
669 }
670
671 if (req)
672 req_put(dev, &dev->tx_idle, req);
673
674 DBG(cdev, "mtp_write returning %d\n", r);
675 return r;
676}
677
/* Receive @count bytes from the host over the bulk-OUT endpoint and
 * write them to @filp at @offset.  Double-buffers via rx_req[]: the
 * next USB read is queued before the previous buffer is written to the
 * file.  Runs on the mtp_thread kernel thread.  Returns the requested
 * count on success or a negative errno. */
static int mtp_receive_file(struct mtp_dev *dev, struct file *filp,
	loff_t offset, size_t count)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	int r = count;
	int ret;
	int cur_buf = 0;	/* index of next rx_req[] buffer to queue */

	DBG(cdev, "mtp_receive_file(%d)\n", count);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > BULK_BUFFER_SIZE
					? BULK_BUFFER_SIZE : count);
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			/* NOTE(review): usb_ep_queue() returns 0 on success,
			 * so this subtraction looks like a no-op; the real
			 * decrement by read_req->actual happens below —
			 * confirm intent before touching. */
			count -= ret;
		}

		if (write_req) {
			/* flush the previously completed buffer to the file */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (ret < 0 || dev->state != STATE_BUSY) {
				r = ret;
				break;
			}
			count -= read_req->actual;
			/* hand the filled buffer to the write stage */
			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
737
738/* Kernel thread for handling file IO operations */
739static int mtp_thread(void *data)
740{
741 struct mtp_dev *dev = (struct mtp_dev *)data;
742 struct usb_composite_dev *cdev = dev->cdev;
743 int flags;
744
745 DBG(cdev, "mtp_thread started\n");
746
747 while (1) {
748 /* wait for a command */
749 while (1) {
750 try_to_freeze();
751 set_current_state(TASK_INTERRUPTIBLE);
752 if (dev->thread_command != 0)
753 break;
754 schedule();
755 }
756 __set_current_state(TASK_RUNNING);
757
758 if (dev->thread_command == ANDROID_THREAD_QUIT) {
759 DBG(cdev, "ANDROID_THREAD_QUIT\n");
760 dev->thread_result = 0;
761 goto done;
762 }
763
764 if (dev->thread_command == ANDROID_THREAD_SEND_FILE)
765 flags = O_RDONLY | O_LARGEFILE;
766 else
767 flags = O_WRONLY | O_LARGEFILE | O_CREAT;
768
769 if (dev->thread_command == ANDROID_THREAD_SEND_FILE) {
770 dev->thread_result = mtp_send_file(dev,
771 dev->thread_file,
772 dev->thread_file_offset,
773 dev->thread_file_length);
774 } else {
775 dev->thread_result = mtp_receive_file(dev,
776 dev->thread_file,
777 dev->thread_file_offset,
778 dev->thread_file_length);
779 }
780
781 if (dev->thread_file) {
782 fput(dev->thread_file);
783 dev->thread_file = NULL;
784 }
785 dev->thread_command = 0;
786 complete(&dev->thread_wait);
787 }
788
789done:
790 DBG(cdev, "android_thread done\n");
791 complete_and_exit(&dev->thread_wait, 0);
792}
793
/* Send an MTP event to the host over the interrupt endpoint.
 * Only one event request exists (dev->intr_req); intr_mutex serializes
 * callers and intr_busy/intr_wq track in-flight completion.
 * Returns 0 on success or a negative errno. */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;

	mutex_lock(&dev->intr_mutex);

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->intr_wq, !dev->intr_busy || dev->state == STATE_OFFLINE);
	if (ret < 0)
		goto done;
	if (dev->state == STATE_OFFLINE) {
		ret = -ENODEV;
		goto done;
	}
	req = dev->intr_req;
	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		ret = -EFAULT;
		goto done;
	}
	req->length = length;
	dev->intr_busy = 1;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		dev->intr_busy = 0;	/* queue failed: request is free again */

done:
	mutex_unlock(&dev->intr_mutex);
	return ret;
}
830
/* ioctl handler for /dev/mtp_usb.
 * MTP_SEND_FILE / MTP_RECEIVE_FILE: hand a file range to the IO thread
 * and block until it finishes.  MTP_SET_INTERFACE_MODE: switch between
 * MTP and PTP descriptor sets.  MTP_SEND_EVENT: forward an event to
 * the interrupt endpoint (returns early, bypassing state handling). */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	{
		struct mtp_file_range mfr;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			return -ECANCELED;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			return -ENODEV;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* takes a reference; dropped by mtp_thread via fput() */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		dev->thread_file = filp;
		dev->thread_file_offset = mfr.offset;
		dev->thread_file_length = mfr.length;

		if (code == MTP_SEND_FILE)
			dev->thread_command = ANDROID_THREAD_SEND_FILE;
		else
			dev->thread_command = ANDROID_THREAD_RECEIVE_FILE;

		/* wake up the thread */
		init_completion(&dev->thread_wait);
		wake_up_process(dev->thread);

		/* wait for the thread to complete the command */
		wait_for_completion(&dev->thread_wait);
		ret = dev->thread_result;
		DBG(dev->cdev, "thread returned %d\n", ret);
		break;
	}
	case MTP_SET_INTERFACE_MODE:
		if (value == MTP_INTERFACE_MODE_MTP ||
			value == MTP_INTERFACE_MODE_PTP) {
			dev->interface_mode = value;
			if (value == MTP_INTERFACE_MODE_PTP) {
				dev->function.descriptors = fs_ptp_descs;
				dev->function.hs_descriptors = hs_ptp_descs;
			} else {
				dev->function.descriptors = fs_mtp_descs;
				dev->function.hs_descriptors = hs_mtp_descs;
			}
			ret = 0;
		}
		break;
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			return -EFAULT;
		else
			return mtp_send_event(dev, &event);
	}
	}

/* fall-through exit: also reached by MTP_SET_INTERFACE_MODE and
 * unknown codes (ret stays -EINVAL for those) */
fail:
	if (filp)
		fput(filp);
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
925
926static int mtp_open(struct inode *ip, struct file *fp)
927{
928 printk(KERN_INFO "mtp_open\n");
929 if (_lock(&_mtp_dev->open_excl))
930 return -EBUSY;
931
932 _mtp_dev->thread = kthread_create(mtp_thread, _mtp_dev, "f_mtp");
933 if (IS_ERR(_mtp_dev->thread))
934 return -ENOMEM;
935
936 /* clear any error condition */
937 if (_mtp_dev->state != STATE_OFFLINE)
938 _mtp_dev->state = STATE_READY;
939
940 fp->private_data = _mtp_dev;
941 return 0;
942}
943
/* release() handler for /dev/mtp_usb: stop the IO thread and drop the
 * exclusive-open lock. */
static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	/* tell the thread to quit */
	if (_mtp_dev->thread) {
		_mtp_dev->thread_command = ANDROID_THREAD_QUIT;
		init_completion(&_mtp_dev->thread_wait);
		wake_up_process(_mtp_dev->thread);
		/* wait until the thread has acknowledged the quit */
		wait_for_completion(&_mtp_dev->thread_wait);
	}

	_unlock(&_mtp_dev->open_excl);
	return 0;
}
959
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};

/* registered in bind, deregistered in mtp_function_unbind() */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = shortname,
	.fops = &mtp_fops,
};
975
/* usb_function bind callback: allocate the interface ID, claim
 * endpoints via create_bulk_endpoints(), and mirror the autoconfigured
 * full-speed endpoint addresses into the high-speed descriptors. */
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev	*dev = func_to_dev(f);
	int			id;
	int			ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
1012
1013static void
1014mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1015{
1016 struct mtp_dev *dev = func_to_dev(f);
1017 struct usb_request *req;
1018 int i;
1019
1020 spin_lock_irq(&dev->lock);
1021 while ((req = req_get(dev, &dev->tx_idle)))
1022 mtp_request_free(req, dev->ep_in);
1023 for (i = 0; i < RX_REQ_MAX; i++)
1024 mtp_request_free(dev->rx_req[i], dev->ep_out);
Mike Lockwood1de4d4d2010-07-06 19:27:52 -04001025 mtp_request_free(dev->intr_req, dev->ep_intr);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001026 dev->state = STATE_OFFLINE;
1027 spin_unlock_irq(&dev->lock);
Mike Lockwood1de4d4d2010-07-06 19:27:52 -04001028 wake_up(&dev->intr_wq);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001029
1030 misc_deregister(&mtp_device);
1031 kfree(_mtp_dev);
1032 _mtp_dev = NULL;
1033}
1034
/* ep0 setup handler: serves the Microsoft OS string/descriptor (MTP
 * mode only) and the MTP class requests CANCEL and GET_DEVICE_STATUS.
 * Returns the number of bytes queued on ep0, or -EOPNOTSUPP for
 * requests we do not handle. */
static int mtp_function_setup(struct usb_function *f,
					const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev	*dev = func_to_dev(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int	value = -EOPNOTSUPP;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);
	unsigned long	flags;

	/* do nothing if we are disabled */
	if (dev->function.disabled)
		return value;

	VDBG(cdev, "mtp_function_setup "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (dev->interface_mode == MTP_INTERFACE_MODE_MTP
			&& ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
		/* return here since composite.c will send for us */
		return value;
	}
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		/* bRequest == 1 matches the vendor code advertised in
		 * mtp_os_string; wIndex 4/5 selects the extended
		 * configuration/properties descriptor */
		if (dev->interface_mode == MTP_INTERFACE_MODE_MTP
				&& ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	}
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;
			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_BUSY
					|| dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s setup response queue error\n", __func__);
	}

	if (value == -EOPNOTSUPP)
		VDBG(cdev,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	return value;
}
1144
1145static int mtp_function_set_alt(struct usb_function *f,
1146 unsigned intf, unsigned alt)
1147{
1148 struct mtp_dev *dev = func_to_dev(f);
1149 struct usb_composite_dev *cdev = f->config->cdev;
1150 int ret;
1151
1152 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1153 ret = usb_ep_enable(dev->ep_in,
1154 ep_choose(cdev->gadget,
1155 &mtp_highspeed_in_desc,
1156 &mtp_fullspeed_in_desc));
1157 if (ret)
1158 return ret;
1159 ret = usb_ep_enable(dev->ep_out,
1160 ep_choose(cdev->gadget,
1161 &mtp_highspeed_out_desc,
1162 &mtp_fullspeed_out_desc));
1163 if (ret) {
1164 usb_ep_disable(dev->ep_in);
1165 return ret;
1166 }
1167 ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc);
1168 if (ret) {
1169 usb_ep_disable(dev->ep_out);
1170 usb_ep_disable(dev->ep_in);
1171 return ret;
1172 }
1173 dev->state = STATE_READY;
1174
1175 /* readers may be blocked waiting for us to go online */
1176 wake_up(&dev->read_wq);
1177 return 0;
1178}
1179
1180static void mtp_function_disable(struct usb_function *f)
1181{
1182 struct mtp_dev *dev = func_to_dev(f);
1183 struct usb_composite_dev *cdev = dev->cdev;
1184
1185 DBG(cdev, "mtp_function_disable\n");
1186 dev->state = STATE_OFFLINE;
1187 usb_ep_disable(dev->ep_in);
1188 usb_ep_disable(dev->ep_out);
1189 usb_ep_disable(dev->ep_intr);
1190
1191 /* readers may be blocked waiting for us to go online */
1192 wake_up(&dev->read_wq);
Mike Lockwood1de4d4d2010-07-06 19:27:52 -04001193 wake_up(&dev->intr_wq);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001194
1195 VDBG(cdev, "%s disabled\n", dev->function.name);
1196}
1197
1198static int mtp_bind_config(struct usb_configuration *c)
1199{
1200 struct mtp_dev *dev;
1201 int ret;
1202
1203 printk(KERN_INFO "mtp_bind_config\n");
1204
1205 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1206 if (!dev)
1207 return -ENOMEM;
1208
1209 /* allocate a string ID for our interface */
1210 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1211 ret = usb_string_id(c->cdev);
1212 if (ret < 0)
1213 return ret;
1214 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1215 mtp_interface_desc.iInterface = ret;
1216 }
1217
1218 spin_lock_init(&dev->lock);
1219 init_completion(&dev->thread_wait);
1220 init_waitqueue_head(&dev->read_wq);
1221 init_waitqueue_head(&dev->write_wq);
Mike Lockwood1de4d4d2010-07-06 19:27:52 -04001222 init_waitqueue_head(&dev->intr_wq);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001223 atomic_set(&dev->open_excl, 0);
1224 INIT_LIST_HEAD(&dev->tx_idle);
Mike Lockwood1de4d4d2010-07-06 19:27:52 -04001225 mutex_init(&dev->intr_mutex);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001226
1227 dev->cdev = c->cdev;
1228 dev->function.name = "mtp";
1229 dev->function.strings = mtp_strings,
1230 dev->function.descriptors = fs_mtp_descs;
1231 dev->function.hs_descriptors = hs_mtp_descs;
1232 dev->function.bind = mtp_function_bind;
1233 dev->function.unbind = mtp_function_unbind;
1234 dev->function.setup = mtp_function_setup;
1235 dev->function.set_alt = mtp_function_set_alt;
1236 dev->function.disable = mtp_function_disable;
1237
1238 /* MTP mode by default */
1239 dev->interface_mode = MTP_INTERFACE_MODE_MTP;
1240
1241 /* _mtp_dev must be set before calling usb_gadget_register_driver */
1242 _mtp_dev = dev;
1243
1244 ret = misc_register(&mtp_device);
1245 if (ret)
1246 goto err1;
1247
1248 ret = usb_add_function(c, &dev->function);
1249 if (ret)
1250 goto err2;
1251
1252 return 0;
1253
1254err2:
1255 misc_deregister(&mtp_device);
1256err1:
1257 kfree(dev);
1258 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1259 return ret;
1260}
1261
/* Registration record handed to the Android composite framework;
 * bind_config is presumably invoked by the framework when the "mtp"
 * function is added to a configuration — confirm against
 * android_composite.  */
static struct android_usb_function mtp_function = {
	.name = "mtp",
	.bind_config = mtp_bind_config,
};
1266
/* Module entry point: register the MTP function with the Android
 * composite gadget framework.  Per-configuration setup happens later
 * in mtp_bind_config().  Always succeeds. */
static int __init init(void)
{
	printk(KERN_INFO "f_mtp init\n");
	android_register_function(&mtp_function);
	return 0;
}
module_init(init);