blob: 37189d8615f6d6affa49494d794aa5ae810e94e5 [file] [log] [blame]
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28
29#include <linux/types.h>
30#include <linux/file.h>
31#include <linux/device.h>
32#include <linux/miscdevice.h>
33
34#include <linux/usb.h>
35#include <linux/usb_usual.h>
36#include <linux/usb/ch9.h>
37#include <linux/usb/f_mtp.h>
38
39#define MTP_BULK_BUFFER_SIZE 16384
40#define INTR_BUFFER_SIZE 28
41
42/* String IDs */
43#define INTERFACE_STRING_INDEX 0
44
45/* values for mtp_dev.state */
46#define STATE_OFFLINE 0 /* initial state, disconnected */
47#define STATE_READY 1 /* ready for userspace calls */
48#define STATE_BUSY 2 /* processing userspace calls */
49#define STATE_CANCELED 3 /* transaction canceled by host */
50#define STATE_ERROR 4 /* error from completion routine */
51
52/* number of tx and rx requests to allocate */
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +053053#define MTP_TX_REQ_MAX 8
Benoit Gobyf0fbc482011-12-19 14:37:50 -080054#define RX_REQ_MAX 2
55#define INTR_REQ_MAX 5
56
57/* ID for Microsoft MTP OS String */
58#define MTP_OS_STRING_ID 0xEE
59
/* MTP class requests */
61#define MTP_REQ_CANCEL 0x64
62#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
63#define MTP_REQ_RESET 0x66
64#define MTP_REQ_GET_DEVICE_STATUS 0x67
65
66/* constants for device status */
67#define MTP_RESPONSE_OK 0x2001
68#define MTP_RESPONSE_DEVICE_BUSY 0x2019
69
Pavankumar Kondetie79aa682012-12-19 20:19:35 +053070unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
71module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
72
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +053073unsigned int mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
74module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
75
76unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
77module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
78
Benoit Gobyf0fbc482011-12-19 14:37:50 -080079static const char mtp_shortname[] = "mtp_usb";
80
/* Per-function device state, shared between the USB function callbacks,
 * the /dev/mtp_usb file operations and the file-transfer work items.
 */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;	/* protects state transitions and request lists */

	struct usb_ep *ep_in;	/* bulk IN: device -> host data */
	struct usb_ep *ep_out;	/* bulk OUT: host -> device data */
	struct usb_ep *ep_intr;	/* interrupt IN: MTP events */

	int state;		/* one of the STATE_* values above */

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;	/* unused bulk-IN requests */
	struct list_head intr_idle;	/* unused interrupt requests */

	wait_queue_head_t read_wq;	/* woken on rx completion / state change */
	wait_queue_head_t write_wq;	/* woken when a tx request is returned */
	wait_queue_head_t intr_wq;	/* woken when an intr request is returned */
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;			/* set by mtp_complete_out() */

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;		/* file being sent/received */
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;	/* nonzero: prepend an MTP data header */
	uint16_t xfer_command;		/* MTP command code for the header */
	uint32_t xfer_transaction_id;	/* MTP transaction id for the header */
	int xfer_result;		/* work result read back by mtp_ioctl() */
};
120
/* MTP interface: vendor-specific class so the host matches on the
 * Microsoft OS descriptor rather than a standard class driver.
 */
static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol     = 0,
};

/* PTP interface: standard still-image class (6/1/1) for camera mode */
static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass     = 1,
	.bInterfaceProtocol     = 1,
};

/* SuperSpeed bulk endpoints: fixed 1024-byte max packet per USB 3.0 spec */
static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
	.bLength                = sizeof mtp_superspeed_in_comp_desc,
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst           = 0, */
	/* .bmAttributes        = 0, */
};

static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
	.bLength                = sizeof mtp_superspeed_out_comp_desc,
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst           = 0, */
	/* .bmAttributes        = 0, */
};

/* High-speed bulk endpoints: 512-byte max packet per USB 2.0 spec */
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

/* Full-speed bulk endpoints: wMaxPacketSize left 0 for autoconfig to fill */
static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

/* Interrupt IN endpoint used for MTP event notifications */
static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval              = 6,
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
	.bLength                = sizeof mtp_superspeed_intr_comp_desc,
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst           = 0, */
	/* .bmAttributes        = 0, */
	.wBytesPerInterval      = cpu_to_le16(INTR_BUFFER_SIZE),
};
223
/* NULL-terminated descriptor lists handed to the composite framework,
 * one set per speed (full/high/super) for each of MTP and PTP modes.
 */
static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

/* SuperSpeed lists interleave each endpoint with its companion descriptor */
static struct usb_descriptor_header *ss_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *ss_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
	NULL,
};
277
static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s = "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language     = 0x0409,	/* en-US */
	.strings      = mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String: returned for string index 0xEE; the host uses
 * the signature and vendor code to fetch the extended config descriptor.
 */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};

/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__u16	bcdVersion;	/* NOTE(review): on-the-wire field — presumably
				 * should be __le16; confirm against the MS OS
				 * descriptor spec before changing */
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};
323
324/* MTP Extended Configuration Descriptor */
325struct {
326 struct mtp_ext_config_desc_header header;
327 struct mtp_ext_config_desc_function function;
328} mtp_ext_config_desc = {
329 .header = {
330 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
331 .bcdVersion = __constant_cpu_to_le16(0x0100),
332 .wIndex = __constant_cpu_to_le16(4),
333 .bCount = __constant_cpu_to_le16(1),
334 },
335 .function = {
336 .bFirstInterfaceNumber = 0,
337 .bInterfaceCount = 1,
338 .compatibleID = { 'M', 'T', 'P' },
339 },
340};
341
/* Payload for the MTP_REQ_GET_DEVICE_STATUS class request */
struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;	/* MTP_RESPONSE_OK or MTP_RESPONSE_DEVICE_BUSY */
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;
349
/* Map a generic usb_function back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
354
355static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
356{
357 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
358 if (!req)
359 return NULL;
360
361 /* now allocate buffers for the requests */
362 req->buf = kmalloc(buffer_size, GFP_KERNEL);
363 if (!req->buf) {
364 usb_ep_free_request(ep, req);
365 return NULL;
366 }
367
368 return req;
369}
370
371static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
372{
373 if (req) {
374 kfree(req->buf);
375 usb_ep_free_request(ep, req);
376 }
377}
378
379static inline int mtp_lock(atomic_t *excl)
380{
381 if (atomic_inc_return(excl) == 1) {
382 return 0;
383 } else {
384 atomic_dec(excl);
385 return -1;
386 }
387}
388
/* Release a lock taken with mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
393
394/* add a request to the tail of a list */
395static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
396 struct usb_request *req)
397{
398 unsigned long flags;
399
400 spin_lock_irqsave(&dev->lock, flags);
401 list_add_tail(&req->list, head);
402 spin_unlock_irqrestore(&dev->lock, flags);
403}
404
405/* remove a request from the head of a list */
406static struct usb_request
407*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
408{
409 unsigned long flags;
410 struct usb_request *req;
411
412 spin_lock_irqsave(&dev->lock, flags);
413 if (list_empty(head)) {
414 req = 0;
415 } else {
416 req = list_first_entry(head, struct usb_request, list);
417 list_del(&req->list);
418 }
419 spin_unlock_irqrestore(&dev->lock, flags);
420 return req;
421}
422
/* Bulk-IN completion: recycle the request and wake any blocked writer. */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	/* any transfer failure drops the whole session into error state */
	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}
434
/* Bulk-OUT completion: flag rx_done and wake the reader; the request
 * itself stays owned by the reader (rx_req[] is not a free list).
 */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}
445
/* Interrupt-IN completion: recycle the event request and wake senders. */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}
457
458static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
459 struct usb_endpoint_descriptor *in_desc,
460 struct usb_endpoint_descriptor *out_desc,
461 struct usb_endpoint_descriptor *intr_desc)
462{
463 struct usb_composite_dev *cdev = dev->cdev;
464 struct usb_request *req;
465 struct usb_ep *ep;
466 int i;
467
468 DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
469
470 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
471 if (!ep) {
472 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
473 return -ENODEV;
474 }
475 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
476 ep->driver_data = dev; /* claim the endpoint */
477 dev->ep_in = ep;
478
479 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
480 if (!ep) {
481 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
482 return -ENODEV;
483 }
484 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
485 ep->driver_data = dev; /* claim the endpoint */
486 dev->ep_out = ep;
487
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800488 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
489 if (!ep) {
490 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
491 return -ENODEV;
492 }
493 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
494 ep->driver_data = dev; /* claim the endpoint */
495 dev->ep_intr = ep;
496
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +0530497retry_tx_alloc:
498 if (mtp_tx_req_len > MTP_BULK_BUFFER_SIZE)
499 mtp_tx_reqs = 4;
500
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800501 /* now allocate requests for our endpoints */
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +0530502 for (i = 0; i < mtp_tx_reqs; i++) {
503 req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
504 if (!req) {
505 if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
506 goto fail;
507 while ((req = mtp_req_get(dev, &dev->tx_idle)))
508 mtp_request_free(req, dev->ep_in);
509 mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
510 mtp_tx_reqs = MTP_TX_REQ_MAX;
511 goto retry_tx_alloc;
512 }
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800513 req->complete = mtp_complete_in;
514 mtp_req_put(dev, &dev->tx_idle, req);
515 }
Pavankumar Kondetie79aa682012-12-19 20:19:35 +0530516
517 /*
518 * The RX buffer should be aligned to EP max packet for
519 * some controllers. At bind time, we don't know the
520 * operational speed. Hence assuming super speed max
521 * packet size.
522 */
523 if (mtp_rx_req_len % 1024)
524 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
525
526retry_rx_alloc:
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800527 for (i = 0; i < RX_REQ_MAX; i++) {
Pavankumar Kondetie79aa682012-12-19 20:19:35 +0530528 req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
529 if (!req) {
530 if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
531 goto fail;
532 for (; i > 0; i--)
533 mtp_request_free(dev->rx_req[i], dev->ep_out);
534 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
535 goto retry_rx_alloc;
536 }
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800537 req->complete = mtp_complete_out;
538 dev->rx_req[i] = req;
539 }
540 for (i = 0; i < INTR_REQ_MAX; i++) {
541 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
542 if (!req)
543 goto fail;
544 req->complete = mtp_complete_intr;
545 mtp_req_put(dev, &dev->intr_idle, req);
546 }
547
548 return 0;
549
550fail:
551 printk(KERN_ERR "mtp_bind() could not allocate requests\n");
552 return -1;
553}
554
/* read() handler for /dev/mtp_usb: blocks until online, queues a single
 * bulk-OUT request (rx_req[0]) and copies the received packet to userspace.
 * Returns bytes read, -ECANCELED on host cancel, or a negative errno.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%d)\n", count);

	/* reads larger than the rx buffer cannot be satisfied in one request */
	if (count > mtp_rx_req_len)
		return -EINVAL;

	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s - count(%d) not multiple of mtu(%d)\n", __func__,
			count, dev->ep_out->maxpacket);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = mtp_rx_req_len;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		/* take back the request if it never completed */
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		/* interrupted by a signal */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		/* truncate to the caller's buffer if the packet is larger */
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	/* restore READY unless we went offline or were canceled meanwhile */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
645
/* write() handler for /dev/mtp_usb: streams userspace data to the bulk-IN
 * endpoint in MTP_BULK_BUFFER_SIZE chunks, appending a zero-length packet
 * when the total is a multiple of the endpoint packet size.  Returns bytes
 * written, -ECANCELED on host cancel, or a negative errno.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%d)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change without a request */
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		/* xfer == 0 only for the final ZLP iteration */
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* on error exit, return the unqueued request to the idle list */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
736
/* Work item: read from a local file and write to USB (bulk IN).
 * Parameters (file, offset, length, header fields) are read from dev->xfer_*
 * published by mtp_ioctl(); the result is written back to dev->xfer_result.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		/* the MTP data header counts toward the total length */
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header (first chunk only) */
			header = (struct mtp_data_header *)req->buf;
			header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}

		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		/* actual chunk size is whatever the file yielded plus header */
		xfer = ret + hdr_size;
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->state != STATE_OFFLINE)
				dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
840
/* Work item: read from USB (bulk OUT) and write to a local file.
 * Double-buffers across rx_req[] so one request can be in flight while the
 * previous one is written to disk.  A count of 0xFFFFFFFF means "read until
 * short/zero-length packet".  Result goes to dev->xfer_result.
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request on the next rx buffer */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously completed buffer to the file */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			/* hand the completed buffer to the write path */
			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
937
/* Queue an MTP event (from the MTP_SEND_EVENT ioctl) on the interrupt
 * endpoint.  Waits up to one second for an idle interrupt request.
 * Returns 0 on success or a negative errno.
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	/* NOTE(review): a signal-interrupted wait also lands here and is
	 * reported as -ETIME rather than -ERESTARTSYS — confirm intended */
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
968
/* ioctl handler for /dev/mtp_usb.  File-transfer ioctls publish their
 * parameters into dev->xfer_* and run on the work queue so vfs_read/
 * vfs_write execute in kernel context; MTP_SEND_EVENT queues an interrupt
 * transfer directly.  Only one ioctl may run at a time (ioctl_excl).
 */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range	mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		/* pair with the smp_rmb() in the work functions */
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
1071
1072static int mtp_open(struct inode *ip, struct file *fp)
1073{
1074 printk(KERN_INFO "mtp_open\n");
1075 if (mtp_lock(&_mtp_dev->open_excl))
1076 return -EBUSY;
1077
1078 /* clear any error condition */
1079 if (_mtp_dev->state != STATE_OFFLINE)
1080 _mtp_dev->state = STATE_READY;
1081
1082 fp->private_data = _mtp_dev;
1083 return 0;
1084}
1085
1086static int mtp_release(struct inode *ip, struct file *fp)
1087{
1088 printk(KERN_INFO "mtp_release\n");
1089
1090 mtp_unlock(&_mtp_dev->open_excl);
1091 return 0;
1092}
1093
/* file operations for /dev/mtp_usb
 * (character-device entry points registered via the miscdevice below;
 * read/write/ioctl bodies are defined earlier in this file)
 */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};
1103
/* misc character device node for userspace access; the minor number is
 * assigned dynamically at misc_register() time in mtp_setup()
 */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1109
/* Handle ep0 control requests addressed to the MTP function:
 * the Microsoft MTP OS string descriptor, the vendor-specific MS OS
 * feature descriptor, and the MTP class requests CANCEL and
 * GET_DEVICE_STATUS.  Returns the number of bytes queued on ep0 for the
 * data/status phase, or -EOPNOTSUPP when the request is not ours.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string: a standard GET_DESCRIPTOR for string
	 * index MTP_OS_STRING_ID (0xEE), which Windows probes to detect
	 * MS OS descriptor support.  Reply is clamped to w_length.
	 */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor (vendor request 1, IN direction,
		 * wIndex 4 or 5 — presumably the MS "extended compat ID" /
		 * "extended properties" indices; confirm against the MS OS
		 * descriptor spec)
		 */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* only flag cancellation if a transfer is actually in
			 * flight; blocked readers/writers are woken so they
			 * can observe the state change
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;
			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		/* short reply needs a ZLP so the host sees the end of data */
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1200
1201static int
1202mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1203{
1204 struct usb_composite_dev *cdev = c->cdev;
1205 struct mtp_dev *dev = func_to_mtp(f);
1206 int id;
1207 int ret;
1208
1209 dev->cdev = cdev;
1210 DBG(cdev, "mtp_function_bind dev: %p\n", dev);
1211
1212 /* allocate interface ID(s) */
1213 id = usb_interface_id(c, f);
1214 if (id < 0)
1215 return id;
1216 mtp_interface_desc.bInterfaceNumber = id;
1217
1218 /* allocate endpoints */
1219 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1220 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1221 if (ret)
1222 return ret;
1223
1224 /* support high speed hardware */
1225 if (gadget_is_dualspeed(c->cdev->gadget)) {
1226 mtp_highspeed_in_desc.bEndpointAddress =
1227 mtp_fullspeed_in_desc.bEndpointAddress;
1228 mtp_highspeed_out_desc.bEndpointAddress =
1229 mtp_fullspeed_out_desc.bEndpointAddress;
1230 }
1231
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301232 /* support super speed hardware */
1233 if (gadget_is_superspeed(c->cdev->gadget)) {
1234 mtp_superspeed_in_desc.bEndpointAddress =
1235 mtp_fullspeed_in_desc.bEndpointAddress;
1236 mtp_superspeed_out_desc.bEndpointAddress =
1237 mtp_fullspeed_out_desc.bEndpointAddress;
1238 }
1239
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001240 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1241 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
1242 f->name, dev->ep_in->name, dev->ep_out->name);
1243 return 0;
1244}
1245
1246static void
1247mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1248{
1249 struct mtp_dev *dev = func_to_mtp(f);
1250 struct usb_request *req;
1251 int i;
1252
1253 while ((req = mtp_req_get(dev, &dev->tx_idle)))
1254 mtp_request_free(req, dev->ep_in);
1255 for (i = 0; i < RX_REQ_MAX; i++)
1256 mtp_request_free(dev->rx_req[i], dev->ep_out);
1257 while ((req = mtp_req_get(dev, &dev->intr_idle)))
1258 mtp_request_free(req, dev->ep_intr);
1259 dev->state = STATE_OFFLINE;
1260}
1261
1262static int mtp_function_set_alt(struct usb_function *f,
1263 unsigned intf, unsigned alt)
1264{
1265 struct mtp_dev *dev = func_to_mtp(f);
1266 struct usb_composite_dev *cdev = f->config->cdev;
1267 int ret;
1268
1269 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1270
1271 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001272 if (ret) {
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001273 dev->ep_in->desc = NULL;
1274 ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
1275 dev->ep_in->name, ret);
1276 return ret;
1277 }
1278 ret = usb_ep_enable(dev->ep_in);
1279 if (ret) {
1280 ERROR(cdev, "failed to enable ep %s, result %d\n",
1281 dev->ep_in->name, ret);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001282 return ret;
1283 }
1284
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001285 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1286 if (ret) {
1287 dev->ep_out->desc = NULL;
1288 ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
1289 dev->ep_out->name, ret);
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001290 usb_ep_disable(dev->ep_in);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001291 return ret;
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001292 }
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001293 ret = usb_ep_enable(dev->ep_out);
1294 if (ret) {
1295 ERROR(cdev, "failed to enable ep %s, result %d\n",
1296 dev->ep_out->name, ret);
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001297 usb_ep_disable(dev->ep_in);
1298 return ret;
1299 }
Tatyana Brokhmancf709c12011-06-28 16:33:48 +03001300 dev->ep_intr->desc = &mtp_intr_desc;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001301 ret = usb_ep_enable(dev->ep_intr);
1302 if (ret) {
1303 usb_ep_disable(dev->ep_out);
1304 usb_ep_disable(dev->ep_in);
1305 return ret;
1306 }
1307 dev->state = STATE_READY;
1308
1309 /* readers may be blocked waiting for us to go online */
1310 wake_up(&dev->read_wq);
1311 return 0;
1312}
1313
1314static void mtp_function_disable(struct usb_function *f)
1315{
1316 struct mtp_dev *dev = func_to_mtp(f);
1317 struct usb_composite_dev *cdev = dev->cdev;
1318
1319 DBG(cdev, "mtp_function_disable\n");
1320 dev->state = STATE_OFFLINE;
1321 usb_ep_disable(dev->ep_in);
1322 usb_ep_disable(dev->ep_out);
1323 usb_ep_disable(dev->ep_intr);
1324
1325 /* readers may be blocked waiting for us to go online */
1326 wake_up(&dev->read_wq);
1327
1328 VDBG(cdev, "%s disabled\n", dev->function.name);
1329}
1330
1331static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
1332{
1333 struct mtp_dev *dev = _mtp_dev;
1334 int ret = 0;
1335
1336 printk(KERN_INFO "mtp_bind_config\n");
1337
1338 /* allocate a string ID for our interface */
1339 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1340 ret = usb_string_id(c->cdev);
1341 if (ret < 0)
1342 return ret;
1343 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1344 mtp_interface_desc.iInterface = ret;
1345 }
1346
1347 dev->cdev = c->cdev;
1348 dev->function.name = "mtp";
1349 dev->function.strings = mtp_strings;
1350 if (ptp_config) {
1351 dev->function.descriptors = fs_ptp_descs;
1352 dev->function.hs_descriptors = hs_ptp_descs;
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301353 if (gadget_is_superspeed(c->cdev->gadget))
1354 dev->function.ss_descriptors = ss_ptp_descs;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001355 } else {
1356 dev->function.descriptors = fs_mtp_descs;
1357 dev->function.hs_descriptors = hs_mtp_descs;
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301358 if (gadget_is_superspeed(c->cdev->gadget))
1359 dev->function.ss_descriptors = ss_mtp_descs;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001360 }
1361 dev->function.bind = mtp_function_bind;
1362 dev->function.unbind = mtp_function_unbind;
1363 dev->function.set_alt = mtp_function_set_alt;
1364 dev->function.disable = mtp_function_disable;
1365
1366 return usb_add_function(c, &dev->function);
1367}
1368
1369static int mtp_setup(void)
1370{
1371 struct mtp_dev *dev;
1372 int ret;
1373
1374 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1375 if (!dev)
1376 return -ENOMEM;
1377
1378 spin_lock_init(&dev->lock);
1379 init_waitqueue_head(&dev->read_wq);
1380 init_waitqueue_head(&dev->write_wq);
1381 init_waitqueue_head(&dev->intr_wq);
1382 atomic_set(&dev->open_excl, 0);
1383 atomic_set(&dev->ioctl_excl, 0);
1384 INIT_LIST_HEAD(&dev->tx_idle);
1385 INIT_LIST_HEAD(&dev->intr_idle);
1386
1387 dev->wq = create_singlethread_workqueue("f_mtp");
1388 if (!dev->wq) {
1389 ret = -ENOMEM;
1390 goto err1;
1391 }
1392 INIT_WORK(&dev->send_file_work, send_file_work);
1393 INIT_WORK(&dev->receive_file_work, receive_file_work);
1394
1395 _mtp_dev = dev;
1396
1397 ret = misc_register(&mtp_device);
1398 if (ret)
1399 goto err2;
1400
1401 return 0;
1402
1403err2:
1404 destroy_workqueue(dev->wq);
1405err1:
1406 _mtp_dev = NULL;
1407 kfree(dev);
1408 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1409 return ret;
1410}
1411
1412static void mtp_cleanup(void)
1413{
1414 struct mtp_dev *dev = _mtp_dev;
1415
1416 if (!dev)
1417 return;
1418
1419 misc_deregister(&mtp_device);
1420 destroy_workqueue(dev->wq);
1421 _mtp_dev = NULL;
1422 kfree(dev);
1423}