blob: a4192cacfa387aa9e40607e62a477c63b111ab6e [file] [log] [blame]
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28
29#include <linux/types.h>
30#include <linux/file.h>
31#include <linux/device.h>
32#include <linux/miscdevice.h>
33
34#include <linux/usb.h>
35#include <linux/usb_usual.h>
36#include <linux/usb/ch9.h>
37#include <linux/usb/f_mtp.h>
38
39#define MTP_BULK_BUFFER_SIZE 16384
40#define INTR_BUFFER_SIZE 28
41
42/* String IDs */
43#define INTERFACE_STRING_INDEX 0
44
45/* values for mtp_dev.state */
46#define STATE_OFFLINE 0 /* initial state, disconnected */
47#define STATE_READY 1 /* ready for userspace calls */
48#define STATE_BUSY 2 /* processing userspace calls */
49#define STATE_CANCELED 3 /* transaction canceled by host */
50#define STATE_ERROR 4 /* error from completion routine */
51
52/* number of tx and rx requests to allocate */
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +053053#define MTP_TX_REQ_MAX 8
Benoit Gobyf0fbc482011-12-19 14:37:50 -080054#define RX_REQ_MAX 2
55#define INTR_REQ_MAX 5
56
57/* ID for Microsoft MTP OS String */
58#define MTP_OS_STRING_ID 0xEE
59
/* MTP class requests */
61#define MTP_REQ_CANCEL 0x64
62#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
63#define MTP_REQ_RESET 0x66
64#define MTP_REQ_GET_DEVICE_STATUS 0x67
65
66/* constants for device status */
67#define MTP_RESPONSE_OK 0x2001
68#define MTP_RESPONSE_DEVICE_BUSY 0x2019
69
Pavankumar Kondetie79aa682012-12-19 20:19:35 +053070unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
71module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
72
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +053073unsigned int mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
74module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
75
76unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
77module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
78
Benoit Gobyf0fbc482011-12-19 14:37:50 -080079static const char mtp_shortname[] = "mtp_usb";
80
/* Per-gadget MTP function state. One global instance (_mtp_dev). */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;		/* protects state and the request lists */

	struct usb_ep *ep_in;		/* bulk IN: device -> host */
	struct usb_ep *ep_out;		/* bulk OUT: host -> device */
	struct usb_ep *ep_intr;		/* interrupt IN: MTP events */

	int state;			/* one of the STATE_* values above */

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;	/* idle bulk-IN requests */
	struct list_head intr_idle;	/* idle interrupt requests */

	wait_queue_head_t read_wq;	/* woken on RX completion/state change */
	wait_queue_head_t write_wq;	/* woken when a tx request is recycled */
	wait_queue_head_t intr_wq;	/* woken when an intr request is recycled */
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;			/* set by mtp_complete_out() */

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;		/* file being transferred */
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;	/* nonzero: prepend MTP data header */
	uint16_t xfer_command;		/* MTP command code for the header */
	uint32_t xfer_transaction_id;	/* transaction id for the header */
	int xfer_result;		/* result handed back to the ioctl */
};
120
121static struct usb_interface_descriptor mtp_interface_desc = {
122 .bLength = USB_DT_INTERFACE_SIZE,
123 .bDescriptorType = USB_DT_INTERFACE,
124 .bInterfaceNumber = 0,
125 .bNumEndpoints = 3,
126 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
127 .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
128 .bInterfaceProtocol = 0,
129};
130
131static struct usb_interface_descriptor ptp_interface_desc = {
132 .bLength = USB_DT_INTERFACE_SIZE,
133 .bDescriptorType = USB_DT_INTERFACE,
134 .bInterfaceNumber = 0,
135 .bNumEndpoints = 3,
136 .bInterfaceClass = USB_CLASS_STILL_IMAGE,
137 .bInterfaceSubClass = 1,
138 .bInterfaceProtocol = 1,
139};
140
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +0530141static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
142 .bLength = USB_DT_ENDPOINT_SIZE,
143 .bDescriptorType = USB_DT_ENDPOINT,
144 .bEndpointAddress = USB_DIR_IN,
145 .bmAttributes = USB_ENDPOINT_XFER_BULK,
146 .wMaxPacketSize = __constant_cpu_to_le16(1024),
147};
148
149static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
150 .bLength = sizeof mtp_superspeed_in_comp_desc,
151 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
152
153 /* the following 2 values can be tweaked if necessary */
154 /* .bMaxBurst = 0, */
155 /* .bmAttributes = 0, */
156};
157
158static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
159 .bLength = USB_DT_ENDPOINT_SIZE,
160 .bDescriptorType = USB_DT_ENDPOINT,
161 .bEndpointAddress = USB_DIR_OUT,
162 .bmAttributes = USB_ENDPOINT_XFER_BULK,
163 .wMaxPacketSize = __constant_cpu_to_le16(1024),
164};
165
166static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
167 .bLength = sizeof mtp_superspeed_out_comp_desc,
168 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
169
170 /* the following 2 values can be tweaked if necessary */
171 /* .bMaxBurst = 0, */
172 /* .bmAttributes = 0, */
173};
174
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800175static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
176 .bLength = USB_DT_ENDPOINT_SIZE,
177 .bDescriptorType = USB_DT_ENDPOINT,
178 .bEndpointAddress = USB_DIR_IN,
179 .bmAttributes = USB_ENDPOINT_XFER_BULK,
180 .wMaxPacketSize = __constant_cpu_to_le16(512),
181};
182
183static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
184 .bLength = USB_DT_ENDPOINT_SIZE,
185 .bDescriptorType = USB_DT_ENDPOINT,
186 .bEndpointAddress = USB_DIR_OUT,
187 .bmAttributes = USB_ENDPOINT_XFER_BULK,
188 .wMaxPacketSize = __constant_cpu_to_le16(512),
189};
190
191static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
192 .bLength = USB_DT_ENDPOINT_SIZE,
193 .bDescriptorType = USB_DT_ENDPOINT,
194 .bEndpointAddress = USB_DIR_IN,
195 .bmAttributes = USB_ENDPOINT_XFER_BULK,
196};
197
198static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
199 .bLength = USB_DT_ENDPOINT_SIZE,
200 .bDescriptorType = USB_DT_ENDPOINT,
201 .bEndpointAddress = USB_DIR_OUT,
202 .bmAttributes = USB_ENDPOINT_XFER_BULK,
203};
204
205static struct usb_endpoint_descriptor mtp_intr_desc = {
206 .bLength = USB_DT_ENDPOINT_SIZE,
207 .bDescriptorType = USB_DT_ENDPOINT,
208 .bEndpointAddress = USB_DIR_IN,
209 .bmAttributes = USB_ENDPOINT_XFER_INT,
210 .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
211 .bInterval = 6,
212};
213
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +0530214static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
215 .bLength = sizeof mtp_superspeed_intr_comp_desc,
216 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
217
218 /* the following 3 values can be tweaked if necessary */
219 /* .bMaxBurst = 0, */
220 /* .bmAttributes = 0, */
221 .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
222};
223
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800224static struct usb_descriptor_header *fs_mtp_descs[] = {
225 (struct usb_descriptor_header *) &mtp_interface_desc,
226 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
227 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
228 (struct usb_descriptor_header *) &mtp_intr_desc,
229 NULL,
230};
231
232static struct usb_descriptor_header *hs_mtp_descs[] = {
233 (struct usb_descriptor_header *) &mtp_interface_desc,
234 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
235 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
236 (struct usb_descriptor_header *) &mtp_intr_desc,
237 NULL,
238};
239
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +0530240static struct usb_descriptor_header *ss_mtp_descs[] = {
241 (struct usb_descriptor_header *) &mtp_interface_desc,
242 (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
243 (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
244 (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
245 (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
246 (struct usb_descriptor_header *) &mtp_intr_desc,
247 (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
248 NULL,
249};
250
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800251static struct usb_descriptor_header *fs_ptp_descs[] = {
252 (struct usb_descriptor_header *) &ptp_interface_desc,
253 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
254 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
255 (struct usb_descriptor_header *) &mtp_intr_desc,
256 NULL,
257};
258
259static struct usb_descriptor_header *hs_ptp_descs[] = {
260 (struct usb_descriptor_header *) &ptp_interface_desc,
261 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
262 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
263 (struct usb_descriptor_header *) &mtp_intr_desc,
264 NULL,
265};
266
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +0530267static struct usb_descriptor_header *ss_ptp_descs[] = {
268 (struct usb_descriptor_header *) &ptp_interface_desc,
269 (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
270 (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
271 (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
272 (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
273 (struct usb_descriptor_header *) &mtp_intr_desc,
274 (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
275 NULL,
276};
277
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800278static struct usb_string mtp_string_defs[] = {
279 /* Naming interface "MTP" so libmtp will recognize us */
280 [INTERFACE_STRING_INDEX].s = "MTP",
281 { }, /* end of list */
282};
283
284static struct usb_gadget_strings mtp_string_table = {
285 .language = 0x0409, /* en-US */
286 .strings = mtp_string_defs,
287};
288
289static struct usb_gadget_strings *mtp_strings[] = {
290 &mtp_string_table,
291 NULL,
292};
293
294/* Microsoft MTP OS String */
295static u8 mtp_os_string[] = {
296 18, /* sizeof(mtp_os_string) */
297 USB_DT_STRING,
298 /* Signature field: "MSFT100" */
299 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
300 /* vendor code */
301 1,
302 /* padding */
303 0
304};
305
306/* Microsoft Extended Configuration Descriptor Header Section */
307struct mtp_ext_config_desc_header {
308 __le32 dwLength;
309 __u16 bcdVersion;
310 __le16 wIndex;
311 __u8 bCount;
312 __u8 reserved[7];
313};
314
315/* Microsoft Extended Configuration Descriptor Function Section */
316struct mtp_ext_config_desc_function {
317 __u8 bFirstInterfaceNumber;
318 __u8 bInterfaceCount;
319 __u8 compatibleID[8];
320 __u8 subCompatibleID[8];
321 __u8 reserved[6];
322};
323
324/* MTP Extended Configuration Descriptor */
325struct {
326 struct mtp_ext_config_desc_header header;
327 struct mtp_ext_config_desc_function function;
328} mtp_ext_config_desc = {
329 .header = {
330 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
331 .bcdVersion = __constant_cpu_to_le16(0x0100),
332 .wIndex = __constant_cpu_to_le16(4),
333 .bCount = __constant_cpu_to_le16(1),
334 },
335 .function = {
336 .bFirstInterfaceNumber = 0,
337 .bInterfaceCount = 1,
338 .compatibleID = { 'M', 'T', 'P' },
339 },
340};
341
342struct mtp_device_status {
343 __le16 wLength;
344 __le16 wCode;
345};
346
347/* temporary variable used between mtp_open() and mtp_gadget_bind() */
348static struct mtp_dev *_mtp_dev;
349
/* Map a struct usb_function back to its enclosing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
354
355static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
356{
357 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
358 if (!req)
359 return NULL;
360
361 /* now allocate buffers for the requests */
362 req->buf = kmalloc(buffer_size, GFP_KERNEL);
363 if (!req->buf) {
364 usb_ep_free_request(ep, req);
365 return NULL;
366 }
367
368 return req;
369}
370
371static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
372{
373 if (req) {
374 kfree(req->buf);
375 usb_ep_free_request(ep, req);
376 }
377}
378
379static inline int mtp_lock(atomic_t *excl)
380{
381 if (atomic_inc_return(excl) == 1) {
382 return 0;
383 } else {
384 atomic_dec(excl);
385 return -1;
386 }
387}
388
/* Release an exclusion counter taken with mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
393
/* add a request to the tail of a list; dev->lock protects the list
 * (callable from both process and interrupt context)
 */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}
404
405/* remove a request from the head of a list */
406static struct usb_request
407*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
408{
409 unsigned long flags;
410 struct usb_request *req;
411
412 spin_lock_irqsave(&dev->lock, flags);
413 if (list_empty(head)) {
414 req = 0;
415 } else {
416 req = list_first_entry(head, struct usb_request, list);
417 list_del(&req->list);
418 }
419 spin_unlock_irqrestore(&dev->lock, flags);
420 return req;
421}
422
/* Bulk-IN completion handler: flag errors, recycle the request onto
 * tx_idle, and wake any sleeping writer.  Runs in interrupt context.
 */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}
434
/* Bulk-OUT completion handler: mark the read done (readers poll
 * dev->rx_done) and wake any sleeping reader.  Runs in interrupt context.
 */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}
445
/* Interrupt-IN completion handler: flag errors, recycle the request onto
 * intr_idle, and wake mtp_send_event().  Runs in interrupt context.
 */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}
457
458static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
459 struct usb_endpoint_descriptor *in_desc,
460 struct usb_endpoint_descriptor *out_desc,
461 struct usb_endpoint_descriptor *intr_desc)
462{
463 struct usb_composite_dev *cdev = dev->cdev;
464 struct usb_request *req;
465 struct usb_ep *ep;
466 int i;
467
468 DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
469
470 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
471 if (!ep) {
472 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
473 return -ENODEV;
474 }
475 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
476 ep->driver_data = dev; /* claim the endpoint */
477 dev->ep_in = ep;
478
479 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
480 if (!ep) {
481 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
482 return -ENODEV;
483 }
484 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
485 ep->driver_data = dev; /* claim the endpoint */
486 dev->ep_out = ep;
487
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800488 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
489 if (!ep) {
490 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
491 return -ENODEV;
492 }
493 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
494 ep->driver_data = dev; /* claim the endpoint */
495 dev->ep_intr = ep;
496
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +0530497retry_tx_alloc:
498 if (mtp_tx_req_len > MTP_BULK_BUFFER_SIZE)
499 mtp_tx_reqs = 4;
500
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800501 /* now allocate requests for our endpoints */
Vijayavardhan Vennapusa2537d562013-05-20 16:06:01 +0530502 for (i = 0; i < mtp_tx_reqs; i++) {
503 req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
504 if (!req) {
505 if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
506 goto fail;
507 while ((req = mtp_req_get(dev, &dev->tx_idle)))
508 mtp_request_free(req, dev->ep_in);
509 mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
510 mtp_tx_reqs = MTP_TX_REQ_MAX;
511 goto retry_tx_alloc;
512 }
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800513 req->complete = mtp_complete_in;
514 mtp_req_put(dev, &dev->tx_idle, req);
515 }
Pavankumar Kondetie79aa682012-12-19 20:19:35 +0530516
517 /*
518 * The RX buffer should be aligned to EP max packet for
519 * some controllers. At bind time, we don't know the
520 * operational speed. Hence assuming super speed max
521 * packet size.
522 */
523 if (mtp_rx_req_len % 1024)
524 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
525
526retry_rx_alloc:
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800527 for (i = 0; i < RX_REQ_MAX; i++) {
Pavankumar Kondetie79aa682012-12-19 20:19:35 +0530528 req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
529 if (!req) {
530 if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
531 goto fail;
532 for (; i > 0; i--)
533 mtp_request_free(dev->rx_req[i], dev->ep_out);
534 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
535 goto retry_rx_alloc;
536 }
Benoit Gobyf0fbc482011-12-19 14:37:50 -0800537 req->complete = mtp_complete_out;
538 dev->rx_req[i] = req;
539 }
540 for (i = 0; i < INTR_REQ_MAX; i++) {
541 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
542 if (!req)
543 goto fail;
544 req->complete = mtp_complete_intr;
545 mtp_req_put(dev, &dev->intr_idle, req);
546 }
547
548 return 0;
549
550fail:
551 printk(KERN_ERR "mtp_bind() could not allocate requests\n");
552 return -1;
553}
554
/* read() on /dev/mtp_usb: receive up to @count bytes from the bulk-OUT
 * endpoint.  The request length is rounded up to the endpoint max packet
 * (some controllers require it); @count larger than one RX buffer is
 * rejected.  Blocks until the device is online and a packet arrives.
 * Returns bytes read, or -ECANCELED if the host canceled the transaction.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer, len;
	int ret = 0;

	DBG(cdev, "mtp_read(%d)\n", count);

	/* some h/w needs the transfer length aligned to the EP max packet */
	len = ALIGN(count, dev->ep_out->maxpacket);

	if (len > mtp_rx_req_len)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = len;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		/* dequeue only if the completion never fired */
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		/* interrupted by a signal */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		/* copy at most what the caller asked for */
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
643
/* write() on /dev/mtp_usb: send @count bytes to the bulk-IN endpoint in
 * chunks of at most mtp_tx_req_len, blocking for an idle tx request per
 * chunk.  A zero-length packet is appended when @count is a multiple of
 * the endpoint packet size, to mark end-of-transfer.
 * Returns bytes written, or -ECANCELED if the host canceled.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%d)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change, not by a request */
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;
		/* xfer == 0 only on the final ZLP iteration */
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* on early exit, return the unqueued request to the idle list */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
734
/* read from a local file and write to USB
 *
 * Work item for MTP_SEND_FILE / MTP_SEND_FILE_WITH_HEADER: streams
 * dev->xfer_file to the bulk-IN endpoint, optionally prepending an MTP
 * data header on the first packet.  Runs on dev->wq so vfs_read() can use
 * the kernel-space request buffers.  The result is reported back to the
 * ioctl through dev->xfer_result.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters (paired with smp_wmb() in mtp_ioctl) */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}

		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		/* actual payload may be shorter than requested (EOF) */
		xfer = ret + hdr_size;
		hdr_size = 0;	/* header goes out only once */

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->state != STATE_OFFLINE)
				dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result (paired with smp_rmb() in mtp_ioctl) */
	dev->xfer_result = r;
	smp_wmb();
}
838
/* read from USB and write to a local file
 *
 * Work item for MTP_RECEIVE_FILE: double-buffers across rx_req[] so one
 * request can be queued on the bulk-OUT endpoint while the previous one
 * is written to dev->xfer_file.  A count of 0xFFFFFFFF means "read until
 * a short/zero-length packet".  The result is reported back to the ioctl
 * through dev->xfer_result.
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters (paired with smp_wmb() in mtp_ioctl) */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously completed buffer to the file */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			/* hand the filled buffer to the write phase */
			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result (paired with smp_rmb() in mtp_ioctl) */
	dev->xfer_result = r;
	smp_wmb();
}
935
/* Send an MTP event packet on the interrupt endpoint.
 * Copies event->length bytes (at most INTR_BUFFER_SIZE) from userspace,
 * waiting up to one second for an idle interrupt request.
 * Returns 0 on success, -EINVAL/-ENODEV/-ETIME/-EFAULT on failure.
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		/* recycle the request on failure */
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
966
/* ioctl handler for /dev/mtp_usb.  Only one ioctl runs at a time
 * (ioctl_excl).  File transfers are handed to dev->wq because vfs_read/
 * vfs_write need kernel context; MTP_SEND_EVENT bypasses the bulk state
 * machine entirely.
 */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters (read by the work functions
		 * after their smp_rmb())
		 */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
1069
1070static int mtp_open(struct inode *ip, struct file *fp)
1071{
1072 printk(KERN_INFO "mtp_open\n");
1073 if (mtp_lock(&_mtp_dev->open_excl))
1074 return -EBUSY;
1075
1076 /* clear any error condition */
1077 if (_mtp_dev->state != STATE_OFFLINE)
1078 _mtp_dev->state = STATE_READY;
1079
1080 fp->private_data = _mtp_dev;
1081 return 0;
1082}
1083
1084static int mtp_release(struct inode *ip, struct file *fp)
1085{
1086 printk(KERN_INFO "mtp_release\n");
1087
1088 mtp_unlock(&_mtp_dev->open_excl);
1089 return 0;
1090}
1091
/* file operations for /dev/mtp_usb
 *
 * Userspace drives the MTP data path through this character device:
 * read/write for bulk transfers and ioctl for file transfers and
 * event injection (handlers are defined earlier in this file).
 */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};
1101
/* misc character device registered in mtp_setup(); the node name
 * comes from mtp_shortname ("mtp_usb").
 */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1107
/* Handle ep0 control requests directed at the MTP function:
 * the Microsoft OS string descriptor and extended config descriptor
 * (used by Windows hosts for driver matching), plus the MTP class
 * requests MTP_REQ_CANCEL and MTP_REQ_GET_DEVICE_STATUS.
 *
 * Returns the number of bytes queued for the ep0 data stage, or a
 * negative value (-EOPNOTSUPP) when the request is not handled here.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string: a GET_DESCRIPTOR for string index 0xEE
	 * (MTP_OS_STRING_ID) is how Windows probes for OS descriptors.
	 */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		/* never copy more than the host asked for */
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		/* bRequest 1 is the vendor code advertised in the OS string;
		 * w_index 4/5 select the extended config descriptor
		 */
		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* flag the cancel and kick any blocked transfers so
			 * they can observe STATE_CANCELED and bail out
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;
			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		/* short-packet terminate if we send less than requested */
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1198
1199static int
1200mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1201{
1202 struct usb_composite_dev *cdev = c->cdev;
1203 struct mtp_dev *dev = func_to_mtp(f);
1204 int id;
1205 int ret;
1206
1207 dev->cdev = cdev;
1208 DBG(cdev, "mtp_function_bind dev: %p\n", dev);
1209
1210 /* allocate interface ID(s) */
1211 id = usb_interface_id(c, f);
1212 if (id < 0)
1213 return id;
1214 mtp_interface_desc.bInterfaceNumber = id;
1215
1216 /* allocate endpoints */
1217 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1218 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1219 if (ret)
1220 return ret;
1221
1222 /* support high speed hardware */
1223 if (gadget_is_dualspeed(c->cdev->gadget)) {
1224 mtp_highspeed_in_desc.bEndpointAddress =
1225 mtp_fullspeed_in_desc.bEndpointAddress;
1226 mtp_highspeed_out_desc.bEndpointAddress =
1227 mtp_fullspeed_out_desc.bEndpointAddress;
1228 }
1229
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301230 /* support super speed hardware */
1231 if (gadget_is_superspeed(c->cdev->gadget)) {
1232 mtp_superspeed_in_desc.bEndpointAddress =
1233 mtp_fullspeed_in_desc.bEndpointAddress;
1234 mtp_superspeed_out_desc.bEndpointAddress =
1235 mtp_fullspeed_out_desc.bEndpointAddress;
1236 }
1237
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001238 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1239 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
1240 f->name, dev->ep_in->name, dev->ep_out->name);
1241 return 0;
1242}
1243
1244static void
1245mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1246{
1247 struct mtp_dev *dev = func_to_mtp(f);
1248 struct usb_request *req;
1249 int i;
1250
1251 while ((req = mtp_req_get(dev, &dev->tx_idle)))
1252 mtp_request_free(req, dev->ep_in);
1253 for (i = 0; i < RX_REQ_MAX; i++)
1254 mtp_request_free(dev->rx_req[i], dev->ep_out);
1255 while ((req = mtp_req_get(dev, &dev->intr_idle)))
1256 mtp_request_free(req, dev->ep_intr);
1257 dev->state = STATE_OFFLINE;
1258}
1259
1260static int mtp_function_set_alt(struct usb_function *f,
1261 unsigned intf, unsigned alt)
1262{
1263 struct mtp_dev *dev = func_to_mtp(f);
1264 struct usb_composite_dev *cdev = f->config->cdev;
1265 int ret;
1266
1267 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1268
1269 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001270 if (ret) {
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001271 dev->ep_in->desc = NULL;
1272 ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
1273 dev->ep_in->name, ret);
1274 return ret;
1275 }
1276 ret = usb_ep_enable(dev->ep_in);
1277 if (ret) {
1278 ERROR(cdev, "failed to enable ep %s, result %d\n",
1279 dev->ep_in->name, ret);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001280 return ret;
1281 }
1282
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001283 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1284 if (ret) {
1285 dev->ep_out->desc = NULL;
1286 ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
1287 dev->ep_out->name, ret);
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001288 usb_ep_disable(dev->ep_in);
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001289 return ret;
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001290 }
Tatyana Brokhman31ac3522011-06-28 15:33:50 +02001291 ret = usb_ep_enable(dev->ep_out);
1292 if (ret) {
1293 ERROR(cdev, "failed to enable ep %s, result %d\n",
1294 dev->ep_out->name, ret);
Tatyana Brokhmanebd3f392011-06-28 16:33:50 +03001295 usb_ep_disable(dev->ep_in);
1296 return ret;
1297 }
Tatyana Brokhmancf709c12011-06-28 16:33:48 +03001298 dev->ep_intr->desc = &mtp_intr_desc;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001299 ret = usb_ep_enable(dev->ep_intr);
1300 if (ret) {
1301 usb_ep_disable(dev->ep_out);
1302 usb_ep_disable(dev->ep_in);
1303 return ret;
1304 }
1305 dev->state = STATE_READY;
1306
1307 /* readers may be blocked waiting for us to go online */
1308 wake_up(&dev->read_wq);
1309 return 0;
1310}
1311
1312static void mtp_function_disable(struct usb_function *f)
1313{
1314 struct mtp_dev *dev = func_to_mtp(f);
1315 struct usb_composite_dev *cdev = dev->cdev;
1316
1317 DBG(cdev, "mtp_function_disable\n");
1318 dev->state = STATE_OFFLINE;
1319 usb_ep_disable(dev->ep_in);
1320 usb_ep_disable(dev->ep_out);
1321 usb_ep_disable(dev->ep_intr);
1322
1323 /* readers may be blocked waiting for us to go online */
1324 wake_up(&dev->read_wq);
1325
1326 VDBG(cdev, "%s disabled\n", dev->function.name);
1327}
1328
1329static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
1330{
1331 struct mtp_dev *dev = _mtp_dev;
1332 int ret = 0;
1333
1334 printk(KERN_INFO "mtp_bind_config\n");
1335
1336 /* allocate a string ID for our interface */
1337 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1338 ret = usb_string_id(c->cdev);
1339 if (ret < 0)
1340 return ret;
1341 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1342 mtp_interface_desc.iInterface = ret;
1343 }
1344
1345 dev->cdev = c->cdev;
1346 dev->function.name = "mtp";
1347 dev->function.strings = mtp_strings;
1348 if (ptp_config) {
1349 dev->function.descriptors = fs_ptp_descs;
1350 dev->function.hs_descriptors = hs_ptp_descs;
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301351 if (gadget_is_superspeed(c->cdev->gadget))
1352 dev->function.ss_descriptors = ss_ptp_descs;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001353 } else {
1354 dev->function.descriptors = fs_mtp_descs;
1355 dev->function.hs_descriptors = hs_mtp_descs;
Pavankumar Kondeti6f94bc92012-08-03 09:34:32 +05301356 if (gadget_is_superspeed(c->cdev->gadget))
1357 dev->function.ss_descriptors = ss_mtp_descs;
Benoit Gobyf0fbc482011-12-19 14:37:50 -08001358 }
1359 dev->function.bind = mtp_function_bind;
1360 dev->function.unbind = mtp_function_unbind;
1361 dev->function.set_alt = mtp_function_set_alt;
1362 dev->function.disable = mtp_function_disable;
1363
1364 return usb_add_function(c, &dev->function);
1365}
1366
1367static int mtp_setup(void)
1368{
1369 struct mtp_dev *dev;
1370 int ret;
1371
1372 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1373 if (!dev)
1374 return -ENOMEM;
1375
1376 spin_lock_init(&dev->lock);
1377 init_waitqueue_head(&dev->read_wq);
1378 init_waitqueue_head(&dev->write_wq);
1379 init_waitqueue_head(&dev->intr_wq);
1380 atomic_set(&dev->open_excl, 0);
1381 atomic_set(&dev->ioctl_excl, 0);
1382 INIT_LIST_HEAD(&dev->tx_idle);
1383 INIT_LIST_HEAD(&dev->intr_idle);
1384
1385 dev->wq = create_singlethread_workqueue("f_mtp");
1386 if (!dev->wq) {
1387 ret = -ENOMEM;
1388 goto err1;
1389 }
1390 INIT_WORK(&dev->send_file_work, send_file_work);
1391 INIT_WORK(&dev->receive_file_work, receive_file_work);
1392
1393 _mtp_dev = dev;
1394
1395 ret = misc_register(&mtp_device);
1396 if (ret)
1397 goto err2;
1398
1399 return 0;
1400
1401err2:
1402 destroy_workqueue(dev->wq);
1403err1:
1404 _mtp_dev = NULL;
1405 kfree(dev);
1406 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1407 return ret;
1408}
1409
1410static void mtp_cleanup(void)
1411{
1412 struct mtp_dev *dev = _mtp_dev;
1413
1414 if (!dev)
1415 return;
1416
1417 misc_deregister(&mtp_device);
1418 destroy_workqueue(dev->wq);
1419 _mtp_dev = NULL;
1420 kfree(dev);
1421}