blob: a5b646394426974230b39958e55a7b4515e134df [file] [log] [blame]
Mike Lockwoodba83b012010-04-16 10:39:22 -04001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
Mike Lockwoodba83b012010-04-16 10:39:22 -040028
29#include <linux/types.h>
30#include <linux/file.h>
31#include <linux/device.h>
32#include <linux/miscdevice.h>
33
34#include <linux/usb.h>
35#include <linux/usb_usual.h>
36#include <linux/usb/ch9.h>
Mike Lockwoodba83b012010-04-16 10:39:22 -040037#include <linux/usb/f_mtp.h>
38
Benoit Gobyaab96812011-04-19 20:37:33 -070039#define MTP_BULK_BUFFER_SIZE 16384
Mike Lockwood1de4d4d2010-07-06 19:27:52 -040040#define INTR_BUFFER_SIZE 28
Mike Lockwoodba83b012010-04-16 10:39:22 -040041
42/* String IDs */
43#define INTERFACE_STRING_INDEX 0
44
45/* values for mtp_dev.state */
46#define STATE_OFFLINE 0 /* initial state, disconnected */
47#define STATE_READY 1 /* ready for userspace calls */
48#define STATE_BUSY 2 /* processing userspace calls */
49#define STATE_CANCELED 3 /* transaction canceled by host */
50#define STATE_ERROR 4 /* error from completion routine */
51
52/* number of tx and rx requests to allocate */
53#define TX_REQ_MAX 4
54#define RX_REQ_MAX 2
Mike Lockwoodba3673b2011-05-01 20:36:19 -040055#define INTR_REQ_MAX 5
Mike Lockwoodba83b012010-04-16 10:39:22 -040056
Mike Lockwoodba83b012010-04-16 10:39:22 -040057/* ID for Microsoft MTP OS String */
58#define MTP_OS_STRING_ID 0xEE
59
60/* MTP class reqeusts */
61#define MTP_REQ_CANCEL 0x64
62#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
63#define MTP_REQ_RESET 0x66
64#define MTP_REQ_GET_DEVICE_STATUS 0x67
65
66/* constants for device status */
67#define MTP_RESPONSE_OK 0x2001
68#define MTP_RESPONSE_DEVICE_BUSY 0x2019
69
Benoit Gobyaab96812011-04-19 20:37:33 -070070static const char mtp_shortname[] = "mtp_usb";
Mike Lockwoodba83b012010-04-16 10:39:22 -040071
/* per-device state for the MTP gadget function */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;	/* protects state transitions and request lists */

	struct usb_ep *ep_in;	/* bulk IN (device to host) */
	struct usb_ep *ep_out;	/* bulk OUT (host to device) */
	struct usb_ep *ep_intr;	/* interrupt IN (MTP events) */

	int state;	/* one of the STATE_* values defined above */

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;	/* idle bulk IN requests */
	struct list_head intr_idle;	/* idle interrupt requests */

	wait_queue_head_t read_wq;	/* woken when an OUT request completes */
	wait_queue_head_t write_wq;	/* woken when an IN request is recycled */
	wait_queue_head_t intr_wq;	/* woken when an intr request is recycled */
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;	/* set by mtp_complete_out() when rx finishes */

	/* for processing MTP_SEND_FILE and MTP_RECEIVE_FILE
	 * ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;		/* file being transferred */
	loff_t xfer_file_offset;	/* starting offset within xfer_file */
	int64_t xfer_file_length;	/* number of bytes to transfer */
	int xfer_result;		/* result code written by the work funcs */
};
108
/* interface descriptor used in MTP mode (vendor-specific class) */
static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bInterfaceNumber = 0,
	.bNumEndpoints = 3,	/* bulk in, bulk out, interrupt in */
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol = 0,
};

/* interface descriptor used in PTP mode (still-image class) */
static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bInterfaceNumber = 0,
	.bNumEndpoints = 3,
	.bInterfaceClass = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass = 1,
	.bInterfaceProtocol = 1,
};
128
/* high-speed bulk endpoints: 512-byte max packet size */
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

/* full-speed bulk endpoints: wMaxPacketSize left 0 here — presumably
 * filled in by the gadget framework during endpoint autoconfig; confirm
 */
static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

/* interrupt endpoint used to deliver MTP events to the host */
static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval = 6,	/* polling interval */
};
167
/* full-speed descriptor set for MTP mode */
static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

/* high-speed descriptor set for MTP mode */
static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

/* full-speed descriptor set for PTP mode (same endpoints, PTP interface) */
static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

/* high-speed descriptor set for PTP mode */
static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};
199
static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s = "MTP",
	{ },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language = 0x0409,	/* en-US */
	.strings = mtp_string_defs,
};

/* NULL-terminated table of string tables, one per language */
static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};
227
228/* Microsoft Extended Configuration Descriptor Header Section */
229struct mtp_ext_config_desc_header {
230 __le32 dwLength;
231 __u16 bcdVersion;
232 __le16 wIndex;
233 __u8 bCount;
234 __u8 reserved[7];
235};
236
237/* Microsoft Extended Configuration Descriptor Function Section */
238struct mtp_ext_config_desc_function {
239 __u8 bFirstInterfaceNumber;
240 __u8 bInterfaceCount;
241 __u8 compatibleID[8];
242 __u8 subCompatibleID[8];
243 __u8 reserved[6];
244};
245
246/* MTP Extended Configuration Descriptor */
247struct {
248 struct mtp_ext_config_desc_header header;
249 struct mtp_ext_config_desc_function function;
250} mtp_ext_config_desc = {
251 .header = {
252 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
253 .bcdVersion = __constant_cpu_to_le16(0x0100),
254 .wIndex = __constant_cpu_to_le16(4),
255 .bCount = __constant_cpu_to_le16(1),
256 },
257 .function = {
258 .bFirstInterfaceNumber = 0,
259 .bInterfaceCount = 1,
260 .compatibleID = { 'M', 'T', 'P' },
261 },
262};
263
/* payload returned for MTP_REQ_GET_DEVICE_STATUS class requests */
struct mtp_device_status {
	__le16 wLength;
	__le16 wCode;	/* MTP_RESPONSE_OK or MTP_RESPONSE_DEVICE_BUSY */
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;
271
/* map a usb_function back to its containing mtp_dev */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
276
277static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
278{
279 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
280 if (!req)
281 return NULL;
282
283 /* now allocate buffers for the requests */
284 req->buf = kmalloc(buffer_size, GFP_KERNEL);
285 if (!req->buf) {
286 usb_ep_free_request(ep, req);
287 return NULL;
288 }
289
290 return req;
291}
292
293static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
294{
295 if (req) {
296 kfree(req->buf);
297 usb_ep_free_request(ep, req);
298 }
299}
300
Benoit Gobyaab96812011-04-19 20:37:33 -0700301static inline int mtp_lock(atomic_t *excl)
Mike Lockwoodba83b012010-04-16 10:39:22 -0400302{
303 if (atomic_inc_return(excl) == 1) {
304 return 0;
305 } else {
306 atomic_dec(excl);
307 return -1;
308 }
309}
310
/* release a claim previously taken with mtp_lock() */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
315
316/* add a request to the tail of a list */
Benoit Gobyaab96812011-04-19 20:37:33 -0700317static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
Mike Lockwoodba83b012010-04-16 10:39:22 -0400318 struct usb_request *req)
319{
320 unsigned long flags;
321
322 spin_lock_irqsave(&dev->lock, flags);
323 list_add_tail(&req->list, head);
324 spin_unlock_irqrestore(&dev->lock, flags);
325}
326
327/* remove a request from the head of a list */
Benoit Gobyaab96812011-04-19 20:37:33 -0700328static struct usb_request
329*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
Mike Lockwoodba83b012010-04-16 10:39:22 -0400330{
331 unsigned long flags;
332 struct usb_request *req;
333
334 spin_lock_irqsave(&dev->lock, flags);
335 if (list_empty(head)) {
336 req = 0;
337 } else {
338 req = list_first_entry(head, struct usb_request, list);
339 list_del(&req->list);
340 }
341 spin_unlock_irqrestore(&dev->lock, flags);
342 return req;
343}
344
/* completion callback for bulk IN requests: recycle the request onto
 * the idle list and wake any writer waiting for a free request.
 * On error, latch STATE_ERROR so callers stop transferring.
 */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}
356
/* completion callback for bulk OUT requests: flag completion via
 * rx_done and wake the reader; on error, latch STATE_ERROR.
 */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}
367
/* completion callback for interrupt requests: recycle the request onto
 * the idle list and wake any mtp_send_event() waiter.
 */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}
379
Benoit Gobyaab96812011-04-19 20:37:33 -0700380static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
Mike Lockwoodba83b012010-04-16 10:39:22 -0400381 struct usb_endpoint_descriptor *in_desc,
382 struct usb_endpoint_descriptor *out_desc,
383 struct usb_endpoint_descriptor *intr_desc)
384{
385 struct usb_composite_dev *cdev = dev->cdev;
386 struct usb_request *req;
387 struct usb_ep *ep;
388 int i;
389
390 DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
391
392 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
393 if (!ep) {
394 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
395 return -ENODEV;
396 }
397 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
398 ep->driver_data = dev; /* claim the endpoint */
399 dev->ep_in = ep;
400
401 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
402 if (!ep) {
403 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
404 return -ENODEV;
405 }
406 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
407 ep->driver_data = dev; /* claim the endpoint */
408 dev->ep_out = ep;
409
410 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
411 if (!ep) {
412 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
413 return -ENODEV;
414 }
415 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
416 ep->driver_data = dev; /* claim the endpoint */
417 dev->ep_out = ep;
418
419 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
420 if (!ep) {
421 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
422 return -ENODEV;
423 }
424 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
425 ep->driver_data = dev; /* claim the endpoint */
426 dev->ep_intr = ep;
427
428 /* now allocate requests for our endpoints */
429 for (i = 0; i < TX_REQ_MAX; i++) {
Benoit Gobyaab96812011-04-19 20:37:33 -0700430 req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400431 if (!req)
432 goto fail;
433 req->complete = mtp_complete_in;
Benoit Gobyaab96812011-04-19 20:37:33 -0700434 mtp_req_put(dev, &dev->tx_idle, req);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400435 }
436 for (i = 0; i < RX_REQ_MAX; i++) {
Benoit Gobyaab96812011-04-19 20:37:33 -0700437 req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400438 if (!req)
439 goto fail;
440 req->complete = mtp_complete_out;
441 dev->rx_req[i] = req;
442 }
Mike Lockwoodba3673b2011-05-01 20:36:19 -0400443 for (i = 0; i < INTR_REQ_MAX; i++) {
444 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
445 if (!req)
446 goto fail;
447 req->complete = mtp_complete_intr;
448 mtp_req_put(dev, &dev->intr_idle, req);
449 }
Mike Lockwoodba83b012010-04-16 10:39:22 -0400450
451 return 0;
452
453fail:
454 printk(KERN_ERR "mtp_bind() could not allocate requests\n");
455 return -1;
456}
457
/* read() handler for /dev/mtp_usb: receive one bulk OUT transfer from
 * the host and copy it to userspace.  Blocks until the device is online
 * and a packet arrives.  Returns bytes read, or a negative errno
 * (-ECANCELED if the host canceled the transaction).
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%d)\n", count);

	/* a single pre-allocated rx request is used, so reads are capped */
	if (count > MTP_BULK_BUFFER_SIZE)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		/* interrupted by a signal: reclaim the queued request */
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	/* report cancel/offline transitions that happened while we worked */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}
534
/* write() handler for /dev/mtp_usb: send userspace data to the host
 * over the bulk IN endpoint, splitting it into MTP_BULK_BUFFER_SIZE
 * chunks.  Returns bytes accepted or a negative errno.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%d)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0) {
		sendZLP = 1;
	}

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		/* xfer is 0 only for the final ZLP; skip the copy then */
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* a non-NULL req here means we bailed out before queuing it */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
626
/* read from a local file and write to USB */
/* work-queue handler for MTP_SEND_FILE: streams xfer_file_length bytes
 * of dev->xfer_file out the bulk IN endpoint, then a ZLP if the total
 * is packet-aligned.  Result is left in dev->xfer_result.
 */
static void send_file_work(struct work_struct *data) {
	struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters (paired with the smp_wmb in mtp_ioctl) */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((dev->xfer_file_length & (dev->ep_in->maxpacket - 1)) == 0) {
		sendZLP = 1;
	}

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		ret = vfs_read(filp, req->buf, xfer, &offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		/* vfs_read may return fewer bytes than requested */
		xfer = ret;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
707
/* read from USB and write to a local file */
/* work-queue handler for MTP_RECEIVE_FILE: double-buffers between the
 * rx_req pool and vfs_write so a USB read can be in flight while the
 * previous buffer is written to dev->xfer_file.  Result is left in
 * dev->xfer_result.
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev, receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters (paired with the smp_wmb in mtp_ioctl) */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > MTP_BULK_BUFFER_SIZE
					? MTP_BULK_BUFFER_SIZE : count);
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously received buffer to the file */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/* short packet is used to signal EOF for sizes > 4 gig */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
789
/* queue an MTP event for delivery on the interrupt endpoint.
 * Copies event->length bytes from userspace into an idle interrupt
 * request, waiting up to one second for one to become available.
 * Returns 0 on success or a negative errno (-ETIME if no request
 * became free in time).
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	/* wait (up to 1s) for an idle interrupt request */
	ret = wait_event_interruptible_timeout(dev->intr_wq,
		(req = mtp_req_get(dev, &dev->intr_idle)), msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		/* queueing failed: recycle the request */
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
819
/* ioctl() handler for /dev/mtp_usb.
 * MTP_SEND_FILE / MTP_RECEIVE_FILE run the transfer synchronously on a
 * work queue; MTP_SEND_EVENT queues an interrupt packet.  Only one
 * ioctl may run at a time (ioctl_excl).
 */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	{
		struct mtp_file_range mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters (read back by the work functions) */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE)
			work = &dev->send_file_work;
		else
			work = &dev->receive_file_work;

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
914
915static int mtp_open(struct inode *ip, struct file *fp)
916{
917 printk(KERN_INFO "mtp_open\n");
Benoit Gobyaab96812011-04-19 20:37:33 -0700918 if (mtp_lock(&_mtp_dev->open_excl))
Mike Lockwoodba83b012010-04-16 10:39:22 -0400919 return -EBUSY;
920
Mike Lockwoodba83b012010-04-16 10:39:22 -0400921 /* clear any error condition */
922 if (_mtp_dev->state != STATE_OFFLINE)
923 _mtp_dev->state = STATE_READY;
924
925 fp->private_data = _mtp_dev;
926 return 0;
927}
928
929static int mtp_release(struct inode *ip, struct file *fp)
930{
931 printk(KERN_INFO "mtp_release\n");
932
Benoit Gobyaab96812011-04-19 20:37:33 -0700933 mtp_unlock(&_mtp_dev->open_excl);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400934 return 0;
935}
936
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	/* ioctls drive file transfers (via the work queue) and event sends */
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};
946
/* misc character device with a dynamically assigned minor; the node name
 * comes from mtp_shortname (defined earlier in this file)
 */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
952
Benoit Gobyaab96812011-04-19 20:37:33 -0700953static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
954 const struct usb_ctrlrequest *ctrl)
955{
Benoit Gobyaab96812011-04-19 20:37:33 -0700956 int value = -EOPNOTSUPP;
957 u16 w_index = le16_to_cpu(ctrl->wIndex);
958 u16 w_value = le16_to_cpu(ctrl->wValue);
959 u16 w_length = le16_to_cpu(ctrl->wLength);
960
961 VDBG(cdev, "mtp_ctrlrequest "
962 "%02x.%02x v%04x i%04x l%u\n",
963 ctrl->bRequestType, ctrl->bRequest,
964 w_value, w_index, w_length);
965
966 /* Handle MTP OS string */
Mike Lockwoodcf7addf2011-06-01 22:17:36 -0400967 if (ctrl->bRequestType ==
Benoit Gobyaab96812011-04-19 20:37:33 -0700968 (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
969 && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
970 && (w_value >> 8) == USB_DT_STRING
971 && (w_value & 0xFF) == MTP_OS_STRING_ID) {
972 value = (w_length < sizeof(mtp_os_string)
973 ? w_length : sizeof(mtp_os_string));
974 memcpy(cdev->req->buf, mtp_os_string, value);
975 }
976 /* respond with data transfer or status phase? */
977 if (value >= 0) {
978 int rc;
979 cdev->req->zero = value < w_length;
980 cdev->req->length = value;
981 rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
982 if (rc < 0)
983 ERROR(cdev, "%s setup response queue error\n", __func__);
984 }
985 return value;
986}
987
Mike Lockwoodba83b012010-04-16 10:39:22 -0400988static int
989mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
990{
991 struct usb_composite_dev *cdev = c->cdev;
Benoit Gobyaab96812011-04-19 20:37:33 -0700992 struct mtp_dev *dev = func_to_mtp(f);
Mike Lockwoodba83b012010-04-16 10:39:22 -0400993 int id;
994 int ret;
995
996 dev->cdev = cdev;
997 DBG(cdev, "mtp_function_bind dev: %p\n", dev);
998
999 /* allocate interface ID(s) */
1000 id = usb_interface_id(c, f);
1001 if (id < 0)
1002 return id;
1003 mtp_interface_desc.bInterfaceNumber = id;
1004
1005 /* allocate endpoints */
Benoit Gobyaab96812011-04-19 20:37:33 -07001006 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
Mike Lockwoodba83b012010-04-16 10:39:22 -04001007 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1008 if (ret)
1009 return ret;
1010
1011 /* support high speed hardware */
1012 if (gadget_is_dualspeed(c->cdev->gadget)) {
1013 mtp_highspeed_in_desc.bEndpointAddress =
1014 mtp_fullspeed_in_desc.bEndpointAddress;
1015 mtp_highspeed_out_desc.bEndpointAddress =
1016 mtp_fullspeed_out_desc.bEndpointAddress;
1017 }
1018
1019 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1020 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
1021 f->name, dev->ep_in->name, dev->ep_out->name);
1022 return 0;
1023}
1024
1025static void
1026mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1027{
Benoit Gobyaab96812011-04-19 20:37:33 -07001028 struct mtp_dev *dev = func_to_mtp(f);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001029 struct usb_request *req;
1030 int i;
1031
Benoit Gobyaab96812011-04-19 20:37:33 -07001032 while ((req = mtp_req_get(dev, &dev->tx_idle)))
Mike Lockwoodba83b012010-04-16 10:39:22 -04001033 mtp_request_free(req, dev->ep_in);
1034 for (i = 0; i < RX_REQ_MAX; i++)
1035 mtp_request_free(dev->rx_req[i], dev->ep_out);
Mike Lockwoodba3673b2011-05-01 20:36:19 -04001036 while ((req = mtp_req_get(dev, &dev->intr_idle)))
1037 mtp_request_free(req, dev->ep_intr);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001038 dev->state = STATE_OFFLINE;
Mike Lockwoodba83b012010-04-16 10:39:22 -04001039}
1040
/* Handle vendor and class specific control requests for the MTP
 * interface.  Returns the number of bytes to send in the data stage,
 * or -EOPNOTSUPP if the request is not recognized (the composite core
 * then stalls ep0).
 */
static int mtp_function_setup(struct usb_function *f,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_function_setup "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		/* NOTE(review): bRequest 1 and w_index 4/5 look like the
		 * Microsoft OS descriptor request (extended compat ID /
		 * extended properties) — confirm against the MS OS 1.0
		 * descriptor spec and the mtp_os_string definition above.
		 */
		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			/* reply is clamped to the host-requested length */
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	}
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* only a transfer in flight can be canceled; wake
			 * both directions so blocked read/write paths see
			 * the state change
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;
			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s setup response queue error\n", __func__);
	}

	if (value == -EOPNOTSUPP)
		VDBG(cdev,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	return value;
}
1131
1132static int mtp_function_set_alt(struct usb_function *f,
1133 unsigned intf, unsigned alt)
1134{
Benoit Gobyaab96812011-04-19 20:37:33 -07001135 struct mtp_dev *dev = func_to_mtp(f);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001136 struct usb_composite_dev *cdev = f->config->cdev;
1137 int ret;
1138
1139 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1140 ret = usb_ep_enable(dev->ep_in,
1141 ep_choose(cdev->gadget,
1142 &mtp_highspeed_in_desc,
1143 &mtp_fullspeed_in_desc));
1144 if (ret)
1145 return ret;
1146 ret = usb_ep_enable(dev->ep_out,
1147 ep_choose(cdev->gadget,
1148 &mtp_highspeed_out_desc,
1149 &mtp_fullspeed_out_desc));
1150 if (ret) {
1151 usb_ep_disable(dev->ep_in);
1152 return ret;
1153 }
1154 ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc);
1155 if (ret) {
1156 usb_ep_disable(dev->ep_out);
1157 usb_ep_disable(dev->ep_in);
1158 return ret;
1159 }
1160 dev->state = STATE_READY;
1161
1162 /* readers may be blocked waiting for us to go online */
1163 wake_up(&dev->read_wq);
1164 return 0;
1165}
1166
1167static void mtp_function_disable(struct usb_function *f)
1168{
Benoit Gobyaab96812011-04-19 20:37:33 -07001169 struct mtp_dev *dev = func_to_mtp(f);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001170 struct usb_composite_dev *cdev = dev->cdev;
1171
1172 DBG(cdev, "mtp_function_disable\n");
1173 dev->state = STATE_OFFLINE;
1174 usb_ep_disable(dev->ep_in);
1175 usb_ep_disable(dev->ep_out);
1176 usb_ep_disable(dev->ep_intr);
1177
1178 /* readers may be blocked waiting for us to go online */
1179 wake_up(&dev->read_wq);
1180
1181 VDBG(cdev, "%s disabled\n", dev->function.name);
1182}
1183
Mike Lockwoodcf7addf2011-06-01 22:17:36 -04001184static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
Mike Lockwoodba83b012010-04-16 10:39:22 -04001185{
Benoit Gobyaab96812011-04-19 20:37:33 -07001186 struct mtp_dev *dev = _mtp_dev;
Mike Lockwood090cbc42011-02-07 11:51:07 -05001187 int ret = 0;
Mike Lockwoodba83b012010-04-16 10:39:22 -04001188
1189 printk(KERN_INFO "mtp_bind_config\n");
1190
Mike Lockwoodba83b012010-04-16 10:39:22 -04001191 /* allocate a string ID for our interface */
1192 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1193 ret = usb_string_id(c->cdev);
1194 if (ret < 0)
1195 return ret;
1196 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1197 mtp_interface_desc.iInterface = ret;
1198 }
1199
Mike Lockwoodba83b012010-04-16 10:39:22 -04001200 dev->cdev = c->cdev;
1201 dev->function.name = "mtp";
Mike Lockwoodcf7addf2011-06-01 22:17:36 -04001202 dev->function.strings = mtp_strings;
1203 if (ptp_config) {
1204 dev->function.descriptors = fs_ptp_descs;
1205 dev->function.hs_descriptors = hs_ptp_descs;
1206 } else {
1207 dev->function.descriptors = fs_mtp_descs;
1208 dev->function.hs_descriptors = hs_mtp_descs;
1209 }
Mike Lockwoodba83b012010-04-16 10:39:22 -04001210 dev->function.bind = mtp_function_bind;
1211 dev->function.unbind = mtp_function_unbind;
1212 dev->function.setup = mtp_function_setup;
1213 dev->function.set_alt = mtp_function_set_alt;
1214 dev->function.disable = mtp_function_disable;
1215
Benoit Gobyaab96812011-04-19 20:37:33 -07001216 return usb_add_function(c, &dev->function);
1217}
1218
1219static int mtp_setup(void)
1220{
1221 struct mtp_dev *dev;
1222 int ret;
1223
1224 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1225 if (!dev)
1226 return -ENOMEM;
1227
1228 spin_lock_init(&dev->lock);
1229 init_waitqueue_head(&dev->read_wq);
1230 init_waitqueue_head(&dev->write_wq);
Mike Lockwoodba3673b2011-05-01 20:36:19 -04001231 init_waitqueue_head(&dev->intr_wq);
Benoit Gobyaab96812011-04-19 20:37:33 -07001232 atomic_set(&dev->open_excl, 0);
1233 atomic_set(&dev->ioctl_excl, 0);
1234 INIT_LIST_HEAD(&dev->tx_idle);
Mike Lockwoodba3673b2011-05-01 20:36:19 -04001235 INIT_LIST_HEAD(&dev->intr_idle);
Benoit Gobyaab96812011-04-19 20:37:33 -07001236
1237 dev->wq = create_singlethread_workqueue("f_mtp");
1238 if (!dev->wq) {
1239 ret = -ENOMEM;
1240 goto err1;
1241 }
1242 INIT_WORK(&dev->send_file_work, send_file_work);
1243 INIT_WORK(&dev->receive_file_work, receive_file_work);
1244
Mike Lockwoodba83b012010-04-16 10:39:22 -04001245 _mtp_dev = dev;
1246
1247 ret = misc_register(&mtp_device);
1248 if (ret)
Mike Lockwoodba83b012010-04-16 10:39:22 -04001249 goto err2;
1250
1251 return 0;
1252
1253err2:
Benoit Gobyaab96812011-04-19 20:37:33 -07001254 destroy_workqueue(dev->wq);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001255err1:
Benoit Gobyaab96812011-04-19 20:37:33 -07001256 _mtp_dev = NULL;
Mike Lockwoodba83b012010-04-16 10:39:22 -04001257 kfree(dev);
1258 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1259 return ret;
1260}
1261
Benoit Gobyaab96812011-04-19 20:37:33 -07001262static void mtp_cleanup(void)
Mike Lockwoodba83b012010-04-16 10:39:22 -04001263{
Benoit Gobyaab96812011-04-19 20:37:33 -07001264 struct mtp_dev *dev = _mtp_dev;
1265
1266 if (!dev)
1267 return;
1268
1269 misc_deregister(&mtp_device);
1270 destroy_workqueue(dev->wq);
1271 _mtp_dev = NULL;
1272 kfree(dev);
Mike Lockwoodba83b012010-04-16 10:39:22 -04001273}