blob: ea171641cb307f35aa5c90292fd3757ec9ff4b10 [file] [log] [blame]
Benoit Goby27d01e52011-12-19 14:37:50 -08001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28
Hemant Kumarfc2b8f02016-05-02 11:18:48 -070029#include <linux/seq_file.h>
30#include <linux/debugfs.h>
Benoit Goby27d01e52011-12-19 14:37:50 -080031#include <linux/types.h>
32#include <linux/file.h>
33#include <linux/device.h>
34#include <linux/miscdevice.h>
35
36#include <linux/usb.h>
37#include <linux/usb_usual.h>
38#include <linux/usb/ch9.h>
39#include <linux/usb/f_mtp.h>
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080040#include <linux/configfs.h>
41#include <linux/usb/composite.h>
42
43#include "configfs.h"
Benoit Goby27d01e52011-12-19 14:37:50 -080044
Hemant Kumarc044ad02016-05-02 11:27:21 -070045#define MTP_RX_BUFFER_INIT_SIZE 1048576
Hemant Kumar28dd7c42016-12-29 15:47:31 -080046#define MTP_TX_BUFFER_INIT_SIZE 1048576
Benoit Goby27d01e52011-12-19 14:37:50 -080047#define MTP_BULK_BUFFER_SIZE 16384
48#define INTR_BUFFER_SIZE 28
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080049#define MAX_INST_NAME_LEN 40
Witold Sciuk6db5c3e2016-02-13 11:08:37 +010050#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL
Benoit Goby27d01e52011-12-19 14:37:50 -080051
52/* String IDs */
53#define INTERFACE_STRING_INDEX 0
54
55/* values for mtp_dev.state */
56#define STATE_OFFLINE 0 /* initial state, disconnected */
57#define STATE_READY 1 /* ready for userspace calls */
58#define STATE_BUSY 2 /* processing userspace calls */
59#define STATE_CANCELED 3 /* transaction canceled by host */
60#define STATE_ERROR 4 /* error from completion routine */
61
62/* number of tx and rx requests to allocate */
Hemant Kumarc07309f2016-05-01 17:44:56 -070063#define MTP_TX_REQ_MAX 8
Benoit Goby27d01e52011-12-19 14:37:50 -080064#define RX_REQ_MAX 2
65#define INTR_REQ_MAX 5
66
67/* ID for Microsoft MTP OS String */
68#define MTP_OS_STRING_ID 0xEE
69
70/* MTP class reqeusts */
71#define MTP_REQ_CANCEL 0x64
72#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
73#define MTP_REQ_RESET 0x66
74#define MTP_REQ_GET_DEVICE_STATUS 0x67
75
76/* constants for device status */
77#define MTP_RESPONSE_OK 0x2001
78#define MTP_RESPONSE_DEVICE_BUSY 0x2019
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080079#define DRIVER_NAME "mtp"
Benoit Goby27d01e52011-12-19 14:37:50 -080080
Hemant Kumarfc2b8f02016-05-02 11:18:48 -070081#define MAX_ITERATION 100
82
Hemant Kumarc044ad02016-05-02 11:27:21 -070083unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
Hemant Kumar4aed14e2016-05-01 17:35:36 -070084module_param(mtp_rx_req_len, uint, 0644);
85
Hemant Kumar28dd7c42016-12-29 15:47:31 -080086unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
Hemant Kumarc07309f2016-05-01 17:44:56 -070087module_param(mtp_tx_req_len, uint, 0644);
88
89unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
90module_param(mtp_tx_reqs, uint, 0644);
91
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080092static const char mtp_shortname[] = DRIVER_NAME "_usb";
Benoit Goby27d01e52011-12-19 14:37:50 -080093
/* Driver state for the MTP gadget function, shared between the USB
 * function callbacks and the userspace-facing misc device.
 */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;	/* guards state and the request lists */

	struct usb_ep *ep_in;	/* bulk IN: device -> host */
	struct usb_ep *ep_out;	/* bulk OUT: host -> device */
	struct usb_ep *ep_intr;	/* interrupt IN: MTP event notifications */

	int state;		/* one of the STATE_* values above */

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;	/* idle bulk-IN requests */
	struct list_head intr_idle;	/* idle interrupt requests */

	wait_queue_head_t read_wq;	/* woken on rx completion / state change */
	wait_queue_head_t write_wq;	/* woken when a tx request is recycled */
	wait_queue_head_t intr_wq;	/* woken when an intr request is recycled */
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;	/* set by mtp_complete_out() when an OUT request finishes */

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;		/* file being sent or received */
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;	/* nonzero: prepend an mtp_data_header */
	uint16_t xfer_command;		/* MTP command code placed in the header */
	uint32_t xfer_transaction_id;	/* MTP transaction id placed in the header */
	int xfer_result;		/* completion status handed back to the ioctl */
	/* ring of VFS read/write timing samples; presumably surfaced via
	 * debugfs (added together with the debugfs includes) — TODO confirm
	 */
	struct {
		unsigned long vfs_rbytes;
		unsigned long vfs_wbytes;
		unsigned int vfs_rtime;
		unsigned int vfs_wtime;
	} perf[MAX_ITERATION];
	unsigned int dbg_read_index;	/* next perf[] slot for read samples */
	unsigned int dbg_write_index;	/* next perf[] slot for write samples */
};
141
142static struct usb_interface_descriptor mtp_interface_desc = {
143 .bLength = USB_DT_INTERFACE_SIZE,
144 .bDescriptorType = USB_DT_INTERFACE,
145 .bInterfaceNumber = 0,
146 .bNumEndpoints = 3,
147 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
148 .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
149 .bInterfaceProtocol = 0,
150};
151
152static struct usb_interface_descriptor ptp_interface_desc = {
153 .bLength = USB_DT_INTERFACE_SIZE,
154 .bDescriptorType = USB_DT_INTERFACE,
155 .bInterfaceNumber = 0,
156 .bNumEndpoints = 3,
157 .bInterfaceClass = USB_CLASS_STILL_IMAGE,
158 .bInterfaceSubClass = 1,
159 .bInterfaceProtocol = 1,
160};
161
Mark Kuo1b61b272015-08-20 13:01:46 +0800162static struct usb_endpoint_descriptor mtp_ss_in_desc = {
163 .bLength = USB_DT_ENDPOINT_SIZE,
164 .bDescriptorType = USB_DT_ENDPOINT,
165 .bEndpointAddress = USB_DIR_IN,
166 .bmAttributes = USB_ENDPOINT_XFER_BULK,
167 .wMaxPacketSize = __constant_cpu_to_le16(1024),
168};
169
170static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
171 .bLength = sizeof(mtp_ss_in_comp_desc),
172 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
173 /* .bMaxBurst = DYNAMIC, */
174};
175
176static struct usb_endpoint_descriptor mtp_ss_out_desc = {
177 .bLength = USB_DT_ENDPOINT_SIZE,
178 .bDescriptorType = USB_DT_ENDPOINT,
179 .bEndpointAddress = USB_DIR_OUT,
180 .bmAttributes = USB_ENDPOINT_XFER_BULK,
181 .wMaxPacketSize = __constant_cpu_to_le16(1024),
182};
183
184static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
185 .bLength = sizeof(mtp_ss_out_comp_desc),
186 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
187 /* .bMaxBurst = DYNAMIC, */
188};
189
Benoit Goby27d01e52011-12-19 14:37:50 -0800190static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
191 .bLength = USB_DT_ENDPOINT_SIZE,
192 .bDescriptorType = USB_DT_ENDPOINT,
193 .bEndpointAddress = USB_DIR_IN,
194 .bmAttributes = USB_ENDPOINT_XFER_BULK,
195 .wMaxPacketSize = __constant_cpu_to_le16(512),
196};
197
198static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
199 .bLength = USB_DT_ENDPOINT_SIZE,
200 .bDescriptorType = USB_DT_ENDPOINT,
201 .bEndpointAddress = USB_DIR_OUT,
202 .bmAttributes = USB_ENDPOINT_XFER_BULK,
203 .wMaxPacketSize = __constant_cpu_to_le16(512),
204};
205
206static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
207 .bLength = USB_DT_ENDPOINT_SIZE,
208 .bDescriptorType = USB_DT_ENDPOINT,
209 .bEndpointAddress = USB_DIR_IN,
210 .bmAttributes = USB_ENDPOINT_XFER_BULK,
211};
212
213static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
214 .bLength = USB_DT_ENDPOINT_SIZE,
215 .bDescriptorType = USB_DT_ENDPOINT,
216 .bEndpointAddress = USB_DIR_OUT,
217 .bmAttributes = USB_ENDPOINT_XFER_BULK,
218};
219
220static struct usb_endpoint_descriptor mtp_intr_desc = {
221 .bLength = USB_DT_ENDPOINT_SIZE,
222 .bDescriptorType = USB_DT_ENDPOINT,
223 .bEndpointAddress = USB_DIR_IN,
224 .bmAttributes = USB_ENDPOINT_XFER_INT,
225 .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
226 .bInterval = 6,
227};
228
Mark Kuo1b61b272015-08-20 13:01:46 +0800229static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
230 .bLength = sizeof(mtp_intr_ss_comp_desc),
231 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
Mark Kuo21a3e932015-09-11 16:12:59 +0800232 .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
Mark Kuo1b61b272015-08-20 13:01:46 +0800233};
234
Benoit Goby27d01e52011-12-19 14:37:50 -0800235static struct usb_descriptor_header *fs_mtp_descs[] = {
236 (struct usb_descriptor_header *) &mtp_interface_desc,
237 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
238 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
239 (struct usb_descriptor_header *) &mtp_intr_desc,
240 NULL,
241};
242
243static struct usb_descriptor_header *hs_mtp_descs[] = {
244 (struct usb_descriptor_header *) &mtp_interface_desc,
245 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
246 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
247 (struct usb_descriptor_header *) &mtp_intr_desc,
248 NULL,
249};
250
Mark Kuo1b61b272015-08-20 13:01:46 +0800251static struct usb_descriptor_header *ss_mtp_descs[] = {
252 (struct usb_descriptor_header *) &mtp_interface_desc,
253 (struct usb_descriptor_header *) &mtp_ss_in_desc,
254 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
255 (struct usb_descriptor_header *) &mtp_ss_out_desc,
256 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
257 (struct usb_descriptor_header *) &mtp_intr_desc,
258 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
259 NULL,
260};
261
Benoit Goby27d01e52011-12-19 14:37:50 -0800262static struct usb_descriptor_header *fs_ptp_descs[] = {
263 (struct usb_descriptor_header *) &ptp_interface_desc,
264 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
265 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
266 (struct usb_descriptor_header *) &mtp_intr_desc,
267 NULL,
268};
269
270static struct usb_descriptor_header *hs_ptp_descs[] = {
271 (struct usb_descriptor_header *) &ptp_interface_desc,
272 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
273 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
274 (struct usb_descriptor_header *) &mtp_intr_desc,
275 NULL,
276};
277
Mark Kuo1b61b272015-08-20 13:01:46 +0800278static struct usb_descriptor_header *ss_ptp_descs[] = {
279 (struct usb_descriptor_header *) &ptp_interface_desc,
280 (struct usb_descriptor_header *) &mtp_ss_in_desc,
281 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
282 (struct usb_descriptor_header *) &mtp_ss_out_desc,
283 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
284 (struct usb_descriptor_header *) &mtp_intr_desc,
285 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
286 NULL,
287};
288
Benoit Goby27d01e52011-12-19 14:37:50 -0800289static struct usb_string mtp_string_defs[] = {
290 /* Naming interface "MTP" so libmtp will recognize us */
291 [INTERFACE_STRING_INDEX].s = "MTP",
292 { }, /* end of list */
293};
294
295static struct usb_gadget_strings mtp_string_table = {
296 .language = 0x0409, /* en-US */
297 .strings = mtp_string_defs,
298};
299
300static struct usb_gadget_strings *mtp_strings[] = {
301 &mtp_string_table,
302 NULL,
303};
304
305/* Microsoft MTP OS String */
306static u8 mtp_os_string[] = {
307 18, /* sizeof(mtp_os_string) */
308 USB_DT_STRING,
309 /* Signature field: "MSFT100" */
310 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
311 /* vendor code */
312 1,
313 /* padding */
314 0
315};
316
317/* Microsoft Extended Configuration Descriptor Header Section */
318struct mtp_ext_config_desc_header {
319 __le32 dwLength;
320 __u16 bcdVersion;
321 __le16 wIndex;
322 __u8 bCount;
323 __u8 reserved[7];
324};
325
326/* Microsoft Extended Configuration Descriptor Function Section */
327struct mtp_ext_config_desc_function {
328 __u8 bFirstInterfaceNumber;
329 __u8 bInterfaceCount;
330 __u8 compatibleID[8];
331 __u8 subCompatibleID[8];
332 __u8 reserved[6];
333};
334
335/* MTP Extended Configuration Descriptor */
Jack Pham50439242017-02-07 11:48:25 -0800336struct mtp_ext_config_desc {
Benoit Goby27d01e52011-12-19 14:37:50 -0800337 struct mtp_ext_config_desc_header header;
338 struct mtp_ext_config_desc_function function;
Jack Pham50439242017-02-07 11:48:25 -0800339};
340
341static struct mtp_ext_config_desc mtp_ext_config_desc = {
Benoit Goby27d01e52011-12-19 14:37:50 -0800342 .header = {
343 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
344 .bcdVersion = __constant_cpu_to_le16(0x0100),
345 .wIndex = __constant_cpu_to_le16(4),
Brian Norrise7304622016-02-29 17:44:51 -0800346 .bCount = 1,
Benoit Goby27d01e52011-12-19 14:37:50 -0800347 },
348 .function = {
349 .bFirstInterfaceNumber = 0,
350 .bInterfaceCount = 1,
351 .compatibleID = { 'M', 'T', 'P' },
352 },
353};
354
355struct mtp_device_status {
356 __le16 wLength;
357 __le16 wCode;
358};
359
Colin Crossccebeef2013-11-07 13:08:15 -0800360struct mtp_data_header {
361 /* length of packet, including this header */
362 __le32 length;
363 /* container type (2 for data packet) */
364 __le16 type;
365 /* MTP command code */
366 __le16 command;
367 /* MTP transaction ID */
368 __le32 transaction_id;
369};
370
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -0800371struct mtp_instance {
372 struct usb_function_instance func_inst;
373 const char *name;
374 struct mtp_dev *dev;
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -0700375 char mtp_ext_compat_id[16];
376 struct usb_os_desc mtp_os_desc;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -0800377};
378
Benoit Goby27d01e52011-12-19 14:37:50 -0800379/* temporary variable used between mtp_open() and mtp_gadget_bind() */
380static struct mtp_dev *_mtp_dev;
381
/* Map a usb_function pointer back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
386
387static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
388{
389 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
Anson Jacob051584e2016-11-11 01:10:04 -0500390
Benoit Goby27d01e52011-12-19 14:37:50 -0800391 if (!req)
392 return NULL;
393
394 /* now allocate buffers for the requests */
395 req->buf = kmalloc(buffer_size, GFP_KERNEL);
396 if (!req->buf) {
397 usb_ep_free_request(ep, req);
398 return NULL;
399 }
400
401 return req;
402}
403
404static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
405{
406 if (req) {
407 kfree(req->buf);
408 usb_ep_free_request(ep, req);
409 }
410}
411
412static inline int mtp_lock(atomic_t *excl)
413{
414 if (atomic_inc_return(excl) == 1) {
415 return 0;
416 } else {
417 atomic_dec(excl);
418 return -1;
419 }
420}
421
/* Drop a lock taken with mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
426
427/* add a request to the tail of a list */
428static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
429 struct usb_request *req)
430{
431 unsigned long flags;
432
433 spin_lock_irqsave(&dev->lock, flags);
434 list_add_tail(&req->list, head);
435 spin_unlock_irqrestore(&dev->lock, flags);
436}
437
438/* remove a request from the head of a list */
439static struct usb_request
440*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
441{
442 unsigned long flags;
443 struct usb_request *req;
444
445 spin_lock_irqsave(&dev->lock, flags);
446 if (list_empty(head)) {
447 req = 0;
448 } else {
449 req = list_first_entry(head, struct usb_request, list);
450 list_del(&req->list);
451 }
452 spin_unlock_irqrestore(&dev->lock, flags);
453 return req;
454}
455
456static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
457{
458 struct mtp_dev *dev = _mtp_dev;
459
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530460 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800461 dev->state = STATE_ERROR;
462
463 mtp_req_put(dev, &dev->tx_idle, req);
464
465 wake_up(&dev->write_wq);
466}
467
468static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
469{
470 struct mtp_dev *dev = _mtp_dev;
471
472 dev->rx_done = 1;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530473 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800474 dev->state = STATE_ERROR;
475
476 wake_up(&dev->read_wq);
477}
478
479static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
480{
481 struct mtp_dev *dev = _mtp_dev;
482
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530483 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800484 dev->state = STATE_ERROR;
485
486 mtp_req_put(dev, &dev->intr_idle, req);
487
488 wake_up(&dev->intr_wq);
489}
490
/*
 * Claim the three endpoints (bulk IN, bulk OUT, interrupt IN) from the
 * gadget and pre-allocate every usb_request plus its transfer buffer.
 *
 * Tx and rx buffers start at the module-parameter sizes (1 MiB by
 * default); if the system cannot supply them, everything allocated so
 * far is freed and the loop retries with MTP_BULK_BUFFER_SIZE.
 *
 * Returns 0 on success, -ENODEV when autoconfig cannot find an
 * endpoint, -1 when request allocation fails even at the fallback size.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;	/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;	/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;	/* claim the endpoint */
	dev->ep_intr = ep;

retry_tx_alloc:
	/* now allocate requests for our endpoints */
	for (i = 0; i < mtp_tx_reqs; i++) {
		req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
		if (!req) {
			/* already at the minimum size: give up */
			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* free what we got, shrink, and start over */
			while ((req = mtp_req_get(dev, &dev->tx_idle)))
				mtp_request_free(req, dev->ep_in);
			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
			mtp_tx_reqs = MTP_TX_REQ_MAX;
			goto retry_tx_alloc;
		}
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}

	/*
	 * The RX buffer should be aligned to EP max packet for
	 * some controllers. At bind time, we don't know the
	 * operational speed. Hence assuming super speed max
	 * packet size.
	 */
	if (mtp_rx_req_len % 1024)
		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;

retry_rx_alloc:
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
		if (!req) {
			/* already at the minimum size: give up */
			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* free the rx requests allocated so far and retry smaller */
			for (--i; i >= 0; i--)
				mtp_request_free(dev->rx_req[i], dev->ep_out);
			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
			goto retry_rx_alloc;
		}
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;

fail:
	pr_err("mtp_bind() could not allocate requests\n");
	return -1;
}
584
/*
 * read() handler for the MTP device node: queue one bulk-OUT request,
 * wait for the host to fill it, and copy the payload to userspace.
 *
 * Blocks until the function is online.  Returns the number of bytes
 * copied (clamped to @count), -ECANCELED when the host cancelled the
 * transaction, -EINVAL when the aligned length exceeds the rx buffer,
 * or a negative errno on queue/copy failure.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	ssize_t r = count;
	unsigned xfer;
	int ret = 0;
	size_t len = 0;

	DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}

	/* OUT transfers must be a multiple of the endpoint packet size;
	 * round up and make sure it still fits the allocated rx buffer
	 */
	len = ALIGN(count, dev->ep_out->maxpacket);
	if (len > mtp_rx_req_len)
		return -EINVAL;

	spin_lock_irq(&dev->lock);
	/* NOTE(review): this second clamp (usb_ep_align_maybe against
	 * MTP_BULK_BUFFER_SIZE) overlaps the one above and uses a
	 * different limit — looks like a merge leftover; confirm which
	 * bound is intended before touching it.
	 */
	if (dev->ep_out->desc) {
		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
		if (len > MTP_BULK_BUFFER_SIZE) {
			spin_unlock_irq(&dev->lock);
			return -EINVAL;
		}
	}

	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = len;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		/* take the request back if it never completed */
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		/* interrupted by a signal */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	/* report cancellation, otherwise return to READY (unless unplugged) */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
	return r;
}
683
/*
 * write() handler for the MTP device node: copy userspace data into
 * idle tx requests and queue them on the bulk-IN endpoint.
 *
 * When @count is an exact multiple of the IN endpoint packet size, an
 * extra zero-length packet is queued so the host sees end-of-transfer.
 * Returns the byte count on success, -ECANCELED on host cancel,
 * -ENODEV when offline, or a negative errno on copy/queue failure.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change, not by a free request */
			DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
				ret, dev->state);
			r = ret;
			break;
		}

		/* fill at most one tx buffer per iteration */
		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* a request we still hold after an error goes back on the idle list */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
	return r;
}
777
778/* read from a local file and write to USB */
779static void send_file_work(struct work_struct *data)
780{
781 struct mtp_dev *dev = container_of(data, struct mtp_dev,
782 send_file_work);
783 struct usb_composite_dev *cdev = dev->cdev;
784 struct usb_request *req = 0;
785 struct mtp_data_header *header;
786 struct file *filp;
787 loff_t offset;
788 int64_t count;
789 int xfer, ret, hdr_size;
790 int r = 0;
791 int sendZLP = 0;
Hemant Kumarfc2b8f02016-05-02 11:18:48 -0700792 ktime_t start_time;
Benoit Goby27d01e52011-12-19 14:37:50 -0800793
794 /* read our parameters */
795 smp_rmb();
796 filp = dev->xfer_file;
797 offset = dev->xfer_file_offset;
798 count = dev->xfer_file_length;
799
800 DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
801
802 if (dev->xfer_send_header) {
803 hdr_size = sizeof(struct mtp_data_header);
804 count += hdr_size;
805 } else {
806 hdr_size = 0;
807 }
808
809 /* we need to send a zero length packet to signal the end of transfer
810 * if the transfer size is aligned to a packet boundary.
811 */
812 if ((count & (dev->ep_in->maxpacket - 1)) == 0)
813 sendZLP = 1;
814
815 while (count > 0 || sendZLP) {
816 /* so we exit after sending ZLP */
817 if (count == 0)
818 sendZLP = 0;
819
820 /* get an idle tx request to use */
821 req = 0;
822 ret = wait_event_interruptible(dev->write_wq,
823 (req = mtp_req_get(dev, &dev->tx_idle))
824 || dev->state != STATE_BUSY);
825 if (dev->state == STATE_CANCELED) {
826 r = -ECANCELED;
827 break;
828 }
829 if (!req) {
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530830 DBG(cdev,
831 "send_file_work request NULL ret:%d state:%d\n",
832 ret, dev->state);
Benoit Goby27d01e52011-12-19 14:37:50 -0800833 r = ret;
834 break;
835 }
836
Hemant Kumar28dd7c42016-12-29 15:47:31 -0800837 if (count > mtp_tx_req_len)
838 xfer = mtp_tx_req_len;
Benoit Goby27d01e52011-12-19 14:37:50 -0800839 else
840 xfer = count;
841
842 if (hdr_size) {
843 /* prepend MTP data header */
844 header = (struct mtp_data_header *)req->buf;
Witold Sciuk6db5c3e2016-02-13 11:08:37 +0100845 /*
846 * set file size with header according to
847 * MTP Specification v1.0
848 */
849 header->length = (count > MTP_MAX_FILE_SIZE) ?
850 MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
Benoit Goby27d01e52011-12-19 14:37:50 -0800851 header->type = __cpu_to_le16(2); /* data packet */
852 header->command = __cpu_to_le16(dev->xfer_command);
853 header->transaction_id =
854 __cpu_to_le32(dev->xfer_transaction_id);
855 }
Hemant Kumarfc2b8f02016-05-02 11:18:48 -0700856 start_time = ktime_get();
Benoit Goby27d01e52011-12-19 14:37:50 -0800857 ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
858 &offset);
859 if (ret < 0) {
860 r = ret;
861 break;
862 }
Hemant Kumarfc2b8f02016-05-02 11:18:48 -0700863
Benoit Goby27d01e52011-12-19 14:37:50 -0800864 xfer = ret + hdr_size;
Hemant Kumarfc2b8f02016-05-02 11:18:48 -0700865 dev->perf[dev->dbg_read_index].vfs_rtime =
866 ktime_to_us(ktime_sub(ktime_get(), start_time));
867 dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
868 dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
Benoit Goby27d01e52011-12-19 14:37:50 -0800869 hdr_size = 0;
870
871 req->length = xfer;
872 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
873 if (ret < 0) {
874 DBG(cdev, "send_file_work: xfer error %d\n", ret);
Hemant Kumar9bcbf7b2016-05-01 22:15:04 -0700875 if (dev->state != STATE_OFFLINE)
876 dev->state = STATE_ERROR;
Benoit Goby27d01e52011-12-19 14:37:50 -0800877 r = -EIO;
878 break;
879 }
880
881 count -= xfer;
882
883 /* zero this so we don't try to free it on error exit */
884 req = 0;
885 }
886
887 if (req)
888 mtp_req_put(dev, &dev->tx_idle, req);
889
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530890 DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
Benoit Goby27d01e52011-12-19 14:37:50 -0800891 /* write the result */
892 dev->xfer_result = r;
893 smp_wmb();
894}
895
/* read from USB and write to a local file */
/*
 * receive_file_work() - workqueue handler for MTP_RECEIVE_FILE.
 *
 * Runs in process context so vfs_write() can use the driver's kernel
 * buffers.  Parameters (xfer_file, xfer_file_offset, xfer_file_length)
 * are read from @dev after the smp_rmb() that pairs with the smp_wmb()
 * in mtp_send_receive_ioctl(); the result is published the same way in
 * dev->xfer_result.
 *
 * The loop double-buffers: while one rx request is in flight on ep_out,
 * the previously completed one is written to the file.  A transfer
 * length of 0xFFFFFFFF means "read until short/zero-length packet".
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
	ktime_t start_time;	/* for the debugfs VFS-write latency stats */

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				/* don't mask a disconnect with STATE_ERROR */
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously completed USB read to disk */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			start_time = ktime_get();
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			/* record per-write latency for debugfs "status" */
			dev->perf[dev->dbg_write_index].vfs_wtime =
				ktime_to_us(ktime_sub(ktime_get(), start_time));
			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
			dev->dbg_write_index =
				(dev->dbg_write_index + 1) % MAX_ITERATION;
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				if (dev->state == STATE_OFFLINE)
					r = -EIO;
				else
					r = -ECANCELED;
				/* reclaim the request still owned by the UDC */
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}

			if (read_req->status) {
				r = read_req->status;
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			/* hand the completed buffer to the write phase */
			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
1007
/*
 * mtp_send_event() - queue an MTP event on the interrupt-IN endpoint.
 *
 * Copies @event->data (a userspace pointer, @event->length bytes) into a
 * request from the intr_idle pool and queues it on ep_intr.
 *
 * Returns 0 on success, -EINVAL for an oversized/negative length,
 * -ENODEV when disconnected, -ETIME if no free request became available
 * within 1s (also hit when the wait is interrupted by a signal before a
 * request was obtained — NOTE(review): that case arguably deserves
 * -ERESTARTSYS; confirm userspace expectations before changing),
 * -EFAULT on a bad user pointer, or the usb_ep_queue() error.
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	/* wait (up to 1s) for a free interrupt request from the pool */
	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		/* return the unused request to the pool */
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
1038
/*
 * mtp_send_receive_ioctl() - common backend for the file-transfer ioctls
 * (MTP_SEND_FILE, MTP_SEND_FILE_WITH_HEADER, MTP_RECEIVE_FILE).
 *
 * Serialized by ioctl_excl; transitions the state machine
 * READY -> BUSY -> READY and dispatches the actual transfer to
 * send_file_work/receive_file_work on the driver workqueue, then blocks
 * until it finishes.  Returns the worker's result, or -EBUSY/-ECANCELED/
 * -ENODEV/-EBADF for the early-exit paths.
 */
static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
	struct mtp_file_range *mfr)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	struct work_struct *work;
	int ret = -EINVAL;

	/* only one transfer ioctl may be in flight at a time */
	if (mtp_lock(&dev->ioctl_excl)) {
		DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
		return -EBUSY;
	}

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancellation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		ret = -ECANCELED;
		goto out;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		ret = -ENODEV;
		goto out;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* hold a reference to the file while we are working with it */
	filp = fget(mfr->fd);
	if (!filp) {
		ret = -EBADF;
		goto fail;
	}

	/* write the parameters */
	dev->xfer_file = filp;
	dev->xfer_file_offset = mfr->offset;
	dev->xfer_file_length = mfr->length;
	/* make sure write is done before parameters are read */
	smp_wmb();

	/* pick the worker; receive is the only non-send code here */
	if (code == MTP_SEND_FILE_WITH_HEADER) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 1;
		dev->xfer_command = mfr->command;
		dev->xfer_transaction_id = mfr->transaction_id;
	} else if (code == MTP_SEND_FILE) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 0;
	} else {
		work = &dev->receive_file_work;
	}

	/* We do the file transfer on a work queue so it will run
	 * in kernel context, which is necessary for vfs_read and
	 * vfs_write to use our buffers in the kernel address space.
	 */
	queue_work(dev->wq, work);
	/* wait for operation to complete */
	flush_workqueue(dev->wq);
	fput(filp);

	/* read the result */
	smp_rmb();
	ret = dev->xfer_result;

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
	return ret;
}
1119
/*
 * mtp_ioctl() - top-level ioctl dispatcher for /dev/mtp_usb.
 *
 * File-transfer codes are forwarded to mtp_send_receive_ioctl();
 * MTP_SEND_EVENT is handled inline under ioctl_excl without touching
 * dev->state, so it cannot disturb an in-progress bulk transfer.
 *
 * NOTE(review): unknown codes fall through and return -EINVAL, while
 * the compat path returns -ENOIOCTLCMD (and kernel convention for
 * unlocked_ioctl is -ENOTTY) — confirm before unifying, userspace may
 * depend on the current value.
 */
static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct mtp_file_range mfr;
	struct mtp_event event;
	int ret = -EINVAL;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		ret = mtp_send_receive_ioctl(fp, code, &mfr);
		break;
	case MTP_SEND_EVENT:
		if (mtp_lock(&dev->ioctl_excl))
			return -EBUSY;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		mtp_unlock(&dev->ioctl_excl);
		break;
	default:
		DBG(dev->cdev, "unknown ioctl code: %d\n", code);
	}
fail:
	return ret;
}
1155
/*
 * 32 bit userspace calling into 64 bit kernel. handle ioctl code
 * and userspace pointer
 */
#ifdef CONFIG_COMPAT
/*
 * compat_mtp_ioctl() - 32-bit compat shim for mtp_ioctl().
 *
 * Translates the COMPAT_* ioctl codes and the 32-bit layouts of
 * mtp_file_range / mtp_event into their native forms, then reuses the
 * same backends as the native path.  event.data is widened with
 * compat_ptr().  Unknown codes return -ENOIOCTLCMD.
 */
static long compat_mtp_ioctl(struct file *fp, unsigned int code,
				unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct mtp_file_range mfr;
	struct __compat_mtp_file_range cmfr;
	struct mtp_event event;
	struct __compat_mtp_event cevent;
	unsigned int cmd;
	bool send_file = false;
	int ret = -EINVAL;

	/* map compat code -> native code */
	switch (code) {
	case COMPAT_MTP_SEND_FILE:
		cmd = MTP_SEND_FILE;
		send_file = true;
		break;
	case COMPAT_MTP_RECEIVE_FILE:
		cmd = MTP_RECEIVE_FILE;
		send_file = true;
		break;
	case COMPAT_MTP_SEND_FILE_WITH_HEADER:
		cmd = MTP_SEND_FILE_WITH_HEADER;
		send_file = true;
		break;
	case COMPAT_MTP_SEND_EVENT:
		cmd = MTP_SEND_EVENT;
		break;
	default:
		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
		ret = -ENOIOCTLCMD;
		goto fail;
	}

	if (send_file) {
		/* widen the 32-bit file-range struct field by field */
		if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
			ret = -EFAULT;
			goto fail;
		}
		mfr.fd = cmfr.fd;
		mfr.offset = cmfr.offset;
		mfr.length = cmfr.length;
		mfr.command = cmfr.command;
		mfr.transaction_id = cmfr.transaction_id;
		ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
	} else {
		if (mtp_lock(&dev->ioctl_excl))
			return -EBUSY;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&cevent, (void __user *)value,
			sizeof(cevent))) {
			ret = -EFAULT;
			goto fail;
		}
		event.length = cevent.length;
		event.data = compat_ptr(cevent.data);
		ret = mtp_send_event(dev, &event);
		mtp_unlock(&dev->ioctl_excl);
	}
fail:
	return ret;
}
#endif
1226
Benoit Goby27d01e52011-12-19 14:37:50 -08001227static int mtp_open(struct inode *ip, struct file *fp)
1228{
1229 printk(KERN_INFO "mtp_open\n");
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301230 if (mtp_lock(&_mtp_dev->open_excl)) {
1231 pr_err("%s mtp_release not called returning EBUSY\n", __func__);
Benoit Goby27d01e52011-12-19 14:37:50 -08001232 return -EBUSY;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301233 }
Benoit Goby27d01e52011-12-19 14:37:50 -08001234
1235 /* clear any error condition */
1236 if (_mtp_dev->state != STATE_OFFLINE)
1237 _mtp_dev->state = STATE_READY;
1238
1239 fp->private_data = _mtp_dev;
1240 return 0;
1241}
1242
1243static int mtp_release(struct inode *ip, struct file *fp)
1244{
1245 printk(KERN_INFO "mtp_release\n");
1246
1247 mtp_unlock(&_mtp_dev->open_excl);
1248 return 0;
1249}
1250
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	/* 32-bit userspace on a 64-bit kernel goes through the compat shim */
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_mtp_ioctl,
#endif
	.open = mtp_open,
	.release = mtp_release,
};
1263
/* misc char device providing the userspace endpoint (/dev/mtp_usb) */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1269
/*
 * mtp_ctrlrequest() - handle MTP-specific control requests on ep0.
 *
 * Services three families of requests:
 *  - the Microsoft OS string descriptor (string index MTP_OS_STRING_ID),
 *  - the vendor "get extended OS descriptor" request (bRequest == 1),
 *    patching the compatibleID to "PTP" when the PTP descriptors are
 *    active,
 *  - the MTP class requests MTP_REQ_CANCEL / MTP_REQ_GET_DEVICE_STATUS.
 *
 * Returns the number of bytes queued for the data phase, or
 * -EOPNOTSUPP for requests it does not recognize.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);

			/* update compatibleID if PTP */
			if (dev->function.fs_descriptors == fs_ptp_descs) {
				struct mtp_ext_config_desc *d = cdev->req->buf;

				d->function.compatibleID[0] = 'P';
			}
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* wake both directions so blocked I/O sees the cancel */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1369
/*
 * mtp_function_bind() - bind callback for the composite framework.
 *
 * Allocates the interface ID and string ID, publishes the OS descriptor
 * table when the gadget uses OS strings, creates the bulk/interrupt
 * endpoints, and fixes up the HS/SS descriptor endpoint addresses from
 * the FS ones.
 *
 * NOTE(review): if mtp_create_bulk_endpoints() fails after
 * f->os_desc_table was allocated, the table is only freed in unbind —
 * confirm the composite core always calls unbind on bind failure,
 * otherwise this leaks.
 */
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev *dev = func_to_mtp(f);
	int id;
	int ret;
	struct mtp_instance *fi_mtp;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate a string ID only once; reused across rebinds */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);

	if (cdev->use_os_string) {
		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
					GFP_KERNEL);
		if (!f->os_desc_table)
			return -ENOMEM;
		f->os_desc_n = 1;
		f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
	}

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}
	/* support super speed hardware */
	if (gadget_is_superspeed(c->cdev->gadget)) {
		unsigned max_burst;

		/* Calculate bMaxBurst, we know packet size is 1024 */
		max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
		mtp_ss_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_ss_in_comp_desc.bMaxBurst = max_burst;
		mtp_ss_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
		mtp_ss_out_comp_desc.bMaxBurst = max_burst;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
		gadget_is_superspeed(c->cdev->gadget) ? "super" :
		(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
		f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
1440
/*
 * mtp_function_unbind() - undo mtp_function_bind().
 *
 * Frees all tx/rx/interrupt requests, resets the cached string ID so it
 * is reallocated on the next bind, marks the device offline, and
 * releases the OS descriptor table.
 */
static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_request *req;
	int i;

	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
	kfree(f->os_desc_table);
	f->os_desc_n = 0;
}
1459
1460static int mtp_function_set_alt(struct usb_function *f,
1461 unsigned intf, unsigned alt)
1462{
1463 struct mtp_dev *dev = func_to_mtp(f);
1464 struct usb_composite_dev *cdev = f->config->cdev;
1465 int ret;
1466
1467 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1468
1469 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1470 if (ret)
1471 return ret;
1472
1473 ret = usb_ep_enable(dev->ep_in);
1474 if (ret)
1475 return ret;
1476
1477 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1478 if (ret)
1479 return ret;
1480
1481 ret = usb_ep_enable(dev->ep_out);
1482 if (ret) {
1483 usb_ep_disable(dev->ep_in);
1484 return ret;
1485 }
1486
1487 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1488 if (ret)
1489 return ret;
1490
1491 ret = usb_ep_enable(dev->ep_intr);
1492 if (ret) {
1493 usb_ep_disable(dev->ep_out);
1494 usb_ep_disable(dev->ep_in);
1495 return ret;
1496 }
1497 dev->state = STATE_READY;
1498
1499 /* readers may be blocked waiting for us to go online */
1500 wake_up(&dev->read_wq);
1501 return 0;
1502}
1503
/*
 * mtp_function_disable() - disable callback (cable pulled / config
 * change).  Marks the device offline, shuts down all endpoints, and
 * wakes any reader blocked waiting for the device to come online so it
 * can observe the state change.
 */
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}
1520
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001521static int debug_mtp_read_stats(struct seq_file *s, void *unused)
1522{
1523 struct mtp_dev *dev = _mtp_dev;
1524 int i;
1525 unsigned long flags;
1526 unsigned int min, max = 0, sum = 0, iteration = 0;
1527
1528 seq_puts(s, "\n=======================\n");
1529 seq_puts(s, "USB MTP OUT related VFS write stats:\n");
1530 seq_puts(s, "\n=======================\n");
1531 spin_lock_irqsave(&dev->lock, flags);
1532 min = dev->perf[0].vfs_wtime;
1533 for (i = 0; i < MAX_ITERATION; i++) {
1534 seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
1535 dev->perf[i].vfs_wbytes,
1536 dev->perf[i].vfs_wtime);
1537 if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
1538 sum += dev->perf[i].vfs_wtime;
1539 if (min > dev->perf[i].vfs_wtime)
1540 min = dev->perf[i].vfs_wtime;
1541 if (max < dev->perf[i].vfs_wtime)
1542 max = dev->perf[i].vfs_wtime;
1543 iteration++;
1544 }
1545 }
1546
1547 seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
1548 min, max, sum / iteration);
1549 min = max = sum = iteration = 0;
1550 seq_puts(s, "\n=======================\n");
1551 seq_puts(s, "USB MTP IN related VFS read stats:\n");
1552 seq_puts(s, "\n=======================\n");
1553
1554 min = dev->perf[0].vfs_rtime;
1555 for (i = 0; i < MAX_ITERATION; i++) {
1556 seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
1557 dev->perf[i].vfs_rbytes,
1558 dev->perf[i].vfs_rtime);
1559 if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
1560 sum += dev->perf[i].vfs_rtime;
1561 if (min > dev->perf[i].vfs_rtime)
1562 min = dev->perf[i].vfs_rtime;
1563 if (max < dev->perf[i].vfs_rtime)
1564 max = dev->perf[i].vfs_rtime;
1565 iteration++;
1566 }
1567 }
1568
1569 seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
1570 min, max, sum / iteration);
1571 spin_unlock_irqrestore(&dev->lock, flags);
1572 return 0;
1573}
1574
1575static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
1576 size_t count, loff_t *ppos)
1577{
1578 int clear_stats;
1579 unsigned long flags;
1580 struct mtp_dev *dev = _mtp_dev;
1581
1582 if (buf == NULL) {
1583 pr_err("[%s] EINVAL\n", __func__);
1584 goto done;
1585 }
1586
1587 if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
1588 pr_err("Wrong value. To clear stats, enter value as 0.\n");
1589 goto done;
1590 }
1591
1592 spin_lock_irqsave(&dev->lock, flags);
1593 memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
1594 dev->dbg_read_index = 0;
1595 dev->dbg_write_index = 0;
1596 spin_unlock_irqrestore(&dev->lock, flags);
1597done:
1598 return count;
1599}
1600
/* open handler for the debugfs "status" file; wires up the seq_file show */
static int debug_mtp_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_mtp_read_stats, inode->i_private);
}
1605
/* debugfs "status" file: read dumps stats, write "0" resets them */
static const struct file_operations debug_mtp_ops = {
	.open = debug_mtp_open,
	.read = seq_read,
	.write = debug_mtp_reset_stats,
};
1611
1612struct dentry *dent_mtp;
1613static void mtp_debugfs_init(void)
1614{
1615 struct dentry *dent_mtp_status;
1616
1617 dent_mtp = debugfs_create_dir("usb_mtp", 0);
1618 if (!dent_mtp || IS_ERR(dent_mtp))
1619 return;
1620
1621 dent_mtp_status = debugfs_create_file("status", 0644, dent_mtp,
1622 0, &debug_mtp_ops);
1623 if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
1624 debugfs_remove(dent_mtp);
1625 dent_mtp = NULL;
1626 return;
1627 }
1628}
1629
/* tear down the usb_mtp debugfs tree; safe if init failed (NULL/ERR ok) */
static void mtp_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_mtp);
}
1634
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001635static int __mtp_setup(struct mtp_instance *fi_mtp)
Benoit Goby27d01e52011-12-19 14:37:50 -08001636{
1637 struct mtp_dev *dev;
1638 int ret;
1639
1640 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001641
1642 if (fi_mtp != NULL)
1643 fi_mtp->dev = dev;
1644
Benoit Goby27d01e52011-12-19 14:37:50 -08001645 if (!dev)
1646 return -ENOMEM;
1647
1648 spin_lock_init(&dev->lock);
1649 init_waitqueue_head(&dev->read_wq);
1650 init_waitqueue_head(&dev->write_wq);
1651 init_waitqueue_head(&dev->intr_wq);
1652 atomic_set(&dev->open_excl, 0);
1653 atomic_set(&dev->ioctl_excl, 0);
1654 INIT_LIST_HEAD(&dev->tx_idle);
1655 INIT_LIST_HEAD(&dev->intr_idle);
1656
1657 dev->wq = create_singlethread_workqueue("f_mtp");
1658 if (!dev->wq) {
1659 ret = -ENOMEM;
1660 goto err1;
1661 }
1662 INIT_WORK(&dev->send_file_work, send_file_work);
1663 INIT_WORK(&dev->receive_file_work, receive_file_work);
1664
1665 _mtp_dev = dev;
1666
1667 ret = misc_register(&mtp_device);
1668 if (ret)
1669 goto err2;
1670
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001671 mtp_debugfs_init();
Benoit Goby27d01e52011-12-19 14:37:50 -08001672 return 0;
1673
1674err2:
1675 destroy_workqueue(dev->wq);
1676err1:
1677 _mtp_dev = NULL;
1678 kfree(dev);
1679 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1680 return ret;
1681}
1682
/* configfs entry point: create the driver state for instance @fi_mtp */
static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
{
	return __mtp_setup(fi_mtp);
}
1687
1688
Benoit Goby27d01e52011-12-19 14:37:50 -08001689static void mtp_cleanup(void)
1690{
1691 struct mtp_dev *dev = _mtp_dev;
1692
1693 if (!dev)
1694 return;
1695
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001696 mtp_debugfs_remove();
Benoit Goby27d01e52011-12-19 14:37:50 -08001697 misc_deregister(&mtp_device);
1698 destroy_workqueue(dev->wq);
1699 _mtp_dev = NULL;
1700 kfree(dev);
1701}
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001702
/* map a configfs item back to its owning mtp_instance */
static struct mtp_instance *to_mtp_instance(struct config_item *item)
{
	return container_of(to_config_group(item), struct mtp_instance,
						func_inst.group);
}
1708
/* configfs release: drop the function-instance reference for this item */
static void mtp_attr_release(struct config_item *item)
{
	struct mtp_instance *fi_mtp = to_mtp_instance(item);

	usb_put_function_instance(&fi_mtp->func_inst);
}
1715
/* configfs item ops: only release is needed for this function type */
static struct configfs_item_operations mtp_item_ops = {
	.release        = mtp_attr_release,
};

/* configfs item type backing the per-instance group */
static struct config_item_type mtp_func_type = {
	.ct_item_ops    = &mtp_item_ops,
	.ct_owner       = THIS_MODULE,
};
1724
1725
/* map a generic usb_function_instance back to its mtp_instance wrapper */
static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
{
	return container_of(fi, struct mtp_instance, func_inst);
}
1730
1731static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1732{
1733 struct mtp_instance *fi_mtp;
1734 char *ptr;
1735 int name_len;
1736
1737 name_len = strlen(name) + 1;
1738 if (name_len > MAX_INST_NAME_LEN)
1739 return -ENAMETOOLONG;
1740
1741 ptr = kstrndup(name, name_len, GFP_KERNEL);
1742 if (!ptr)
1743 return -ENOMEM;
1744
1745 fi_mtp = to_fi_mtp(fi);
1746 fi_mtp->name = ptr;
1747
1748 return 0;
1749}
1750
/* free_func_inst hook: release the name, the driver state, and the
 * instance wrapper itself
 */
static void mtp_free_inst(struct usb_function_instance *fi)
{
	struct mtp_instance *fi_mtp;

	fi_mtp = to_fi_mtp(fi);
	kfree(fi_mtp->name);
	mtp_cleanup();
	kfree(fi_mtp);
}
1760
/*
 * alloc_inst_mtp_ptp() - allocate a function instance shared by the MTP
 * and PTP configfs functions.
 *
 * @mtp_config: true for the MTP instance (which owns the driver state
 * via mtp_setup_configfs()); false for PTP, which piggybacks on the
 * already-created MTP state (_mtp_dev).
 *
 * Also prepares the Microsoft OS descriptor ("MTP" compat interface)
 * directory for the instance's configfs group.
 *
 * Returns the embedded usb_function_instance or an ERR_PTR.
 */
struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
{
	struct mtp_instance *fi_mtp;
	int ret = 0;
	struct usb_os_desc *descs[1];
	char *names[1];

	fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
	if (!fi_mtp)
		return ERR_PTR(-ENOMEM);
	fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
	fi_mtp->func_inst.free_func_inst = mtp_free_inst;

	fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
	INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
	descs[0] = &fi_mtp->mtp_os_desc;
	names[0] = "MTP";

	if (mtp_config) {
		ret = mtp_setup_configfs(fi_mtp);
		if (ret) {
			kfree(fi_mtp);
			pr_err("Error setting MTP\n");
			return ERR_PTR(ret);
		}
	} else
		fi_mtp->dev = _mtp_dev;

	config_group_init_type_name(&fi_mtp->func_inst.group,
					"", &mtp_func_type);
	usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
					descs, names, THIS_MODULE);

	return  &fi_mtp->func_inst;
}
EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
1797
/* configfs alloc_inst for the "mtp" function (mtp_config = true) */
static struct usb_function_instance *mtp_alloc_inst(void)
{
		return alloc_inst_mtp_ptp(true);
}
1802
/* usb_function .setup hook: forward ep0 control requests to the driver */
static int mtp_ctrlreq_configfs(struct usb_function *f,
				const struct usb_ctrlrequest *ctrl)
{
	return mtp_ctrlrequest(f->config->cdev, ctrl);
}
1808
/* usb_function .free_func hook */
static void mtp_free(struct usb_function *f)
{
	/*NO-OP: no function specific resource allocation in mtp_alloc*/
}
1813
/*
 * function_alloc_mtp_ptp() - build the usb_function for an MTP or PTP
 * configfs instance.
 *
 * @mtp_config selects which descriptor set (MTP vs PTP) is wired into
 * the shared mtp_dev.  Fails with -EINVAL when the PTP function is
 * linked before an MTP function has created the underlying device
 * (fi_mtp->dev == NULL), since PTP reuses MTP's state.
 *
 * Returns the embedded usb_function or an ERR_PTR.
 */
struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
					bool mtp_config)
{
	struct mtp_instance *fi_mtp = to_fi_mtp(fi);
	struct mtp_dev *dev;

	/*
	 * PTP piggybacks on MTP function so make sure we have
	 * created MTP function before we associate this PTP
	 * function with a gadget configuration.
	 */
	if (fi_mtp->dev == NULL) {
		pr_err("Error: Create MTP function before linking"
				" PTP function with a gadget configuration\n");
		pr_err("\t1: Delete existing PTP function if any\n");
		pr_err("\t2: Create MTP function\n");
		pr_err("\t3: Create and symlink PTP function"
				" with a gadget configuration\n");
		return ERR_PTR(-EINVAL); /* Invalid Configuration */
	}

	dev = fi_mtp->dev;
	dev->function.name = DRIVER_NAME;
	dev->function.strings = mtp_strings;
	/* select the descriptor set matching the requested personality */
	if (mtp_config) {
		dev->function.fs_descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
		dev->function.ss_descriptors = ss_mtp_descs;
	} else {
		dev->function.fs_descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
		dev->function.ss_descriptors = ss_ptp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;
	dev->function.setup = mtp_ctrlreq_configfs;
	dev->function.free_func = mtp_free;
	fi->f = &dev->function;

	return &dev->function;
}
EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1858
/* configfs alloc hook for the "mtp" function (mtp_config = true) */
static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
{
	return function_alloc_mtp_ptp(fi, true);
}
1863
1864DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
1865MODULE_LICENSE("GPL");