blob: 239d9bfb16bd6c108c1ba0aa0aeee4fbcae3fa57 [file] [log] [blame]
Benoit Goby27d01e52011-12-19 14:37:50 -08001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28
Hemant Kumarfc2b8f02016-05-02 11:18:48 -070029#include <linux/seq_file.h>
30#include <linux/debugfs.h>
Benoit Goby27d01e52011-12-19 14:37:50 -080031#include <linux/types.h>
32#include <linux/file.h>
33#include <linux/device.h>
34#include <linux/miscdevice.h>
35
36#include <linux/usb.h>
37#include <linux/usb_usual.h>
38#include <linux/usb/ch9.h>
39#include <linux/usb/f_mtp.h>
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080040#include <linux/configfs.h>
41#include <linux/usb/composite.h>
42
43#include "configfs.h"
Benoit Goby27d01e52011-12-19 14:37:50 -080044
Hemant Kumarc044ad02016-05-02 11:27:21 -070045#define MTP_RX_BUFFER_INIT_SIZE 1048576
Hemant Kumar28dd7c42016-12-29 15:47:31 -080046#define MTP_TX_BUFFER_INIT_SIZE 1048576
Benoit Goby27d01e52011-12-19 14:37:50 -080047#define MTP_BULK_BUFFER_SIZE 16384
48#define INTR_BUFFER_SIZE 28
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080049#define MAX_INST_NAME_LEN 40
Witold Sciuk6db5c3e2016-02-13 11:08:37 +010050#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL
Benoit Goby27d01e52011-12-19 14:37:50 -080051
52/* String IDs */
53#define INTERFACE_STRING_INDEX 0
54
55/* values for mtp_dev.state */
56#define STATE_OFFLINE 0 /* initial state, disconnected */
57#define STATE_READY 1 /* ready for userspace calls */
58#define STATE_BUSY 2 /* processing userspace calls */
59#define STATE_CANCELED 3 /* transaction canceled by host */
60#define STATE_ERROR 4 /* error from completion routine */
61
62/* number of tx and rx requests to allocate */
Hemant Kumarc07309f2016-05-01 17:44:56 -070063#define MTP_TX_REQ_MAX 8
Benoit Goby27d01e52011-12-19 14:37:50 -080064#define RX_REQ_MAX 2
65#define INTR_REQ_MAX 5
66
67/* ID for Microsoft MTP OS String */
68#define MTP_OS_STRING_ID 0xEE
69
70/* MTP class reqeusts */
71#define MTP_REQ_CANCEL 0x64
72#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
73#define MTP_REQ_RESET 0x66
74#define MTP_REQ_GET_DEVICE_STATUS 0x67
75
76/* constants for device status */
77#define MTP_RESPONSE_OK 0x2001
78#define MTP_RESPONSE_DEVICE_BUSY 0x2019
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080079#define DRIVER_NAME "mtp"
Benoit Goby27d01e52011-12-19 14:37:50 -080080
Hemant Kumarfc2b8f02016-05-02 11:18:48 -070081#define MAX_ITERATION 100
82
Hemant Kumarc044ad02016-05-02 11:27:21 -070083unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
Hemant Kumar4aed14e2016-05-01 17:35:36 -070084module_param(mtp_rx_req_len, uint, 0644);
85
Hemant Kumar28dd7c42016-12-29 15:47:31 -080086unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
Hemant Kumarc07309f2016-05-01 17:44:56 -070087module_param(mtp_tx_req_len, uint, 0644);
88
89unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
90module_param(mtp_tx_reqs, uint, 0644);
91
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080092static const char mtp_shortname[] = DRIVER_NAME "_usb";
Benoit Goby27d01e52011-12-19 14:37:50 -080093
94struct mtp_dev {
95 struct usb_function function;
96 struct usb_composite_dev *cdev;
97 spinlock_t lock;
98
99 struct usb_ep *ep_in;
100 struct usb_ep *ep_out;
101 struct usb_ep *ep_intr;
102
103 int state;
104
105 /* synchronize access to our device file */
106 atomic_t open_excl;
107 /* to enforce only one ioctl at a time */
108 atomic_t ioctl_excl;
109
110 struct list_head tx_idle;
111 struct list_head intr_idle;
112
113 wait_queue_head_t read_wq;
114 wait_queue_head_t write_wq;
115 wait_queue_head_t intr_wq;
116 struct usb_request *rx_req[RX_REQ_MAX];
117 int rx_done;
118
119 /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
120 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
121 */
122 struct workqueue_struct *wq;
123 struct work_struct send_file_work;
124 struct work_struct receive_file_work;
125 struct file *xfer_file;
126 loff_t xfer_file_offset;
127 int64_t xfer_file_length;
128 unsigned xfer_send_header;
129 uint16_t xfer_command;
130 uint32_t xfer_transaction_id;
131 int xfer_result;
Hemant Kumarfc2b8f02016-05-02 11:18:48 -0700132 struct {
133 unsigned long vfs_rbytes;
134 unsigned long vfs_wbytes;
135 unsigned int vfs_rtime;
136 unsigned int vfs_wtime;
137 } perf[MAX_ITERATION];
138 unsigned int dbg_read_index;
139 unsigned int dbg_write_index;
Vijayavardhan Vennapusab28d3e82017-02-16 15:22:14 +0530140 struct mutex read_mutex;
Benoit Goby27d01e52011-12-19 14:37:50 -0800141};
142
143static struct usb_interface_descriptor mtp_interface_desc = {
144 .bLength = USB_DT_INTERFACE_SIZE,
145 .bDescriptorType = USB_DT_INTERFACE,
146 .bInterfaceNumber = 0,
147 .bNumEndpoints = 3,
148 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
149 .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
150 .bInterfaceProtocol = 0,
151};
152
153static struct usb_interface_descriptor ptp_interface_desc = {
154 .bLength = USB_DT_INTERFACE_SIZE,
155 .bDescriptorType = USB_DT_INTERFACE,
156 .bInterfaceNumber = 0,
157 .bNumEndpoints = 3,
158 .bInterfaceClass = USB_CLASS_STILL_IMAGE,
159 .bInterfaceSubClass = 1,
160 .bInterfaceProtocol = 1,
161};
162
Mark Kuo1b61b272015-08-20 13:01:46 +0800163static struct usb_endpoint_descriptor mtp_ss_in_desc = {
164 .bLength = USB_DT_ENDPOINT_SIZE,
165 .bDescriptorType = USB_DT_ENDPOINT,
166 .bEndpointAddress = USB_DIR_IN,
167 .bmAttributes = USB_ENDPOINT_XFER_BULK,
168 .wMaxPacketSize = __constant_cpu_to_le16(1024),
169};
170
171static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
172 .bLength = sizeof(mtp_ss_in_comp_desc),
173 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
174 /* .bMaxBurst = DYNAMIC, */
175};
176
177static struct usb_endpoint_descriptor mtp_ss_out_desc = {
178 .bLength = USB_DT_ENDPOINT_SIZE,
179 .bDescriptorType = USB_DT_ENDPOINT,
180 .bEndpointAddress = USB_DIR_OUT,
181 .bmAttributes = USB_ENDPOINT_XFER_BULK,
182 .wMaxPacketSize = __constant_cpu_to_le16(1024),
183};
184
185static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
186 .bLength = sizeof(mtp_ss_out_comp_desc),
187 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
188 /* .bMaxBurst = DYNAMIC, */
189};
190
Benoit Goby27d01e52011-12-19 14:37:50 -0800191static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
192 .bLength = USB_DT_ENDPOINT_SIZE,
193 .bDescriptorType = USB_DT_ENDPOINT,
194 .bEndpointAddress = USB_DIR_IN,
195 .bmAttributes = USB_ENDPOINT_XFER_BULK,
196 .wMaxPacketSize = __constant_cpu_to_le16(512),
197};
198
199static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
200 .bLength = USB_DT_ENDPOINT_SIZE,
201 .bDescriptorType = USB_DT_ENDPOINT,
202 .bEndpointAddress = USB_DIR_OUT,
203 .bmAttributes = USB_ENDPOINT_XFER_BULK,
204 .wMaxPacketSize = __constant_cpu_to_le16(512),
205};
206
207static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
208 .bLength = USB_DT_ENDPOINT_SIZE,
209 .bDescriptorType = USB_DT_ENDPOINT,
210 .bEndpointAddress = USB_DIR_IN,
211 .bmAttributes = USB_ENDPOINT_XFER_BULK,
212};
213
214static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
215 .bLength = USB_DT_ENDPOINT_SIZE,
216 .bDescriptorType = USB_DT_ENDPOINT,
217 .bEndpointAddress = USB_DIR_OUT,
218 .bmAttributes = USB_ENDPOINT_XFER_BULK,
219};
220
221static struct usb_endpoint_descriptor mtp_intr_desc = {
222 .bLength = USB_DT_ENDPOINT_SIZE,
223 .bDescriptorType = USB_DT_ENDPOINT,
224 .bEndpointAddress = USB_DIR_IN,
225 .bmAttributes = USB_ENDPOINT_XFER_INT,
226 .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
227 .bInterval = 6,
228};
229
Mark Kuo1b61b272015-08-20 13:01:46 +0800230static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
231 .bLength = sizeof(mtp_intr_ss_comp_desc),
232 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
Mark Kuo21a3e932015-09-11 16:12:59 +0800233 .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
Mark Kuo1b61b272015-08-20 13:01:46 +0800234};
235
Benoit Goby27d01e52011-12-19 14:37:50 -0800236static struct usb_descriptor_header *fs_mtp_descs[] = {
237 (struct usb_descriptor_header *) &mtp_interface_desc,
238 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
239 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
240 (struct usb_descriptor_header *) &mtp_intr_desc,
241 NULL,
242};
243
244static struct usb_descriptor_header *hs_mtp_descs[] = {
245 (struct usb_descriptor_header *) &mtp_interface_desc,
246 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
247 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
248 (struct usb_descriptor_header *) &mtp_intr_desc,
249 NULL,
250};
251
Mark Kuo1b61b272015-08-20 13:01:46 +0800252static struct usb_descriptor_header *ss_mtp_descs[] = {
253 (struct usb_descriptor_header *) &mtp_interface_desc,
254 (struct usb_descriptor_header *) &mtp_ss_in_desc,
255 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
256 (struct usb_descriptor_header *) &mtp_ss_out_desc,
257 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
258 (struct usb_descriptor_header *) &mtp_intr_desc,
259 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
260 NULL,
261};
262
Benoit Goby27d01e52011-12-19 14:37:50 -0800263static struct usb_descriptor_header *fs_ptp_descs[] = {
264 (struct usb_descriptor_header *) &ptp_interface_desc,
265 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
266 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
267 (struct usb_descriptor_header *) &mtp_intr_desc,
268 NULL,
269};
270
271static struct usb_descriptor_header *hs_ptp_descs[] = {
272 (struct usb_descriptor_header *) &ptp_interface_desc,
273 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
274 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
275 (struct usb_descriptor_header *) &mtp_intr_desc,
276 NULL,
277};
278
Mark Kuo1b61b272015-08-20 13:01:46 +0800279static struct usb_descriptor_header *ss_ptp_descs[] = {
280 (struct usb_descriptor_header *) &ptp_interface_desc,
281 (struct usb_descriptor_header *) &mtp_ss_in_desc,
282 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
283 (struct usb_descriptor_header *) &mtp_ss_out_desc,
284 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
285 (struct usb_descriptor_header *) &mtp_intr_desc,
286 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
287 NULL,
288};
289
Benoit Goby27d01e52011-12-19 14:37:50 -0800290static struct usb_string mtp_string_defs[] = {
291 /* Naming interface "MTP" so libmtp will recognize us */
292 [INTERFACE_STRING_INDEX].s = "MTP",
293 { }, /* end of list */
294};
295
296static struct usb_gadget_strings mtp_string_table = {
297 .language = 0x0409, /* en-US */
298 .strings = mtp_string_defs,
299};
300
301static struct usb_gadget_strings *mtp_strings[] = {
302 &mtp_string_table,
303 NULL,
304};
305
306/* Microsoft MTP OS String */
307static u8 mtp_os_string[] = {
308 18, /* sizeof(mtp_os_string) */
309 USB_DT_STRING,
310 /* Signature field: "MSFT100" */
311 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
312 /* vendor code */
313 1,
314 /* padding */
315 0
316};
317
318/* Microsoft Extended Configuration Descriptor Header Section */
319struct mtp_ext_config_desc_header {
320 __le32 dwLength;
321 __u16 bcdVersion;
322 __le16 wIndex;
323 __u8 bCount;
324 __u8 reserved[7];
325};
326
327/* Microsoft Extended Configuration Descriptor Function Section */
328struct mtp_ext_config_desc_function {
329 __u8 bFirstInterfaceNumber;
330 __u8 bInterfaceCount;
331 __u8 compatibleID[8];
332 __u8 subCompatibleID[8];
333 __u8 reserved[6];
334};
335
336/* MTP Extended Configuration Descriptor */
Jack Pham50439242017-02-07 11:48:25 -0800337struct mtp_ext_config_desc {
Benoit Goby27d01e52011-12-19 14:37:50 -0800338 struct mtp_ext_config_desc_header header;
339 struct mtp_ext_config_desc_function function;
Jack Pham50439242017-02-07 11:48:25 -0800340};
341
342static struct mtp_ext_config_desc mtp_ext_config_desc = {
Benoit Goby27d01e52011-12-19 14:37:50 -0800343 .header = {
344 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
345 .bcdVersion = __constant_cpu_to_le16(0x0100),
346 .wIndex = __constant_cpu_to_le16(4),
Brian Norrise7304622016-02-29 17:44:51 -0800347 .bCount = 1,
Benoit Goby27d01e52011-12-19 14:37:50 -0800348 },
349 .function = {
350 .bFirstInterfaceNumber = 0,
351 .bInterfaceCount = 1,
352 .compatibleID = { 'M', 'T', 'P' },
353 },
354};
355
356struct mtp_device_status {
357 __le16 wLength;
358 __le16 wCode;
359};
360
Colin Crossccebeef2013-11-07 13:08:15 -0800361struct mtp_data_header {
362 /* length of packet, including this header */
363 __le32 length;
364 /* container type (2 for data packet) */
365 __le16 type;
366 /* MTP command code */
367 __le16 command;
368 /* MTP transaction ID */
369 __le32 transaction_id;
370};
371
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -0800372struct mtp_instance {
373 struct usb_function_instance func_inst;
374 const char *name;
375 struct mtp_dev *dev;
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -0700376 char mtp_ext_compat_id[16];
377 struct usb_os_desc mtp_os_desc;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -0800378};
379
Benoit Goby27d01e52011-12-19 14:37:50 -0800380/* temporary variable used between mtp_open() and mtp_gadget_bind() */
381static struct mtp_dev *_mtp_dev;
382
/* Map a generic usb_function back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
387
388static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
389{
390 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
Anson Jacob051584e2016-11-11 01:10:04 -0500391
Benoit Goby27d01e52011-12-19 14:37:50 -0800392 if (!req)
393 return NULL;
394
395 /* now allocate buffers for the requests */
396 req->buf = kmalloc(buffer_size, GFP_KERNEL);
397 if (!req->buf) {
398 usb_ep_free_request(ep, req);
399 return NULL;
400 }
401
402 return req;
403}
404
405static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
406{
407 if (req) {
408 kfree(req->buf);
409 usb_ep_free_request(ep, req);
410 }
411}
412
413static inline int mtp_lock(atomic_t *excl)
414{
415 if (atomic_inc_return(excl) == 1) {
416 return 0;
417 } else {
418 atomic_dec(excl);
419 return -1;
420 }
421}
422
/* Release the single-holder lock taken by mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
427
428/* add a request to the tail of a list */
429static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
430 struct usb_request *req)
431{
432 unsigned long flags;
433
434 spin_lock_irqsave(&dev->lock, flags);
435 list_add_tail(&req->list, head);
436 spin_unlock_irqrestore(&dev->lock, flags);
437}
438
439/* remove a request from the head of a list */
440static struct usb_request
441*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
442{
443 unsigned long flags;
444 struct usb_request *req;
445
446 spin_lock_irqsave(&dev->lock, flags);
447 if (list_empty(head)) {
448 req = 0;
449 } else {
450 req = list_first_entry(head, struct usb_request, list);
451 list_del(&req->list);
452 }
453 spin_unlock_irqrestore(&dev->lock, flags);
454 return req;
455}
456
457static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
458{
459 struct mtp_dev *dev = _mtp_dev;
460
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530461 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800462 dev->state = STATE_ERROR;
463
464 mtp_req_put(dev, &dev->tx_idle, req);
465
466 wake_up(&dev->write_wq);
467}
468
469static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
470{
471 struct mtp_dev *dev = _mtp_dev;
472
473 dev->rx_done = 1;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530474 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800475 dev->state = STATE_ERROR;
476
477 wake_up(&dev->read_wq);
478}
479
480static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
481{
482 struct mtp_dev *dev = _mtp_dev;
483
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +0530484 if (req->status != 0 && dev->state != STATE_OFFLINE)
Benoit Goby27d01e52011-12-19 14:37:50 -0800485 dev->state = STATE_ERROR;
486
487 mtp_req_put(dev, &dev->intr_idle, req);
488
489 wake_up(&dev->intr_wq);
490}
491
/*
 * Autoconfigure and claim the three endpoints this function uses
 * (bulk-in, bulk-out, interrupt-in) and pre-allocate the request pools
 * for each.  Returns 0 on success, -ENODEV if an endpoint cannot be
 * autoconfigured, or -1 if request allocation ultimately fails.
 *
 * TX and RX pools start at the module-parameter-controlled buffer sizes
 * and fall back to MTP_BULK_BUFFER_SIZE (retrying the whole pool) when
 * a large allocation fails.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
		struct usb_endpoint_descriptor *in_desc,
		struct usb_endpoint_descriptor *out_desc,
		struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

retry_tx_alloc:
	/* now allocate requests for our endpoints */
	for (i = 0; i < mtp_tx_reqs; i++) {
		req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
		if (!req) {
			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* release what we got so far, shrink the buffer
			 * size to the default and retry the whole pool
			 */
			while ((req = mtp_req_get(dev, &dev->tx_idle)))
				mtp_request_free(req, dev->ep_in);
			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
			mtp_tx_reqs = MTP_TX_REQ_MAX;
			goto retry_tx_alloc;
		}
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}

	/*
	 * The RX buffer should be aligned to EP max packet for
	 * some controllers. At bind time, we don't know the
	 * operational speed. Hence assuming super speed max
	 * packet size.
	 */
	if (mtp_rx_req_len % 1024)
		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;

retry_rx_alloc:
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
		if (!req) {
			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* roll back the partially-filled rx_req[] array */
			for (--i; i >= 0; i--)
				mtp_request_free(dev->rx_req[i], dev->ep_out);
			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
			goto retry_rx_alloc;
		}
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;

fail:
	pr_err("mtp_bind() could not allocate requests\n");
	return -1;
}
585
/*
 * Read one MTP data transfer from the host via the bulk-out endpoint.
 * Blocks until the function is online, queues a single rx request
 * (rx_req[0]), waits for completion and copies up to @count bytes to
 * userspace.  Returns bytes read, -ECANCELED if the host canceled the
 * transaction, or a negative errno.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	ssize_t r = count;
	unsigned xfer;
	int ret = 0;
	size_t len = 0;

	DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}

	/* request length must span whole max packets and fit the rx buffer */
	len = ALIGN(count, dev->ep_out->maxpacket);
	if (len > mtp_rx_req_len)
		return -EINVAL;

	spin_lock_irq(&dev->lock);
	if (dev->ep_out->desc) {
		/* NOTE(review): when the endpoint is configured this
		 * recomputes len and enforces the tighter
		 * MTP_BULK_BUFFER_SIZE cap, overriding the mtp_rx_req_len
		 * check above — confirm this double limit is intentional
		 * before relying on reads larger than 16 KiB.
		 */
		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
		if (len > MTP_BULK_BUFFER_SIZE) {
			spin_unlock_irq(&dev->lock);
			return -EINVAL;
		}
	}

	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* read_mutex guards rx_req[] against teardown in mtp_unbind paths */
	mutex_lock(&dev->read_mutex);
	if (dev->state == STATE_OFFLINE) {
		r = -EIO;
		mutex_unlock(&dev->read_mutex);
		goto done;
	}
requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = len;
	dev->rx_done = 0;
	mutex_unlock(&dev->read_mutex);
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %pK queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		/* take the request back if it never completed */
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		/* interrupted by a signal */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	mutex_lock(&dev->read_mutex);
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %pK %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

	mutex_unlock(&dev->read_mutex);
done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
	return r;
}
693
/*
 * Write userspace data to the host via the bulk-in endpoint, splitting
 * @count bytes across idle tx requests of at most mtp_tx_req_len each.
 * Sends a trailing zero-length packet when the total is an exact
 * multiple of the endpoint max packet size, as bulk transfer framing
 * requires.  Returns bytes written, -ECANCELED on host cancel, or a
 * negative errno.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change, not a free req */
			DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
				ret, dev->state);
			r = ret;
			break;
		}

		/* clamp each chunk to the tx request buffer size */
		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* return an unqueued request to the idle pool on early exit */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
	return r;
}
787
/*
 * Work-queue handler for MTP_SEND_FILE / MTP_SEND_FILE_WITH_HEADER:
 * read from a local file and write to USB.  Transfer parameters
 * (xfer_file, offset, length, command/transaction id) are published by
 * the ioctl path before queueing this work; smp_rmb()/smp_wmb() pair
 * with the writer.  The outcome is left in dev->xfer_result.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;
	ktime_t start_time;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	/* optionally prepend an MTP data header in the first chunk */
	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			/* woken by signal or state change, not a free req */
			DBG(cdev,
				"send_file_work request NULL ret:%d state:%d\n",
				ret, dev->state);
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			/*
			 * set file size with header according to
			 * MTP Specification v1.0
			 */
			header->length = (count > MTP_MAX_FILE_SIZE) ?
				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}
		start_time = ktime_get();
		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}

		xfer = ret + hdr_size;
		/* record VFS read latency/bytes in the debugfs ring */
		dev->perf[dev->dbg_read_index].vfs_rtime =
			ktime_to_us(ktime_sub(ktime_get(), start_time));
		dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
		dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
		/* header is only sent once, in the first chunk */
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->state != STATE_OFFLINE)
				dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* return an unqueued request to the idle pool on early exit */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
905
/* read from USB and write to a local file */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
	ktime_t start_time;

	/* read our parameters (published by the ioctl path before queuing) */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	/*
	 * Double-buffered pipeline: one request is queued on the OUT
	 * endpoint while the previously completed one is written to the
	 * file.  read_mutex guards rx_req[] against concurrent freeing
	 * in mtp_function_unbind().
	 */
	while (count > 0 || write_req) {
		if (count > 0) {
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			mutex_unlock(&dev->read_mutex);
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously received buffer to the file */
			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
			start_time = ktime_get();
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				/* short write => treat as I/O error */
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			mutex_unlock(&dev->read_mutex);
			/* record VFS write latency/size for debugfs stats */
			dev->perf[dev->dbg_write_index].vfs_wtime =
				ktime_to_us(ktime_sub(ktime_get(), start_time));
			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
			dev->dbg_write_index =
				(dev->dbg_write_index + 1) % MAX_ITERATION;
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				if (dev->state == STATE_OFFLINE)
					r = -EIO;
				else
					r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}

			if (read_req->status) {
				/* hardware-reported transfer error */
				r = read_req->status;
				break;
			}

			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
			mutex_unlock(&dev->read_mutex);
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result for the waiting ioctl caller */
	dev->xfer_result = r;
	smp_wmb();
}
1040
/*
 * Send an MTP event packet on the interrupt endpoint.
 *
 * Copies event->length bytes from the userspace pointer event->data into
 * a free interrupt request and queues it.  Returns 0 on success,
 * -EINVAL for an out-of-range length, -ENODEV when offline, -ETIME when
 * no interrupt request becomes free within 1 second, -EFAULT on a bad
 * user pointer, or the usb_ep_queue() error code.
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	/* wait up to 1s for an idle interrupt request to become available */
	ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
1071
/*
 * Common handler for MTP_SEND_FILE, MTP_SEND_FILE_WITH_HEADER and
 * MTP_RECEIVE_FILE: hands the file transfer described by @mfr to the
 * appropriate work item and blocks until it completes.
 *
 * Serialized by ioctl_excl; transitions dev->state READY -> BUSY for the
 * duration of the transfer and reports -ECANCELED if the host cancelled.
 */
static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
	struct mtp_file_range *mfr)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	struct work_struct *work;
	int ret = -EINVAL;

	if (mtp_lock(&dev->ioctl_excl)) {
		DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
		return -EBUSY;
	}

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancellation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		ret = -ECANCELED;
		goto out;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		ret = -ENODEV;
		goto out;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* hold a reference to the file while we are working with it */
	filp = fget(mfr->fd);
	if (!filp) {
		ret = -EBADF;
		goto fail;
	}

	/* write the parameters */
	dev->xfer_file = filp;
	dev->xfer_file_offset = mfr->offset;
	dev->xfer_file_length = mfr->length;
	/* make sure write is done before parameters are read */
	smp_wmb();

	/* pick the worker matching the ioctl code */
	if (code == MTP_SEND_FILE_WITH_HEADER) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 1;
		dev->xfer_command = mfr->command;
		dev->xfer_transaction_id = mfr->transaction_id;
	} else if (code == MTP_SEND_FILE) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 0;
	} else {
		work = &dev->receive_file_work;
	}

	/* We do the file transfer on a work queue so it will run
	 * in kernel context, which is necessary for vfs_read and
	 * vfs_write to use our buffers in the kernel address space.
	 */
	queue_work(dev->wq, work);
	/* wait for operation to complete */
	flush_workqueue(dev->wq);
	fput(filp);

	/* read the result */
	smp_rmb();
	ret = dev->xfer_result;

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
	return ret;
}
1152
Hemant Kumar83d6a262016-05-02 10:57:12 -07001153static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
1154{
1155 struct mtp_dev *dev = fp->private_data;
1156 struct mtp_file_range mfr;
1157 struct mtp_event event;
1158 int ret = -EINVAL;
1159
1160 switch (code) {
1161 case MTP_SEND_FILE:
1162 case MTP_RECEIVE_FILE:
1163 case MTP_SEND_FILE_WITH_HEADER:
1164 if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
1165 ret = -EFAULT;
1166 goto fail;
1167 }
1168 ret = mtp_send_receive_ioctl(fp, code, &mfr);
1169 break;
1170 case MTP_SEND_EVENT:
1171 if (mtp_lock(&dev->ioctl_excl))
1172 return -EBUSY;
1173 /* return here so we don't change dev->state below,
1174 * which would interfere with bulk transfer state.
1175 */
1176 if (copy_from_user(&event, (void __user *)value, sizeof(event)))
1177 ret = -EFAULT;
1178 else
1179 ret = mtp_send_event(dev, &event);
1180 mtp_unlock(&dev->ioctl_excl);
1181 break;
1182 default:
1183 DBG(dev->cdev, "unknown ioctl code: %d\n", code);
1184 }
1185fail:
1186 return ret;
1187}
1188
/*
 * 32 bit userspace calling into 64 bit kernel. handle ioctl code
 * and userspace pointer
 */
#ifdef CONFIG_COMPAT
static long compat_mtp_ioctl(struct file *fp, unsigned int code,
				unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct mtp_file_range mfr;
	struct __compat_mtp_file_range cmfr;
	struct mtp_event event;
	struct __compat_mtp_event cevent;
	unsigned int cmd;
	bool send_file = false;
	int ret = -EINVAL;

	/* translate the compat ioctl number to its native counterpart */
	switch (code) {
	case COMPAT_MTP_SEND_FILE:
		cmd = MTP_SEND_FILE;
		send_file = true;
		break;
	case COMPAT_MTP_RECEIVE_FILE:
		cmd = MTP_RECEIVE_FILE;
		send_file = true;
		break;
	case COMPAT_MTP_SEND_FILE_WITH_HEADER:
		cmd = MTP_SEND_FILE_WITH_HEADER;
		send_file = true;
		break;
	case COMPAT_MTP_SEND_EVENT:
		cmd = MTP_SEND_EVENT;
		break;
	default:
		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
		ret = -ENOIOCTLCMD;
		goto fail;
	}

	if (send_file) {
		/* widen the 32-bit file-range struct field by field */
		if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
			ret = -EFAULT;
			goto fail;
		}
		mfr.fd = cmfr.fd;
		mfr.offset = cmfr.offset;
		mfr.length = cmfr.length;
		mfr.command = cmfr.command;
		mfr.transaction_id = cmfr.transaction_id;
		ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
	} else {
		if (mtp_lock(&dev->ioctl_excl))
			return -EBUSY;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&cevent, (void __user *)value,
			sizeof(cevent))) {
			ret = -EFAULT;
			goto fail;
		}
		event.length = cevent.length;
		event.data = compat_ptr(cevent.data);
		ret = mtp_send_event(dev, &event);
		mtp_unlock(&dev->ioctl_excl);
	}
fail:
	return ret;
}
#endif
1259
Benoit Goby27d01e52011-12-19 14:37:50 -08001260static int mtp_open(struct inode *ip, struct file *fp)
1261{
1262 printk(KERN_INFO "mtp_open\n");
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301263 if (mtp_lock(&_mtp_dev->open_excl)) {
1264 pr_err("%s mtp_release not called returning EBUSY\n", __func__);
Benoit Goby27d01e52011-12-19 14:37:50 -08001265 return -EBUSY;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301266 }
Benoit Goby27d01e52011-12-19 14:37:50 -08001267
1268 /* clear any error condition */
1269 if (_mtp_dev->state != STATE_OFFLINE)
1270 _mtp_dev->state = STATE_READY;
1271
1272 fp->private_data = _mtp_dev;
1273 return 0;
1274}
1275
1276static int mtp_release(struct inode *ip, struct file *fp)
1277{
1278 printk(KERN_INFO "mtp_release\n");
1279
1280 mtp_unlock(&_mtp_dev->open_excl);
1281 return 0;
1282}
1283
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	/* native and 32-on-64 compat ioctl entry points */
	.unlocked_ioctl = mtp_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_mtp_ioctl,
#endif
	.open = mtp_open,
	.release = mtp_release,
};
1296
/* misc character device exposing the MTP userspace interface */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1302
/*
 * Handle MTP-specific ep0 control requests: the Microsoft OS string
 * descriptor, the MS OS (extended configuration) descriptor, and the
 * MTP class requests CANCEL and GET_DEVICE_STATUS.
 *
 * Returns the number of bytes queued for the data phase, or a negative
 * value (-EOPNOTSUPP) when the request is not handled here.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);

			/* update compatibleID if PTP */
			if (dev->function.fs_descriptors == fs_ptp_descs) {
				struct mtp_ext_config_desc *d = cdev->req->buf;

				d->function.compatibleID[0] = 'P';
			}
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* abort any in-flight transfer; workers observe
			 * STATE_CANCELED and bail out
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1402
1403static int
1404mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1405{
1406 struct usb_composite_dev *cdev = c->cdev;
1407 struct mtp_dev *dev = func_to_mtp(f);
1408 int id;
1409 int ret;
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001410 struct mtp_instance *fi_mtp;
Benoit Goby27d01e52011-12-19 14:37:50 -08001411
1412 dev->cdev = cdev;
Chandana Kishori Chiluveru7f5670a2017-10-28 23:05:45 +05301413 DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
Benoit Goby27d01e52011-12-19 14:37:50 -08001414
1415 /* allocate interface ID(s) */
1416 id = usb_interface_id(c, f);
1417 if (id < 0)
1418 return id;
1419 mtp_interface_desc.bInterfaceNumber = id;
1420
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001421 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1422 ret = usb_string_id(c->cdev);
1423 if (ret < 0)
1424 return ret;
1425 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1426 mtp_interface_desc.iInterface = ret;
1427 }
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001428
1429 fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
1430
1431 if (cdev->use_os_string) {
1432 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
1433 GFP_KERNEL);
1434 if (!f->os_desc_table)
1435 return -ENOMEM;
1436 f->os_desc_n = 1;
1437 f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
1438 }
1439
Benoit Goby27d01e52011-12-19 14:37:50 -08001440 /* allocate endpoints */
1441 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1442 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1443 if (ret)
1444 return ret;
1445
1446 /* support high speed hardware */
1447 if (gadget_is_dualspeed(c->cdev->gadget)) {
1448 mtp_highspeed_in_desc.bEndpointAddress =
1449 mtp_fullspeed_in_desc.bEndpointAddress;
1450 mtp_highspeed_out_desc.bEndpointAddress =
1451 mtp_fullspeed_out_desc.bEndpointAddress;
1452 }
Mark Kuo1b61b272015-08-20 13:01:46 +08001453 /* support super speed hardware */
1454 if (gadget_is_superspeed(c->cdev->gadget)) {
1455 unsigned max_burst;
1456
1457 /* Calculate bMaxBurst, we know packet size is 1024 */
1458 max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
1459 mtp_ss_in_desc.bEndpointAddress =
1460 mtp_fullspeed_in_desc.bEndpointAddress;
1461 mtp_ss_in_comp_desc.bMaxBurst = max_burst;
1462 mtp_ss_out_desc.bEndpointAddress =
1463 mtp_fullspeed_out_desc.bEndpointAddress;
1464 mtp_ss_out_comp_desc.bMaxBurst = max_burst;
1465 }
Benoit Goby27d01e52011-12-19 14:37:50 -08001466
Pratham Pratapd88fbcd2017-12-06 16:47:10 +05301467 fi_mtp->func_inst.f = &dev->function;
Benoit Goby27d01e52011-12-19 14:37:50 -08001468 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
Mark Kuo1b61b272015-08-20 13:01:46 +08001469 gadget_is_superspeed(c->cdev->gadget) ? "super" :
1470 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
1471 f->name, dev->ep_in->name, dev->ep_out->name);
Benoit Goby27d01e52011-12-19 14:37:50 -08001472 return 0;
1473}
1474
/*
 * Unbind: free every queued/idle usb_request and the OS descriptor
 * table.  read_mutex serializes against receive_file_work(), which may
 * still be touching rx_req[] on another CPU.
 */
static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct mtp_instance *fi_mtp;
	struct usb_request *req;
	int i;

	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
	/* reset so a re-bind allocates a fresh string id */
	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
	mutex_lock(&dev->read_mutex);
	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	mutex_unlock(&dev->read_mutex);
	dev->state = STATE_OFFLINE;
	kfree(f->os_desc_table);
	f->os_desc_n = 0;
	fi_mtp->func_inst.f = NULL;
}
1497
1498static int mtp_function_set_alt(struct usb_function *f,
1499 unsigned intf, unsigned alt)
1500{
1501 struct mtp_dev *dev = func_to_mtp(f);
1502 struct usb_composite_dev *cdev = f->config->cdev;
1503 int ret;
1504
1505 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1506
1507 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1508 if (ret)
1509 return ret;
1510
1511 ret = usb_ep_enable(dev->ep_in);
1512 if (ret)
1513 return ret;
1514
1515 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1516 if (ret)
1517 return ret;
1518
1519 ret = usb_ep_enable(dev->ep_out);
1520 if (ret) {
1521 usb_ep_disable(dev->ep_in);
1522 return ret;
1523 }
1524
1525 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1526 if (ret)
1527 return ret;
1528
1529 ret = usb_ep_enable(dev->ep_intr);
1530 if (ret) {
1531 usb_ep_disable(dev->ep_out);
1532 usb_ep_disable(dev->ep_in);
1533 return ret;
1534 }
1535 dev->state = STATE_READY;
1536
1537 /* readers may be blocked waiting for us to go online */
1538 wake_up(&dev->read_wq);
1539 return 0;
1540}
1541
/*
 * Handle disconnect/config change: go OFFLINE first so workers observe
 * the state change, then disable all three endpoints and wake blocked
 * readers so they can return an error.
 */
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}
1558
/*
 * debugfs "status" read: dump the per-iteration VFS read/write latency
 * samples collected by the file-transfer workers and print min/max/avg
 * over the full-sized transfers only.
 */
static int debug_mtp_read_stats(struct seq_file *s, void *unused)
{
	struct mtp_dev *dev = _mtp_dev;
	int i;
	unsigned long flags;
	unsigned int min, max = 0, sum = 0, iteration = 0;

	seq_puts(s, "\n=======================\n");
	seq_puts(s, "USB MTP OUT related VFS write stats:\n");
	seq_puts(s, "\n=======================\n");
	spin_lock_irqsave(&dev->lock, flags);
	min = dev->perf[0].vfs_wtime;
	for (i = 0; i < MAX_ITERATION; i++) {
		seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
				dev->perf[i].vfs_wbytes,
				dev->perf[i].vfs_wtime);
		/* only full-length buffers count toward the aggregate */
		if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
			sum += dev->perf[i].vfs_wtime;
			if (min > dev->perf[i].vfs_wtime)
				min = dev->perf[i].vfs_wtime;
			if (max < dev->perf[i].vfs_wtime)
				max = dev->perf[i].vfs_wtime;
			iteration++;
		}
	}

	seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
						min, max, (iteration ? (sum / iteration) : 0));
	min = max = sum = iteration = 0;
	seq_puts(s, "\n=======================\n");
	seq_puts(s, "USB MTP IN related VFS read stats:\n");
	seq_puts(s, "\n=======================\n");

	min = dev->perf[0].vfs_rtime;
	for (i = 0; i < MAX_ITERATION; i++) {
		seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
				dev->perf[i].vfs_rbytes,
				dev->perf[i].vfs_rtime);
		if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
			sum += dev->perf[i].vfs_rtime;
			if (min > dev->perf[i].vfs_rtime)
				min = dev->perf[i].vfs_rtime;
			if (max < dev->perf[i].vfs_rtime)
				max = dev->perf[i].vfs_rtime;
			iteration++;
		}
	}

	seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
						min, max, (iteration ? (sum / iteration) : 0));
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
1612
1613static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
1614 size_t count, loff_t *ppos)
1615{
1616 int clear_stats;
1617 unsigned long flags;
1618 struct mtp_dev *dev = _mtp_dev;
1619
1620 if (buf == NULL) {
1621 pr_err("[%s] EINVAL\n", __func__);
1622 goto done;
1623 }
1624
1625 if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
1626 pr_err("Wrong value. To clear stats, enter value as 0.\n");
1627 goto done;
1628 }
1629
1630 spin_lock_irqsave(&dev->lock, flags);
1631 memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
1632 dev->dbg_read_index = 0;
1633 dev->dbg_write_index = 0;
1634 spin_unlock_irqrestore(&dev->lock, flags);
1635done:
1636 return count;
1637}
1638
/* debugfs open: hook the stats dumper into the seq_file machinery */
static int debug_mtp_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_mtp_read_stats, inode->i_private);
}
1643
/* file operations for the debugfs "status" node (read stats / reset) */
static const struct file_operations debug_mtp_ops = {
	.open = debug_mtp_open,
	.read = seq_read,
	.write = debug_mtp_reset_stats,
};
1649
1650struct dentry *dent_mtp;
1651static void mtp_debugfs_init(void)
1652{
1653 struct dentry *dent_mtp_status;
1654
1655 dent_mtp = debugfs_create_dir("usb_mtp", 0);
1656 if (!dent_mtp || IS_ERR(dent_mtp))
1657 return;
1658
1659 dent_mtp_status = debugfs_create_file("status", 0644, dent_mtp,
1660 0, &debug_mtp_ops);
1661 if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
1662 debugfs_remove(dent_mtp);
1663 dent_mtp = NULL;
1664 return;
1665 }
1666}
1667
/* Remove the whole usb_mtp/ debugfs tree (no-op if it was never made). */
static void mtp_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_mtp);
}
1672
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001673static int __mtp_setup(struct mtp_instance *fi_mtp)
Benoit Goby27d01e52011-12-19 14:37:50 -08001674{
1675 struct mtp_dev *dev;
1676 int ret;
1677
1678 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001679
1680 if (fi_mtp != NULL)
1681 fi_mtp->dev = dev;
1682
Benoit Goby27d01e52011-12-19 14:37:50 -08001683 if (!dev)
1684 return -ENOMEM;
1685
1686 spin_lock_init(&dev->lock);
1687 init_waitqueue_head(&dev->read_wq);
1688 init_waitqueue_head(&dev->write_wq);
1689 init_waitqueue_head(&dev->intr_wq);
1690 atomic_set(&dev->open_excl, 0);
1691 atomic_set(&dev->ioctl_excl, 0);
1692 INIT_LIST_HEAD(&dev->tx_idle);
1693 INIT_LIST_HEAD(&dev->intr_idle);
1694
1695 dev->wq = create_singlethread_workqueue("f_mtp");
1696 if (!dev->wq) {
1697 ret = -ENOMEM;
1698 goto err1;
1699 }
1700 INIT_WORK(&dev->send_file_work, send_file_work);
1701 INIT_WORK(&dev->receive_file_work, receive_file_work);
1702
1703 _mtp_dev = dev;
1704
1705 ret = misc_register(&mtp_device);
1706 if (ret)
1707 goto err2;
1708
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001709 mtp_debugfs_init();
Benoit Goby27d01e52011-12-19 14:37:50 -08001710 return 0;
1711
1712err2:
1713 destroy_workqueue(dev->wq);
1714err1:
1715 _mtp_dev = NULL;
1716 kfree(dev);
1717 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1718 return ret;
1719}
1720
/* configfs entry point: set up the driver state for instance @fi_mtp */
static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
{
	return __mtp_setup(fi_mtp);
}
1725
1726
/*
 * Tear down everything __mtp_setup() created: debugfs tree, misc
 * device, workqueue and the singleton mtp_dev itself.
 */
static void mtp_cleanup(void)
{
	struct mtp_dev *dev = _mtp_dev;

	if (!dev)
		return;

	mtp_debugfs_remove();
	misc_deregister(&mtp_device);
	destroy_workqueue(dev->wq);
	_mtp_dev = NULL;
	kfree(dev);
}
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001740
/* Map a configfs item back to its containing mtp_instance. */
static struct mtp_instance *to_mtp_instance(struct config_item *item)
{
	return container_of(to_config_group(item), struct mtp_instance,
		func_inst.group);
}
1746
/* configfs release callback: drop the function-instance reference. */
static void mtp_attr_release(struct config_item *item)
{
	struct mtp_instance *fi_mtp = to_mtp_instance(item);

	usb_put_function_instance(&fi_mtp->func_inst);
}
1753
/* configfs item operations / type for the MTP function instance group */
static struct configfs_item_operations mtp_item_ops = {
	.release        = mtp_attr_release,
};

static struct config_item_type mtp_func_type = {
	.ct_item_ops    = &mtp_item_ops,
	.ct_owner       = THIS_MODULE,
};
1762
1763
/* Map a generic usb_function_instance back to its mtp_instance. */
static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
{
	return container_of(fi, struct mtp_instance, func_inst);
}
1768
1769static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1770{
1771 struct mtp_instance *fi_mtp;
1772 char *ptr;
1773 int name_len;
1774
1775 name_len = strlen(name) + 1;
1776 if (name_len > MAX_INST_NAME_LEN)
1777 return -ENAMETOOLONG;
1778
1779 ptr = kstrndup(name, name_len, GFP_KERNEL);
1780 if (!ptr)
1781 return -ENOMEM;
1782
1783 fi_mtp = to_fi_mtp(fi);
1784 fi_mtp->name = ptr;
1785
1786 return 0;
1787}
1788
1789static void mtp_free_inst(struct usb_function_instance *fi)
1790{
1791 struct mtp_instance *fi_mtp;
1792
1793 fi_mtp = to_fi_mtp(fi);
1794 kfree(fi_mtp->name);
1795 mtp_cleanup();
1796 kfree(fi_mtp);
1797}
1798
1799struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
1800{
1801 struct mtp_instance *fi_mtp;
1802 int ret = 0;
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001803 struct usb_os_desc *descs[1];
1804 char *names[1];
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001805
1806 fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
1807 if (!fi_mtp)
1808 return ERR_PTR(-ENOMEM);
1809 fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
1810 fi_mtp->func_inst.free_func_inst = mtp_free_inst;
1811
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001812 fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
1813 INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
1814 descs[0] = &fi_mtp->mtp_os_desc;
1815 names[0] = "MTP";
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001816
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001817 if (mtp_config) {
1818 ret = mtp_setup_configfs(fi_mtp);
1819 if (ret) {
1820 kfree(fi_mtp);
1821 pr_err("Error setting MTP\n");
1822 return ERR_PTR(ret);
1823 }
1824 } else
1825 fi_mtp->dev = _mtp_dev;
1826
1827 config_group_init_type_name(&fi_mtp->func_inst.group,
1828 "", &mtp_func_type);
Amit Pundir8157e622016-04-05 21:09:54 +05301829 usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
1830 descs, names, THIS_MODULE);
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001831
Liangliang Lu56703d112017-05-03 16:06:35 +08001832 mutex_init(&fi_mtp->dev->read_mutex);
1833
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001834 return &fi_mtp->func_inst;
1835}
1836EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
1837
/* usb_function_driver hook: allocate an MTP (not PTP) instance */
static struct usb_function_instance *mtp_alloc_inst(void)
{
		return alloc_inst_mtp_ptp(true);
}
1842
/* usb_function setup hook: forward ep0 control requests to the handler */
static int mtp_ctrlreq_configfs(struct usb_function *f,
				const struct usb_ctrlrequest *ctrl)
{
	return mtp_ctrlrequest(f->config->cdev, ctrl);
}
1848
/* free_func hook: nothing to do, the instance owns all allocations */
static void mtp_free(struct usb_function *f)
{
	/*NO-OP: no function specific resource allocation in mtp_alloc*/
}
1853
/*
 * Build the usb_function for an instance.  @mtp_config selects the MTP
 * or PTP descriptor set; both share the same mtp_dev, so a PTP function
 * may only be linked after the MTP instance created the device state.
 */
struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
					bool mtp_config)
{
	struct mtp_instance *fi_mtp = to_fi_mtp(fi);
	struct mtp_dev *dev;

	/*
	 * PTP piggybacks on MTP function so make sure we have
	 * created MTP function before we associate this PTP
	 * function with a gadget configuration.
	 */
	if (fi_mtp->dev == NULL) {
		pr_err("Error: Create MTP function before linking"
				" PTP function with a gadget configuration\n");
		pr_err("\t1: Delete existing PTP function if any\n");
		pr_err("\t2: Create MTP function\n");
		pr_err("\t3: Create and symlink PTP function"
				" with a gadget configuration\n");
		return ERR_PTR(-EINVAL); /* Invalid Configuration */
	}

	dev = fi_mtp->dev;
	dev->function.name = DRIVER_NAME;
	dev->function.strings = mtp_strings;
	if (mtp_config) {
		dev->function.fs_descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
		dev->function.ss_descriptors = ss_mtp_descs;
	} else {
		dev->function.fs_descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
		dev->function.ss_descriptors = ss_ptp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;
	dev->function.setup = mtp_ctrlreq_configfs;
	dev->function.free_func = mtp_free;
	fi->f = &dev->function;

	return &dev->function;
}
EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1898
1899static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
1900{
1901 return function_alloc_mtp_ptp(fi, true);
1902}
1903
/* Register the "mtp" function driver with the composite gadget core. */
DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
MODULE_LICENSE("GPL");