blob: 3f2594661701f22aacba0c1aa5e7473883bc4986 [file] [log] [blame]
Benoit Goby27d01e52011-12-19 14:37:50 -08001/*
2 * Gadget Function Driver for MTP
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/poll.h>
24#include <linux/delay.h>
25#include <linux/wait.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28
Hemant Kumarfc2b8f02016-05-02 11:18:48 -070029#include <linux/seq_file.h>
30#include <linux/debugfs.h>
Benoit Goby27d01e52011-12-19 14:37:50 -080031#include <linux/types.h>
32#include <linux/file.h>
33#include <linux/device.h>
34#include <linux/miscdevice.h>
35
36#include <linux/usb.h>
37#include <linux/usb_usual.h>
38#include <linux/usb/ch9.h>
39#include <linux/usb/f_mtp.h>
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -080040#include <linux/configfs.h>
41#include <linux/usb/composite.h>
42
43#include "configfs.h"
Benoit Goby27d01e52011-12-19 14:37:50 -080044
/* Preferred (initial) sizes for the bulk transfer buffers; the allocation
 * code falls back to MTP_BULK_BUFFER_SIZE if these cannot be satisfied.
 */
#define MTP_RX_BUFFER_INIT_SIZE    1048576
#define MTP_TX_BUFFER_INIT_SIZE    1048576
#define MTP_BULK_BUFFER_SIZE       16384
#define INTR_BUFFER_SIZE           28
#define MAX_INST_NAME_LEN          40
/* Largest size representable in the 32-bit MTP data header length field */
#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL

/* String IDs */
#define INTERFACE_STRING_INDEX	0

/* values for mtp_dev.state */
#define STATE_OFFLINE               0   /* initial state, disconnected */
#define STATE_READY                 1   /* ready for userspace calls */
#define STATE_BUSY                  2   /* processing userspace calls */
#define STATE_CANCELED              3   /* transaction canceled by host */
#define STATE_ERROR                 4   /* error from completion routine */

/* number of tx and rx requests to allocate */
#define MTP_TX_REQ_MAX 8
#define RX_REQ_MAX 2
#define INTR_REQ_MAX 5

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID   0xEE

/* MTP class requests */
#define MTP_REQ_CANCEL              0x64
#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
#define MTP_REQ_RESET               0x66
#define MTP_REQ_GET_DEVICE_STATUS   0x67

/* constants for device status */
#define MTP_RESPONSE_OK             0x2001
#define MTP_RESPONSE_DEVICE_BUSY    0x2019
#define DRIVER_NAME "mtp"

/* depth of the per-direction debugfs performance ring buffer (see perf[]) */
#define MAX_ITERATION		100

/* Runtime-tunable request sizes/counts, exposed as module parameters */
unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
module_param(mtp_rx_req_len, uint, 0644);

unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
module_param(mtp_tx_req_len, uint, 0644);

unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
module_param(mtp_tx_reqs, uint, 0644);

static const char mtp_shortname[] = DRIVER_NAME "_usb";
Benoit Goby27d01e52011-12-19 14:37:50 -080093
/* Per-function-instance state for the MTP gadget function. */
struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;		/* protects state transitions and the req lists */

	struct usb_ep *ep_in;		/* bulk IN (device -> host) */
	struct usb_ep *ep_out;		/* bulk OUT (host -> device) */
	struct usb_ep *ep_intr;		/* interrupt IN, for MTP events */

	int state;			/* one of the STATE_* values above */

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;	/* unused bulk-IN requests */
	struct list_head intr_idle;	/* unused interrupt requests */

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	wait_queue_head_t intr_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;			/* set by mtp_complete_out() */

	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;
	uint16_t xfer_command;
	uint32_t xfer_transaction_id;
	int xfer_result;
	/* ring buffer of recent VFS read/write timings, published via
	 * debugfs (see the seq_file/debugfs includes above)
	 */
	struct {
		unsigned long vfs_rbytes;
		unsigned long vfs_wbytes;
		unsigned int vfs_rtime;
		unsigned int vfs_wtime;
	} perf[MAX_ITERATION];
	unsigned int dbg_read_index;
	unsigned int dbg_write_index;
	/* serializes users of rx_req[]/ep_out between the read paths and
	 * teardown; NOTE(review): presumably also taken on disconnect --
	 * confirm against the unbind/disable code outside this chunk
	 */
	struct mutex read_mutex;
};
142
/* Interface descriptor used in MTP mode: vendor-specific class so the
 * host matches on the "MTP" interface string / MS OS descriptor instead.
 */
static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol     = 0,
};

/* Interface descriptor used in PTP mode: standard still-image class */
static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass     = 1,
	.bInterfaceProtocol     = 1,
};

/* SuperSpeed bulk endpoints: 1024-byte max packet per USB 3.x */
static struct usb_endpoint_descriptor mtp_ss_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
	.bLength                = sizeof(mtp_ss_in_comp_desc),
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst           = DYNAMIC, */
};

static struct usb_endpoint_descriptor mtp_ss_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
	.bLength                = sizeof(mtp_ss_out_comp_desc),
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst           = DYNAMIC, */
};

/* High-speed bulk endpoints: 512-byte max packet per USB 2.0 */
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

/* Full-speed bulk endpoints: wMaxPacketSize left for the gadget core */
static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

/* Interrupt IN endpoint used for MTP event notifications */
static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval              = 6,
};

static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
	.bLength                = sizeof(mtp_intr_ss_comp_desc),
	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
	.wBytesPerInterval      = cpu_to_le16(INTR_BUFFER_SIZE),
};
235
/* Descriptor lists per speed, MTP flavor */
static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *ss_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_ss_in_desc,
	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_ss_out_desc,
	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
	NULL,
};

/* Descriptor lists per speed, PTP flavor (same endpoints, PTP interface) */
static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *ss_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_ss_in_desc,
	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
	(struct usb_descriptor_header *) &mtp_ss_out_desc,
	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
	NULL,
};

static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s	= "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language		= 0x0409,	/* en-US */
	.strings		= mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};
305
/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};

/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	/* NOTE(review): bcdVersion/bCount are declared without __le
	 * annotations, unlike the other multi-byte fields -- verify
	 * endianness expectations if this struct is ever edited
	 */
	__u16	bcdVersion;
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};

/* MTP Extended Configuration Descriptor */
struct mtp_ext_config_desc {
	struct mtp_ext_config_desc_header	header;
	struct mtp_ext_config_desc_function    function;
};

/* The instance returned for the MS "get extended config" vendor request:
 * a single function advertising the "MTP" compatible ID
 */
static struct mtp_ext_config_desc mtp_ext_config_desc = {
	.header = {
		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
		.bcdVersion = __constant_cpu_to_le16(0x0100),
		.wIndex = __constant_cpu_to_le16(4),
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'M', 'T', 'P' },
	},
};

/* Payload for MTP_REQ_GET_DEVICE_STATUS class requests */
struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

struct mtp_data_header {
	/* length of packet, including this header */
	__le32	length;
	/* container type (2 for data packet) */
	__le16	type;
	/* MTP command code */
	__le16	command;
	/* MTP transaction ID */
	__le32	transaction_id;
};

/* configfs bookkeeping for one bound instance of this function */
struct mtp_instance {
	struct usb_function_instance func_inst;
	const char *name;
	struct mtp_dev *dev;
	char mtp_ext_compat_id[16];
	struct usb_os_desc mtp_os_desc;
};
379
/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

/* Map the composite framework's usb_function back to its mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
387
388static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
389{
390 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
Anson Jacob051584e2016-11-11 01:10:04 -0500391
Benoit Goby27d01e52011-12-19 14:37:50 -0800392 if (!req)
393 return NULL;
394
395 /* now allocate buffers for the requests */
396 req->buf = kmalloc(buffer_size, GFP_KERNEL);
397 if (!req->buf) {
398 usb_ep_free_request(ep, req);
399 return NULL;
400 }
401
402 return req;
403}
404
405static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
406{
407 if (req) {
408 kfree(req->buf);
409 usb_ep_free_request(ep, req);
410 }
411}
412
413static inline int mtp_lock(atomic_t *excl)
414{
415 if (atomic_inc_return(excl) == 1) {
416 return 0;
417 } else {
418 atomic_dec(excl);
419 return -1;
420 }
421}
422
/* Release the exclusive-use counter taken by a successful mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
427
428/* add a request to the tail of a list */
429static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
430 struct usb_request *req)
431{
432 unsigned long flags;
433
434 spin_lock_irqsave(&dev->lock, flags);
435 list_add_tail(&req->list, head);
436 spin_unlock_irqrestore(&dev->lock, flags);
437}
438
439/* remove a request from the head of a list */
440static struct usb_request
441*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
442{
443 unsigned long flags;
444 struct usb_request *req;
445
446 spin_lock_irqsave(&dev->lock, flags);
447 if (list_empty(head)) {
448 req = 0;
449 } else {
450 req = list_first_entry(head, struct usb_request, list);
451 list_del(&req->list);
452 }
453 spin_unlock_irqrestore(&dev->lock, flags);
454 return req;
455}
456
/* Bulk-IN completion: recycle the request and wake any blocked writer.
 * A non-zero status while still online marks the device STATE_ERROR.
 */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0 && dev->state != STATE_OFFLINE)
		dev->state = STATE_ERROR;

	/* return the request to the idle pool before waking writers */
	mtp_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}
468
/* Bulk-OUT completion: flag rx_done and wake the reader waiting on it.
 * The request itself stays owned by the reader (dev->rx_req[]).
 */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0 && dev->state != STATE_OFFLINE)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}
479
/* Interrupt-IN completion: recycle the event request and wake senders. */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0 && dev->state != STATE_OFFLINE)
		dev->state = STATE_ERROR;

	mtp_req_put(dev, &dev->intr_idle, req);

	wake_up(&dev->intr_wq);
}
491
/*
 * Autoconfigure the three endpoints (bulk IN/OUT, interrupt IN) and
 * pre-allocate their usb_requests.
 *
 * Allocation strategy: try the large tunable sizes first; if the kernel
 * cannot satisfy them, free what was allocated, shrink to
 * MTP_BULK_BUFFER_SIZE and retry once via the retry_* labels.
 *
 * Returns 0 on success, -ENODEV if endpoint autoconfig fails, or -1 if
 * request allocation fails even at the fallback size.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	/* extra headroom some UDCs require on IN buffers */
	size_t extra_buf_alloc = cdev->gadget->extra_buf_alloc;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

retry_tx_alloc:
	/* now allocate requests for our endpoints */
	for (i = 0; i < mtp_tx_reqs; i++) {
		req = mtp_request_new(dev->ep_in,
				mtp_tx_req_len + extra_buf_alloc);
		if (!req) {
			/* already at the small size -> nothing left to try */
			if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* drop everything allocated so far, shrink, retry */
			while ((req = mtp_req_get(dev, &dev->tx_idle)))
				mtp_request_free(req, dev->ep_in);
			mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
			mtp_tx_reqs = MTP_TX_REQ_MAX;
			goto retry_tx_alloc;
		}
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}

	/*
	 * The RX buffer should be aligned to EP max packet for
	 * some controllers. At bind time, we don't know the
	 * operational speed. Hence assuming super speed max
	 * packet size.
	 */
	if (mtp_rx_req_len % 1024)
		mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;

retry_rx_alloc:
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
		if (!req) {
			if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
				goto fail;
			/* free the requests allocated earlier in this loop */
			for (--i; i >= 0; i--)
				mtp_request_free(dev->rx_req[i], dev->ep_out);
			mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
			goto retry_rx_alloc;
		}
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr,
				INTR_BUFFER_SIZE + extra_buf_alloc);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}

	return 0;

fail:
	pr_err("mtp_bind() could not allocate requests\n");
	return -1;
}
588
/*
 * read() handler for the mtp_usb character device: receive one bulk-OUT
 * transfer from the host and copy it to userspace.
 *
 * Blocks until the function is online, queues rx_req[0] for up to @count
 * bytes (rounded up to the endpoint max packet), then sleeps until the
 * completion handler sets rx_done or the state leaves STATE_BUSY.
 *
 * Returns bytes read, -ECANCELED if the host canceled the transaction,
 * -ENODEV/-EIO on disconnect or transport error, -EINVAL if @count cannot
 * fit the receive buffer.  dev->read_mutex guards rx_req[0] against
 * concurrent teardown.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	ssize_t r = count;
	unsigned xfer;
	int ret = 0;
	size_t len = 0;

	DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}

	/* request length must be a multiple of maxpacket and fit the buffer */
	len = ALIGN(count, dev->ep_out->maxpacket);
	if (len > mtp_rx_req_len)
		return -EINVAL;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}

	if (dev->ep_out->desc) {
		if (!cdev) {
			spin_unlock_irq(&dev->lock);
			return -ENODEV;
		}

		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
		if (len > MTP_BULK_BUFFER_SIZE) {
			spin_unlock_irq(&dev->lock);
			return -EINVAL;
		}
	}

	if (dev->state == STATE_CANCELED) {
		/* report cancellation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	mutex_lock(&dev->read_mutex);
	if (dev->state == STATE_OFFLINE) {
		r = -EIO;
		mutex_unlock(&dev->read_mutex);
		goto done;
	}
requeue_req:
	/* queue a request (read_mutex is held on entry to this label) */
	req = dev->rx_req[0];
	req->length = len;
	dev->rx_done = 0;
	mutex_unlock(&dev->read_mutex);
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %pK queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
	if (dev->state == STATE_CANCELED) {
		r = -ECANCELED;
		/* take the request back if it never completed */
		if (!dev->rx_done)
			usb_ep_dequeue(dev->ep_out, req);
		spin_lock_irq(&dev->lock);
		dev->state = STATE_CANCELED;
		spin_unlock_irq(&dev->lock);
		goto done;
	}
	if (ret < 0) {
		/* interrupted by a signal */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	mutex_lock(&dev->read_mutex);
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %pK %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

	mutex_unlock(&dev->read_mutex);
done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
	return r;
}
706
/*
 * write() handler for the mtp_usb character device: send @count bytes from
 * userspace to the host over bulk IN, split into mtp_tx_req_len chunks.
 *
 * A zero-length packet is appended when the total is an exact multiple of
 * the endpoint max packet size, so the host can detect end of transfer.
 *
 * Returns bytes written, -ECANCELED if the host canceled, -ENODEV when
 * offline, -EIO on transport error, -EFAULT on bad user buffer.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancellation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change, not by a request */
			DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
				ret, dev->state);
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* on early exit, return the unqueued request to the idle pool */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
	return r;
}
800
/* read from a local file and write to USB */
/*
 * Work-queue handler for MTP_SEND_FILE / MTP_SEND_FILE_WITH_HEADER:
 * streams dev->xfer_file_length bytes from dev->xfer_file (starting at
 * dev->xfer_file_offset) to the host over bulk IN, optionally prefixing
 * the first chunk with an MTP data header.
 *
 * Parameters arrive through dev->xfer_* fields (published by the ioctl
 * path with smp_wmb(); hence the smp_rmb() before reading them) and the
 * outcome is returned the same way via dev->xfer_result.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;
	ktime_t start_time;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			DBG(cdev,
				"send_file_work request NULL ret:%d state:%d\n",
				ret, dev->state);
			r = ret;
			break;
		}

		if (count > mtp_tx_req_len)
			xfer = mtp_tx_req_len;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			/*
			 * set file size with header according to
			 * MTP Specification v1.0
			 */
			header->length = (count > MTP_MAX_FILE_SIZE) ?
				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
					__cpu_to_le32(dev->xfer_transaction_id);
		}
		/* time the VFS read for the debugfs perf ring buffer */
		start_time = ktime_get();
		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
								&offset);
		if (ret < 0) {
			r = ret;
			break;
		}

		xfer = ret + hdr_size;
		dev->perf[dev->dbg_read_index].vfs_rtime =
			ktime_to_us(ktime_sub(ktime_get(), start_time));
		dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
		dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
		/* header is only sent with the first chunk */
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->state != STATE_OFFLINE)
				dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
918
/* read from USB and write to a local file
 *
 * Workqueue handler for MTP_RECEIVE_FILE.  Parameters (file, offset,
 * byte count) are published by mtp_send_receive_ioctl() and read here
 * after smp_rmb().  Runs a two-stage pipeline over the rx_req ring:
 * while one request is queued on the OUT endpoint, the previously
 * completed one is written to the file with vfs_write().  A count of
 * 0xFFFFFFFF means "read until short packet".  The result is published
 * in dev->xfer_result for the ioctl caller.  dev->read_mutex guards
 * the rx_req buffers against concurrent teardown in
 * mtp_function_unbind().
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
	ktime_t start_time;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* hold read_mutex so unbind cannot free rx_req[] under us */
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			mutex_unlock(&dev->read_mutex);
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the request completed on the previous iteration */
			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
			start_time = ktime_get();
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			mutex_unlock(&dev->read_mutex);
			/* record VFS write latency for the debugfs stats */
			dev->perf[dev->dbg_write_index].vfs_wtime =
				ktime_to_us(ktime_sub(ktime_get(), start_time));
			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
			dev->dbg_write_index =
				(dev->dbg_write_index + 1) % MAX_ITERATION;
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible_timeout(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				if (dev->state == STATE_OFFLINE)
					r = -EIO;
				else
					r = -ECANCELED;
				/* take the request back from the controller */
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}

			if (read_req->status) {
				r = read_req->status;
				break;
			}

			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			/* hand the completed request to the write stage */
			write_req = read_req;
			read_req = NULL;
			mutex_unlock(&dev->read_mutex);
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
1053
1054static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
1055{
1056 struct usb_request *req = NULL;
1057 int ret;
1058 int length = event->length;
1059
Greg Hackmann3a725f42014-02-24 10:19:13 -08001060 DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
Benoit Goby27d01e52011-12-19 14:37:50 -08001061
1062 if (length < 0 || length > INTR_BUFFER_SIZE)
1063 return -EINVAL;
1064 if (dev->state == STATE_OFFLINE)
1065 return -ENODEV;
1066
1067 ret = wait_event_interruptible_timeout(dev->intr_wq,
1068 (req = mtp_req_get(dev, &dev->intr_idle)),
1069 msecs_to_jiffies(1000));
1070 if (!req)
1071 return -ETIME;
1072
1073 if (copy_from_user(req->buf, (void __user *)event->data, length)) {
1074 mtp_req_put(dev, &dev->intr_idle, req);
1075 return -EFAULT;
1076 }
1077 req->length = length;
1078 ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
1079 if (ret)
1080 mtp_req_put(dev, &dev->intr_idle, req);
1081
1082 return ret;
1083}
1084
Hemant Kumar83d6a262016-05-02 10:57:12 -07001085static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
1086 struct mtp_file_range *mfr)
Benoit Goby27d01e52011-12-19 14:37:50 -08001087{
1088 struct mtp_dev *dev = fp->private_data;
1089 struct file *filp = NULL;
Hemant Kumar83d6a262016-05-02 10:57:12 -07001090 struct work_struct *work;
Benoit Goby27d01e52011-12-19 14:37:50 -08001091 int ret = -EINVAL;
1092
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301093 if (mtp_lock(&dev->ioctl_excl)) {
1094 DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
Benoit Goby27d01e52011-12-19 14:37:50 -08001095 return -EBUSY;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301096 }
Benoit Goby27d01e52011-12-19 14:37:50 -08001097
Hemant Kumar83d6a262016-05-02 10:57:12 -07001098 spin_lock_irq(&dev->lock);
1099 if (dev->state == STATE_CANCELED) {
1100 /* report cancellation to userspace */
1101 dev->state = STATE_READY;
Benoit Goby27d01e52011-12-19 14:37:50 -08001102 spin_unlock_irq(&dev->lock);
Hemant Kumar83d6a262016-05-02 10:57:12 -07001103 ret = -ECANCELED;
Benoit Goby27d01e52011-12-19 14:37:50 -08001104 goto out;
1105 }
Hemant Kumar83d6a262016-05-02 10:57:12 -07001106 if (dev->state == STATE_OFFLINE) {
1107 spin_unlock_irq(&dev->lock);
1108 ret = -ENODEV;
1109 goto out;
Benoit Goby27d01e52011-12-19 14:37:50 -08001110 }
Hemant Kumar83d6a262016-05-02 10:57:12 -07001111 dev->state = STATE_BUSY;
1112 spin_unlock_irq(&dev->lock);
1113
1114 /* hold a reference to the file while we are working with it */
1115 filp = fget(mfr->fd);
1116 if (!filp) {
1117 ret = -EBADF;
1118 goto fail;
1119 }
1120
1121 /* write the parameters */
1122 dev->xfer_file = filp;
1123 dev->xfer_file_offset = mfr->offset;
1124 dev->xfer_file_length = mfr->length;
1125 /* make sure write is done before parameters are read */
1126 smp_wmb();
1127
1128 if (code == MTP_SEND_FILE_WITH_HEADER) {
1129 work = &dev->send_file_work;
1130 dev->xfer_send_header = 1;
1131 dev->xfer_command = mfr->command;
1132 dev->xfer_transaction_id = mfr->transaction_id;
1133 } else if (code == MTP_SEND_FILE) {
1134 work = &dev->send_file_work;
1135 dev->xfer_send_header = 0;
1136 } else {
1137 work = &dev->receive_file_work;
1138 }
1139
1140 /* We do the file transfer on a work queue so it will run
1141 * in kernel context, which is necessary for vfs_read and
1142 * vfs_write to use our buffers in the kernel address space.
1143 */
1144 queue_work(dev->wq, work);
1145 /* wait for operation to complete */
1146 flush_workqueue(dev->wq);
1147 fput(filp);
1148
1149 /* read the result */
1150 smp_rmb();
1151 ret = dev->xfer_result;
Benoit Goby27d01e52011-12-19 14:37:50 -08001152
1153fail:
1154 spin_lock_irq(&dev->lock);
1155 if (dev->state == STATE_CANCELED)
1156 ret = -ECANCELED;
1157 else if (dev->state != STATE_OFFLINE)
1158 dev->state = STATE_READY;
1159 spin_unlock_irq(&dev->lock);
1160out:
1161 mtp_unlock(&dev->ioctl_excl);
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301162 DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
Benoit Goby27d01e52011-12-19 14:37:50 -08001163 return ret;
1164}
1165
Hemant Kumar83d6a262016-05-02 10:57:12 -07001166static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
1167{
1168 struct mtp_dev *dev = fp->private_data;
1169 struct mtp_file_range mfr;
1170 struct mtp_event event;
1171 int ret = -EINVAL;
1172
1173 switch (code) {
1174 case MTP_SEND_FILE:
1175 case MTP_RECEIVE_FILE:
1176 case MTP_SEND_FILE_WITH_HEADER:
1177 if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
1178 ret = -EFAULT;
1179 goto fail;
1180 }
1181 ret = mtp_send_receive_ioctl(fp, code, &mfr);
1182 break;
1183 case MTP_SEND_EVENT:
1184 if (mtp_lock(&dev->ioctl_excl))
1185 return -EBUSY;
1186 /* return here so we don't change dev->state below,
1187 * which would interfere with bulk transfer state.
1188 */
1189 if (copy_from_user(&event, (void __user *)value, sizeof(event)))
1190 ret = -EFAULT;
1191 else
1192 ret = mtp_send_event(dev, &event);
1193 mtp_unlock(&dev->ioctl_excl);
1194 break;
1195 default:
1196 DBG(dev->cdev, "unknown ioctl code: %d\n", code);
1197 }
1198fail:
1199 return ret;
1200}
1201
1202/*
1203 * 32 bit userspace calling into 64 bit kernel. handle ioctl code
1204 * and userspace pointer
1205 */
1206#ifdef CONFIG_COMPAT
1207static long compat_mtp_ioctl(struct file *fp, unsigned int code,
1208 unsigned long value)
1209{
1210 struct mtp_dev *dev = fp->private_data;
1211 struct mtp_file_range mfr;
1212 struct __compat_mtp_file_range cmfr;
1213 struct mtp_event event;
1214 struct __compat_mtp_event cevent;
1215 unsigned int cmd;
1216 bool send_file = false;
1217 int ret = -EINVAL;
1218
1219 switch (code) {
1220 case COMPAT_MTP_SEND_FILE:
1221 cmd = MTP_SEND_FILE;
1222 send_file = true;
1223 break;
1224 case COMPAT_MTP_RECEIVE_FILE:
1225 cmd = MTP_RECEIVE_FILE;
1226 send_file = true;
1227 break;
1228 case COMPAT_MTP_SEND_FILE_WITH_HEADER:
1229 cmd = MTP_SEND_FILE_WITH_HEADER;
1230 send_file = true;
1231 break;
1232 case COMPAT_MTP_SEND_EVENT:
1233 cmd = MTP_SEND_EVENT;
1234 break;
1235 default:
1236 DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
1237 ret = -ENOIOCTLCMD;
1238 goto fail;
1239 }
1240
1241 if (send_file) {
1242 if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
1243 ret = -EFAULT;
1244 goto fail;
1245 }
1246 mfr.fd = cmfr.fd;
1247 mfr.offset = cmfr.offset;
1248 mfr.length = cmfr.length;
1249 mfr.command = cmfr.command;
1250 mfr.transaction_id = cmfr.transaction_id;
1251 ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
1252 } else {
1253 if (mtp_lock(&dev->ioctl_excl))
1254 return -EBUSY;
1255 /* return here so we don't change dev->state below,
1256 * which would interfere with bulk transfer state.
1257 */
1258 if (copy_from_user(&cevent, (void __user *)value,
1259 sizeof(cevent))) {
1260 ret = -EFAULT;
1261 goto fail;
1262 }
1263 event.length = cevent.length;
1264 event.data = compat_ptr(cevent.data);
1265 ret = mtp_send_event(dev, &event);
1266 mtp_unlock(&dev->ioctl_excl);
1267 }
1268fail:
1269 return ret;
1270}
1271#endif
1272
Benoit Goby27d01e52011-12-19 14:37:50 -08001273static int mtp_open(struct inode *ip, struct file *fp)
1274{
1275 printk(KERN_INFO "mtp_open\n");
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301276 if (mtp_lock(&_mtp_dev->open_excl)) {
1277 pr_err("%s mtp_release not called returning EBUSY\n", __func__);
Benoit Goby27d01e52011-12-19 14:37:50 -08001278 return -EBUSY;
ChandanaKishori Chiluveru9a7474a2015-11-13 11:25:52 +05301279 }
Benoit Goby27d01e52011-12-19 14:37:50 -08001280
1281 /* clear any error condition */
1282 if (_mtp_dev->state != STATE_OFFLINE)
1283 _mtp_dev->state = STATE_READY;
1284
1285 fp->private_data = _mtp_dev;
1286 return 0;
1287}
1288
/*
 * Release /dev/mtp_usb: drop the single-open exclusion taken in
 * mtp_open() so another client may open the node.
 */
static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	mtp_unlock(&_mtp_dev->open_excl);
	return 0;
}
1296
/* file operations for /dev/mtp_usb
 *
 * read/write move bulk data for the userspace MTP stack; mtp_ioctl
 * starts workqueue-backed file transfers and sends interrupt events.
 * The compat handler translates the packed 32-bit ioctl structures.
 */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_mtp_ioctl,
#endif
	.open = mtp_open,
	.release = mtp_release,
};
1309
/* misc character device: registers /dev/mtp_usb with a dynamic minor */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1315
/*
 * Handle ep0 control requests the MTP function cares about:
 *  - the Microsoft OS string descriptor (string index MTP_OS_STRING_ID),
 *  - the vendor-specific MS OS feature descriptor (bRequest == 1),
 *  - the MTP class requests CANCEL and GET_DEVICE_STATUS.
 * Returns the number of bytes queued on ep0, or -EOPNOTSUPP for
 * requests this function does not handle (value stays negative and no
 * response is queued).  Runs in interrupt context: state changes use
 * spin_lock_irqsave and ep0 is queued with GFP_ATOMIC.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		/* never copy more than the host asked for */
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);

			/* update compatibleID if PTP */
			if (dev->function.fs_descriptors == fs_ptp_descs) {
				struct mtp_ext_config_desc *d = cdev->req->buf;

				d->function.compatibleID[0] = 'P';
			}
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* wake blocked readers/writers so they see CANCELED */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1415
/*
 * Composite-core bind callback: claim an interface number and a string
 * ID, publish the MS OS descriptor table, allocate the bulk/interrupt
 * endpoints and patch the HS/SS descriptor copies with the endpoint
 * addresses chosen at full speed.
 *
 * NOTE(review): the error paths after the os_desc_table kzalloc return
 * without freeing it; that relies on mtp_function_unbind() running to
 * reclaim it — confirm the composite core guarantees unbind after a
 * failed bind before reworking.
 */
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev *dev = func_to_mtp(f);
	int id;
	int ret;
	struct mtp_instance *fi_mtp;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %pK\n", dev);

	/* ChipIdea controller supports 16K request length for IN endpoint */
	if (cdev->gadget->is_chipidea && mtp_tx_req_len > 16384) {
		DBG(cdev, "Truncating Tx Req length to 16K for ChipIdea\n");
		mtp_tx_req_len = 16384;
	}

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate the interface string ID only once (shared with PTP) */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);

	if (cdev->use_os_string) {
		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
					GFP_KERNEL);
		if (!f->os_desc_table)
			return -ENOMEM;
		f->os_desc_n = 1;
		f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
	}

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}
	/* support super speed hardware */
	if (gadget_is_superspeed(c->cdev->gadget)) {
		unsigned max_burst;

		/* Calculate bMaxBurst, we know packet size is 1024 */
		max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
		mtp_ss_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_ss_in_comp_desc.bMaxBurst = max_burst;
		mtp_ss_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
		mtp_ss_out_comp_desc.bMaxBurst = max_burst;
	}

	fi_mtp->func_inst.f = &dev->function;
	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
		gadget_is_superspeed(c->cdev->gadget) ? "super" :
		(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
		f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
1493
/*
 * Composite-core unbind callback: free every USB request allocated at
 * bind time (under read_mutex so receive_file_work cannot touch the
 * rx_req buffers while they are freed), mark the device offline under
 * the spinlock, and release the MS OS descriptor table.
 */
static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct mtp_instance *fi_mtp;
	struct usb_request *req;
	int i;
	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
	/* reset so a later bind re-allocates the interface string ID */
	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
	mutex_lock(&dev->read_mutex);
	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	mutex_unlock(&dev->read_mutex);
	spin_lock_irq(&dev->lock);
	dev->state = STATE_OFFLINE;
	dev->cdev = NULL;
	spin_unlock_irq(&dev->lock);
	kfree(f->os_desc_table);
	f->os_desc_n = 0;
	fi_mtp->func_inst.f = NULL;
}
1519
1520static int mtp_function_set_alt(struct usb_function *f,
1521 unsigned intf, unsigned alt)
1522{
1523 struct mtp_dev *dev = func_to_mtp(f);
1524 struct usb_composite_dev *cdev = f->config->cdev;
1525 int ret;
1526
1527 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1528
1529 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1530 if (ret)
1531 return ret;
1532
1533 ret = usb_ep_enable(dev->ep_in);
1534 if (ret)
1535 return ret;
1536
1537 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1538 if (ret)
1539 return ret;
1540
1541 ret = usb_ep_enable(dev->ep_out);
1542 if (ret) {
1543 usb_ep_disable(dev->ep_in);
1544 return ret;
1545 }
1546
1547 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1548 if (ret)
1549 return ret;
1550
1551 ret = usb_ep_enable(dev->ep_intr);
1552 if (ret) {
1553 usb_ep_disable(dev->ep_out);
1554 usb_ep_disable(dev->ep_in);
1555 return ret;
1556 }
1557 dev->state = STATE_READY;
1558
1559 /* readers may be blocked waiting for us to go online */
1560 wake_up(&dev->read_wq);
1561 return 0;
1562}
1563
/*
 * Composite-core disable callback (function deselected or cable pulled):
 * mark the device offline under the spinlock so in-flight readers and
 * writers observe the state change, disable all three endpoints, then
 * wake anyone blocked on the read waitqueue.
 */
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	spin_lock_irq(&dev->lock);
	dev->state = STATE_OFFLINE;
	spin_unlock_irq(&dev->lock);
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}
1582
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001583static int debug_mtp_read_stats(struct seq_file *s, void *unused)
1584{
1585 struct mtp_dev *dev = _mtp_dev;
1586 int i;
1587 unsigned long flags;
1588 unsigned int min, max = 0, sum = 0, iteration = 0;
1589
1590 seq_puts(s, "\n=======================\n");
1591 seq_puts(s, "USB MTP OUT related VFS write stats:\n");
1592 seq_puts(s, "\n=======================\n");
1593 spin_lock_irqsave(&dev->lock, flags);
1594 min = dev->perf[0].vfs_wtime;
1595 for (i = 0; i < MAX_ITERATION; i++) {
1596 seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
1597 dev->perf[i].vfs_wbytes,
1598 dev->perf[i].vfs_wtime);
1599 if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
1600 sum += dev->perf[i].vfs_wtime;
1601 if (min > dev->perf[i].vfs_wtime)
1602 min = dev->perf[i].vfs_wtime;
1603 if (max < dev->perf[i].vfs_wtime)
1604 max = dev->perf[i].vfs_wtime;
1605 iteration++;
1606 }
1607 }
1608
1609 seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
Ajay Agarwal579c3f32017-06-13 10:59:17 +05301610 min, max, (iteration ? (sum / iteration) : 0));
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001611 min = max = sum = iteration = 0;
1612 seq_puts(s, "\n=======================\n");
1613 seq_puts(s, "USB MTP IN related VFS read stats:\n");
1614 seq_puts(s, "\n=======================\n");
1615
1616 min = dev->perf[0].vfs_rtime;
1617 for (i = 0; i < MAX_ITERATION; i++) {
1618 seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
1619 dev->perf[i].vfs_rbytes,
1620 dev->perf[i].vfs_rtime);
1621 if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
1622 sum += dev->perf[i].vfs_rtime;
1623 if (min > dev->perf[i].vfs_rtime)
1624 min = dev->perf[i].vfs_rtime;
1625 if (max < dev->perf[i].vfs_rtime)
1626 max = dev->perf[i].vfs_rtime;
1627 iteration++;
1628 }
1629 }
1630
1631 seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
Ajay Agarwal579c3f32017-06-13 10:59:17 +05301632 min, max, (iteration ? (sum / iteration) : 0));
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001633 spin_unlock_irqrestore(&dev->lock, flags);
1634 return 0;
1635}
1636
1637static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
1638 size_t count, loff_t *ppos)
1639{
1640 int clear_stats;
1641 unsigned long flags;
1642 struct mtp_dev *dev = _mtp_dev;
1643
1644 if (buf == NULL) {
1645 pr_err("[%s] EINVAL\n", __func__);
1646 goto done;
1647 }
1648
1649 if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
1650 pr_err("Wrong value. To clear stats, enter value as 0.\n");
1651 goto done;
1652 }
1653
1654 spin_lock_irqsave(&dev->lock, flags);
1655 memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
1656 dev->dbg_read_index = 0;
1657 dev->dbg_write_index = 0;
1658 spin_unlock_irqrestore(&dev->lock, flags);
1659done:
1660 return count;
1661}
1662
/* debugfs open: attach the stats dumper to a fresh seq_file. */
static int debug_mtp_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_mtp_read_stats, inode->i_private);
}
1667
1668static const struct file_operations debug_mtp_ops = {
1669 .open = debug_mtp_open,
1670 .read = seq_read,
1671 .write = debug_mtp_reset_stats,
1672};
1673
1674struct dentry *dent_mtp;
1675static void mtp_debugfs_init(void)
1676{
1677 struct dentry *dent_mtp_status;
1678
1679 dent_mtp = debugfs_create_dir("usb_mtp", 0);
1680 if (!dent_mtp || IS_ERR(dent_mtp))
1681 return;
1682
1683 dent_mtp_status = debugfs_create_file("status", 0644, dent_mtp,
1684 0, &debug_mtp_ops);
1685 if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
1686 debugfs_remove(dent_mtp);
1687 dent_mtp = NULL;
1688 return;
1689 }
1690}
1691
/*
 * Tear down the usb_mtp debugfs tree.  debugfs_remove_recursive(NULL)
 * is a no-op, so this is safe even if mtp_debugfs_init() failed.
 */
static void mtp_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_mtp);
}
1696
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001697static int __mtp_setup(struct mtp_instance *fi_mtp)
Benoit Goby27d01e52011-12-19 14:37:50 -08001698{
1699 struct mtp_dev *dev;
1700 int ret;
1701
1702 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001703
1704 if (fi_mtp != NULL)
1705 fi_mtp->dev = dev;
1706
Benoit Goby27d01e52011-12-19 14:37:50 -08001707 if (!dev)
1708 return -ENOMEM;
1709
1710 spin_lock_init(&dev->lock);
1711 init_waitqueue_head(&dev->read_wq);
1712 init_waitqueue_head(&dev->write_wq);
1713 init_waitqueue_head(&dev->intr_wq);
1714 atomic_set(&dev->open_excl, 0);
1715 atomic_set(&dev->ioctl_excl, 0);
1716 INIT_LIST_HEAD(&dev->tx_idle);
1717 INIT_LIST_HEAD(&dev->intr_idle);
1718
1719 dev->wq = create_singlethread_workqueue("f_mtp");
1720 if (!dev->wq) {
1721 ret = -ENOMEM;
1722 goto err1;
1723 }
1724 INIT_WORK(&dev->send_file_work, send_file_work);
1725 INIT_WORK(&dev->receive_file_work, receive_file_work);
1726
1727 _mtp_dev = dev;
1728
1729 ret = misc_register(&mtp_device);
1730 if (ret)
1731 goto err2;
1732
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001733 mtp_debugfs_init();
Benoit Goby27d01e52011-12-19 14:37:50 -08001734 return 0;
1735
1736err2:
1737 destroy_workqueue(dev->wq);
1738err1:
1739 _mtp_dev = NULL;
1740 kfree(dev);
1741 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1742 return ret;
1743}
1744
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001745static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
1746{
1747 return __mtp_setup(fi_mtp);
1748}
1749
1750
Benoit Goby27d01e52011-12-19 14:37:50 -08001751static void mtp_cleanup(void)
1752{
1753 struct mtp_dev *dev = _mtp_dev;
1754
1755 if (!dev)
1756 return;
1757
Hemant Kumarfc2b8f02016-05-02 11:18:48 -07001758 mtp_debugfs_remove();
Benoit Goby27d01e52011-12-19 14:37:50 -08001759 misc_deregister(&mtp_device);
1760 destroy_workqueue(dev->wq);
1761 _mtp_dev = NULL;
1762 kfree(dev);
1763}
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001764
/* Map a configfs item back to its enclosing mtp_instance. */
static struct mtp_instance *to_mtp_instance(struct config_item *item)
{
	return container_of(to_config_group(item), struct mtp_instance,
		func_inst.group);
}
1770
/* configfs item release: drop the reference on the function instance. */
static void mtp_attr_release(struct config_item *item)
{
	struct mtp_instance *fi_mtp = to_mtp_instance(item);

	usb_put_function_instance(&fi_mtp->func_inst);
}
1777
/* configfs item operations: only release is needed for this group */
static struct configfs_item_operations mtp_item_ops = {
	.release = mtp_attr_release,
};

/* configfs item type describing an MTP function instance group */
static struct config_item_type mtp_func_type = {
	.ct_item_ops = &mtp_item_ops,
	.ct_owner = THIS_MODULE,
};
1786
1787
/* Map a usb_function_instance back to its enclosing mtp_instance. */
static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
{
	return container_of(fi, struct mtp_instance, func_inst);
}
1792
1793static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1794{
1795 struct mtp_instance *fi_mtp;
1796 char *ptr;
1797 int name_len;
1798
1799 name_len = strlen(name) + 1;
1800 if (name_len > MAX_INST_NAME_LEN)
1801 return -ENAMETOOLONG;
1802
1803 ptr = kstrndup(name, name_len, GFP_KERNEL);
1804 if (!ptr)
1805 return -ENOMEM;
1806
1807 fi_mtp = to_fi_mtp(fi);
1808 fi_mtp->name = ptr;
1809
1810 return 0;
1811}
1812
/*
 * configfs free_func_inst callback: release the instance name, tear
 * down the global MTP device state, then free the instance itself.
 */
static void mtp_free_inst(struct usb_function_instance *fi)
{
	struct mtp_instance *fi_mtp;

	fi_mtp = to_fi_mtp(fi);
	kfree(fi_mtp->name);
	mtp_cleanup();
	kfree(fi_mtp);
}
1822
1823struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
1824{
1825 struct mtp_instance *fi_mtp;
1826 int ret = 0;
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001827 struct usb_os_desc *descs[1];
1828 char *names[1];
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001829
1830 fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
1831 if (!fi_mtp)
1832 return ERR_PTR(-ENOMEM);
1833 fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
1834 fi_mtp->func_inst.free_func_inst = mtp_free_inst;
1835
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001836 fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
1837 INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
1838 descs[0] = &fi_mtp->mtp_os_desc;
1839 names[0] = "MTP";
Badhri Jagan Sridharan54856462015-10-06 20:32:01 -07001840
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001841 if (mtp_config) {
1842 ret = mtp_setup_configfs(fi_mtp);
1843 if (ret) {
1844 kfree(fi_mtp);
1845 pr_err("Error setting MTP\n");
1846 return ERR_PTR(ret);
1847 }
1848 } else
1849 fi_mtp->dev = _mtp_dev;
1850
1851 config_group_init_type_name(&fi_mtp->func_inst.group,
1852 "", &mtp_func_type);
Amit Pundir8157e622016-04-05 21:09:54 +05301853 usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
1854 descs, names, THIS_MODULE);
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001855
Liangliang Lu56703d112017-05-03 16:06:35 +08001856 mutex_init(&fi_mtp->dev->read_mutex);
1857
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001858 return &fi_mtp->func_inst;
1859}
1860EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
1861
1862static struct usb_function_instance *mtp_alloc_inst(void)
1863{
1864 return alloc_inst_mtp_ptp(true);
1865}
1866
1867static int mtp_ctrlreq_configfs(struct usb_function *f,
1868 const struct usb_ctrlrequest *ctrl)
1869{
1870 return mtp_ctrlrequest(f->config->cdev, ctrl);
1871}
1872
/* free_func callback required by the composite framework; intentionally
 * empty because mtp_alloc() allocates nothing per-function. */
static void mtp_free(struct usb_function *f)
{
	/*NO-OP: no function specific resource allocation in mtp_alloc*/
}
1877
1878struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
1879 bool mtp_config)
1880{
1881 struct mtp_instance *fi_mtp = to_fi_mtp(fi);
Amit Pundir340114e2015-08-01 03:26:51 +05301882 struct mtp_dev *dev;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001883
Amit Pundir340114e2015-08-01 03:26:51 +05301884 /*
1885 * PTP piggybacks on MTP function so make sure we have
1886 * created MTP function before we associate this PTP
1887 * function with a gadget configuration.
1888 */
1889 if (fi_mtp->dev == NULL) {
1890 pr_err("Error: Create MTP function before linking"
1891 " PTP function with a gadget configuration\n");
1892 pr_err("\t1: Delete existing PTP function if any\n");
1893 pr_err("\t2: Create MTP function\n");
1894 pr_err("\t3: Create and symlink PTP function"
1895 " with a gadget configuration\n");
Amit Pundirf8e6ad22015-08-11 12:34:45 +05301896 return ERR_PTR(-EINVAL); /* Invalid Configuration */
Amit Pundir340114e2015-08-01 03:26:51 +05301897 }
1898
1899 dev = fi_mtp->dev;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001900 dev->function.name = DRIVER_NAME;
1901 dev->function.strings = mtp_strings;
1902 if (mtp_config) {
1903 dev->function.fs_descriptors = fs_mtp_descs;
1904 dev->function.hs_descriptors = hs_mtp_descs;
Mark Kuo1b61b272015-08-20 13:01:46 +08001905 dev->function.ss_descriptors = ss_mtp_descs;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001906 } else {
1907 dev->function.fs_descriptors = fs_ptp_descs;
1908 dev->function.hs_descriptors = hs_ptp_descs;
Mark Kuo1b61b272015-08-20 13:01:46 +08001909 dev->function.ss_descriptors = ss_ptp_descs;
Badhri Jagan Sridharanf545f542014-11-17 21:11:23 -08001910 }
1911 dev->function.bind = mtp_function_bind;
1912 dev->function.unbind = mtp_function_unbind;
1913 dev->function.set_alt = mtp_function_set_alt;
1914 dev->function.disable = mtp_function_disable;
1915 dev->function.setup = mtp_ctrlreq_configfs;
1916 dev->function.free_func = mtp_free;
1917
1918 return &dev->function;
1919}
1920EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1921
1922static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
1923{
1924 return function_alloc_mtp_ptp(fi, true);
1925}
1926
/* Register the "mtp" usb_function with the composite framework and set
 * up the module init/exit boilerplate. */
DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
MODULE_LICENSE("GPL");