Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002 * f_fs.c -- user mode file system API for USB composite function controllers
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003 *
4 * Copyright (C) 2010 Samsung Electronics
Michal Nazarewicz54b83602012-01-13 15:05:16 +01005 * Author: Michal Nazarewicz <mina86@mina86.com>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02006 *
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01007 * Based on inode.c (GadgetFS) which was:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02008 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020015 */
16
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/blkdev.h>
Randy Dunlapb0608692010-05-10 10:51:36 -070022#include <linux/pagemap.h>
Paul Gortmakerf940fcd2011-05-27 09:56:31 -040023#include <linux/export.h>
Koen Beel560f1182012-05-30 20:43:37 +020024#include <linux/hid.h>
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +010025#include <linux/module.h>
Christoph Hellwige2e40f22015-02-22 08:58:50 -080026#include <linux/uio.h>
Hemant Kumarde406b72016-07-28 11:51:07 -070027#include <linux/ipc_logging.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020028#include <asm/unaligned.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020029
30#include <linux/usb/composite.h>
31#include <linux/usb/functionfs.h>
32
Robert Baldyga2e4c7552014-02-10 10:42:44 +010033#include <linux/aio.h>
34#include <linux/mmu_context.h>
Robert Baldyga23de91e2014-02-10 10:42:43 +010035#include <linux/poll.h>
Robert Baldyga5e33f6f2015-01-23 13:41:01 +010036#include <linux/eventfd.h>
Robert Baldyga23de91e2014-02-10 10:42:43 +010037
Andrzej Pietrasiewicze72c39c2013-12-03 15:15:31 +010038#include "u_fs.h"
Andrzej Pietrasiewicz74d48462014-05-08 14:06:21 +020039#include "u_f.h"
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +020040#include "u_os_desc.h"
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +010041#include "configfs.h"
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020042
43#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
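/*
 * Illustrative only: this magic identifies the functionfs superblock.
 * User space typically mounts one instance per function before using the
 * files implemented below, e.g. (the instance name "adb" is just an example):
 *
 *	mkdir -p /dev/usb-ffs/adb
 *	mount -t functionfs adb /dev/usb-ffs/adb
 *
 * after which ep0 (and, once descriptors have been written, ep1, ep2, ...)
 * show up inside that mount point.
 */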
44
Hemant Kumarde406b72016-07-28 11:51:07 -070045#define NUM_PAGES 10 /* # of pages for ipc logging */
46
47static void *ffs_ipc_log;
48#define ffs_log(fmt, ...) do { \
49 ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \
50 ##__VA_ARGS__); \
Liangliang Lu55ba19d2017-12-07 14:42:55 +080051 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
Hemant Kumarde406b72016-07-28 11:51:07 -070052} while (0)
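/*
 * Illustrative only: a call such as
 *
 *	ffs_log("state %d setup_state %d", ffs->state, ffs->setup_state);
 *
 * records "<caller>: state 1 setup_state 0" (values are examples) in the
 * IPC log buffer referenced by ffs_ipc_log and mirrors the same line
 * through pr_debug(), so the trace is also visible via dynamic debug.
 */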
53
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020054/* Reference counter handling */
55static void ffs_data_get(struct ffs_data *ffs);
56static void ffs_data_put(struct ffs_data *ffs);
57/* Creates new ffs_data object. */
58static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
59
60/* Opened counter handling. */
61static void ffs_data_opened(struct ffs_data *ffs);
62static void ffs_data_closed(struct ffs_data *ffs);
63
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +010064/* Called with ffs->mutex held; take over ownership of data. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020065static int __must_check
66__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
67static int __must_check
68__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
69
Liangliang Luececc4e2017-12-27 13:54:33 +080070static LIST_HEAD(inst_list);
71
Liangliang Lu55ba19d2017-12-07 14:42:55 +080072/* ffs instance status */
Liangliang Luececc4e2017-12-27 13:54:33 +080073#define INST_NAME_SIZE 16
74
75struct ffs_inst_status {
76 char inst_name[INST_NAME_SIZE];
77 struct list_head list;
78 struct mutex ffs_lock;
79 bool inst_exist;
80 struct f_fs_opts *opts;
81 struct ffs_data *ffs_data;
82};
Liangliang Lu55ba19d2017-12-07 14:42:55 +080083
84/* Free instance structures */
Liangliang Luececc4e2017-12-27 13:54:33 +080085static void ffs_inst_clean(struct f_fs_opts *opts,
86 const char *inst_name);
87static void ffs_inst_clean_delay(const char *inst_name);
88static int ffs_inst_exist_check(const char *inst_name);
89static struct ffs_inst_status *name_to_inst_status(
90 const char *inst_name, bool create_inst);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020091
92/* The function structure ***************************************************/
93
94struct ffs_ep;
95
96struct ffs_function {
97 struct usb_configuration *conf;
98 struct usb_gadget *gadget;
99 struct ffs_data *ffs;
100
101 struct ffs_ep *eps;
102 u8 eps_revmap[16];
103 short *interfaces_nums;
104
105 struct usb_function function;
106};
107
108
109static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
110{
111 return container_of(f, struct ffs_function, function);
112}
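/*
 * Illustrative only: the usb_function callbacks declared below recover
 * their ffs_function with this helper, along the lines of:
 *
 *	static void example_callback(struct usb_function *f)
 *	{
 *		struct ffs_function *func = ffs_func_from_usb(f);
 *
 *		// func->ffs, func->eps etc. are now available
 *	}
 */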
113
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200114
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100115static inline enum ffs_setup_state
116ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
117{
118 return (enum ffs_setup_state)
119 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
120}
121
122
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200123static void ffs_func_eps_disable(struct ffs_function *func);
124static int __must_check ffs_func_eps_enable(struct ffs_function *func);
125
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200126static int ffs_func_bind(struct usb_configuration *,
127 struct usb_function *);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200128static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
129static void ffs_func_disable(struct usb_function *);
130static int ffs_func_setup(struct usb_function *,
131 const struct usb_ctrlrequest *);
Felix Hädicke54dfce62016-06-22 01:12:07 +0200132static bool ffs_func_req_match(struct usb_function *,
Felix Hädicke1a00b452016-06-22 01:12:08 +0200133 const struct usb_ctrlrequest *,
134 bool config0);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200135static void ffs_func_suspend(struct usb_function *);
136static void ffs_func_resume(struct usb_function *);
137
138
139static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
140static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
141
142
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200143/* The endpoints structures *************************************************/
144
145struct ffs_ep {
146 struct usb_ep *ep; /* P: ffs->eps_lock */
147 struct usb_request *req; /* P: epfile->mutex */
148
Manu Gautam8d4e8972014-02-28 16:50:22 +0530149 /* [0]: full speed, [1]: high speed, [2]: super speed */
150 struct usb_endpoint_descriptor *descs[3];
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200151
152 u8 num;
153
154 int status; /* P: epfile->mutex */
Tarun Guptabccaa562016-11-11 10:32:17 -0800155 bool is_busy;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200156};
157
158struct ffs_epfile {
159 /* Protects ep->ep and ep->req. */
160 struct mutex mutex;
161 wait_queue_head_t wait;
Sujeet Kumarba857d42016-11-11 09:34:52 -0800162 atomic_t error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200163
164 struct ffs_data *ffs;
165 struct ffs_ep *ep; /* P: ffs->eps_lock */
166
167 struct dentry *dentry;
168
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200169 /*
170 * Buffer for holding data from partial reads which may happen since
171 * we’re rounding user read requests to a multiple of a max packet size.
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200172 *
173 * The pointer is initialised with NULL value and may be set by
174 * __ffs_epfile_read_data function to point to a temporary buffer.
175 *
176 * In normal operation, calls to __ffs_epfile_read_buffered will consume
177 * data from said buffer and eventually free it. Importantly, while the
178 * function is using the buffer, it sets the pointer to NULL. This is
179 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
180 * can never run concurrently (they are synchronised by epfile->mutex)
181 * so the latter will not assign a new value to the pointer.
182 *
183 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
184 * valid) and sets the pointer to the READ_BUFFER_DROP value. This special
185 * value is the crux of the synchronisation between ffs_func_eps_disable and
186 * __ffs_epfile_read_data.
187 *
188 * Once __ffs_epfile_read_data is about to finish it will try to set the
189 * pointer back to its old value (as described above), but seeing as the
190 * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
191 * the buffer.
192 *
193 * == State transitions ==
194 *
195 * • ptr == NULL: (initial state)
196 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
197 *   ◦ __ffs_epfile_read_buffered: nop
198 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
199 *   ◦ reading finishes: n/a, not in ‘and reading’ state
200 * • ptr == DROP:
201 *   ◦ __ffs_epfile_read_buffer_free: nop
202 *   ◦ __ffs_epfile_read_buffered: go to ptr == NULL
203 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
204 *   ◦ reading finishes: n/a, not in ‘and reading’ state
205 * • ptr == buf:
206 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
207 *   ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
208 *   ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
209 *     is always called first
210 *   ◦ reading finishes: n/a, not in ‘and reading’ state
211 * • ptr == NULL and reading:
212 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
213 *   ◦ __ffs_epfile_read_buffered: n/a, mutex is held
214 *   ◦ __ffs_epfile_read_data: n/a, mutex is held
215 *   ◦ reading finishes and …
216 *     … all data read: free buf, go to ptr == NULL
217 *     … otherwise: go to ptr == buf and reading
218 * • ptr == DROP and reading:
219 *   ◦ __ffs_epfile_read_buffer_free: nop
220 *   ◦ __ffs_epfile_read_buffered: n/a, mutex is held
221 *   ◦ __ffs_epfile_read_data: n/a, mutex is held
222 *   ◦ reading finishes: free buf, go to ptr == DROP
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200223 */
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200224 struct ffs_buffer *read_buffer;
225#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200226
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200227 char name[5];
228
229 unsigned char in; /* P: ffs->eps_lock */
230 unsigned char isoc; /* P: ffs->eps_lock */
231
232 unsigned char _pad;
Mayank Ranaea6ae442016-11-11 10:40:05 -0800233 atomic_t opened;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200234};
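/*
 * Illustrative walk-through of the read_buffer states documented above
 * (not additional driver logic): a 100-byte read(2) on a 512-byte
 * maxpacket OUT endpoint is rounded up to 512 bytes internally; if the
 * host sends 300 bytes, 100 go to user space and the remaining 200 are
 * parked in read_buffer (ptr == buf).  The next read(2) drains them via
 * __ffs_epfile_read_buffered and the pointer returns to NULL, while
 * ffs_func_eps_disable racing with this frees the buffer and leaves
 * READ_BUFFER_DROP behind instead.
 */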
235
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200236struct ffs_buffer {
237 size_t length;
238 char *data;
239 char storage[];
240};
241
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100242/* ffs_io_data structure ***************************************************/
243
244struct ffs_io_data {
245 bool aio;
246 bool read;
247
248 struct kiocb *kiocb;
Al Viroc993c392015-01-31 23:23:35 -0500249 struct iov_iter data;
250 const void *to_free;
251 char *buf;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100252
253 struct mm_struct *mm;
254 struct work_struct work;
255
256 struct usb_ep *ep;
257 struct usb_request *req;
Robert Baldyga5e33f6f2015-01-23 13:41:01 +0100258
259 struct ffs_data *ffs;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100260};
261
Robert Baldyga6d5c1c72014-08-25 11:16:27 +0200262struct ffs_desc_helper {
263 struct ffs_data *ffs;
264 unsigned interfaces_count;
265 unsigned eps_count;
266};
267
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200268static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
269static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
270
Al Viro1bb27ca2014-09-03 13:32:19 -0400271static struct dentry *
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200272ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
Al Viro1bb27ca2014-09-03 13:32:19 -0400273 const struct file_operations *fops);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200274
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100275/* Devices management *******************************************************/
276
277DEFINE_MUTEX(ffs_lock);
Felipe Balbi0700faa2014-04-01 13:19:32 -0500278EXPORT_SYMBOL_GPL(ffs_lock);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100279
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +0100280static struct ffs_dev *_ffs_find_dev(const char *name);
281static struct ffs_dev *_ffs_alloc_dev(void);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +0100282static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +0100283static void _ffs_free_dev(struct ffs_dev *dev);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100284static void *ffs_acquire_dev(const char *dev_name);
285static void ffs_release_dev(struct ffs_data *ffs_data);
286static int ffs_ready(struct ffs_data *ffs);
287static void ffs_closed(struct ffs_data *ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200288
289/* Misc helper functions ****************************************************/
290
291static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
292 __attribute__((warn_unused_result, nonnull));
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +0530293static char *ffs_prepare_buffer(const char __user *buf, size_t len,
294 size_t extra_buf_alloc)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200295 __attribute__((warn_unused_result, nonnull));
296
297
298/* Control file aka ep0 *****************************************************/
299
300static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
301{
302 struct ffs_data *ffs = req->context;
303
304 complete_all(&ffs->ep0req_completion);
305}
306
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200307static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
308{
309 struct usb_request *req = ffs->ep0req;
310 int ret;
311
312 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
313
314 spin_unlock_irq(&ffs->ev.waitq.lock);
315
Hemant Kumarde406b72016-07-28 11:51:07 -0700316 ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
317 ffs->setup_state, ffs->flags);
318
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200319 req->buf = data;
320 req->length = len;
321
Marek Szyprowskice1fd352011-01-28 13:55:36 +0100322 /*
323	 * The UDC layer requires a buffer to be provided even for a ZLP, but
324	 * should not use it at all. Let's provide a poisoned pointer to catch
325	 * a possible bug in the driver.
326 */
327 if (req->buf == NULL)
328 req->buf = (void *)0xDEADBABE;
329
Wolfram Sang16735d02013-11-14 14:32:02 -0800330 reinit_completion(&ffs->ep0req_completion);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200331
332 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
333 if (unlikely(ret < 0))
334 return ret;
335
336 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
337 if (unlikely(ret)) {
338 usb_ep_dequeue(ffs->gadget->ep0, req);
339 return -EINTR;
340 }
341
342 ffs->setup_state = FFS_NO_SETUP;
Hemant Kumarde406b72016-07-28 11:51:07 -0700343
344 ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
345 ffs->setup_state, ffs->flags);
346
Robert Baldyga0a7b1f82014-02-10 10:42:42 +0100347 return req->status ? req->status : req->actual;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200348}
349
350static int __ffs_ep0_stall(struct ffs_data *ffs)
351{
Hemant Kumarde406b72016-07-28 11:51:07 -0700352 ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
353 ffs->setup_state, ffs->flags, ffs->ev.can_stall);
354
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200355 if (ffs->ev.can_stall) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100356 pr_vdebug("ep0 stall\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200357 usb_ep_set_halt(ffs->gadget->ep0);
358 ffs->setup_state = FFS_NO_SETUP;
359 return -EL2HLT;
360 } else {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100361 pr_debug("bogus ep0 stall!\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200362 return -ESRCH;
363 }
364}
365
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200366static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
367 size_t len, loff_t *ptr)
368{
369 struct ffs_data *ffs = file->private_data;
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +0530370 struct usb_gadget *gadget = ffs->gadget;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200371 ssize_t ret;
372 char *data;
373
374 ENTER();
375
Hemant Kumarde406b72016-07-28 11:51:07 -0700376 ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
377 ffs->state, ffs->setup_state, ffs->flags);
378
Liangliang Luececc4e2017-12-27 13:54:33 +0800379 ret = ffs_inst_exist_check(ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800380 if (ret < 0)
381 return ret;
382
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200383 /* Fast check if setup was canceled */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100384 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200385 return -EIDRM;
386
387 /* Acquire mutex */
388 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
389 if (unlikely(ret < 0))
390 return ret;
391
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200392 /* Check state */
393 switch (ffs->state) {
394 case FFS_READ_DESCRIPTORS:
395 case FFS_READ_STRINGS:
396 /* Copy data */
397 if (unlikely(len < 16)) {
398 ret = -EINVAL;
399 break;
400 }
401
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +0530402 data = ffs_prepare_buffer(buf, len, 0);
Tobias Klauser537baab2010-12-09 15:52:39 +0100403 if (IS_ERR(data)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200404 ret = PTR_ERR(data);
405 break;
406 }
407
408 /* Handle data */
409 if (ffs->state == FFS_READ_DESCRIPTORS) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100410 pr_info("read descriptors\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200411 ret = __ffs_data_got_descs(ffs, data, len);
412 if (unlikely(ret < 0))
413 break;
414
415 ffs->state = FFS_READ_STRINGS;
416 ret = len;
417 } else {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100418 pr_info("read strings\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200419 ret = __ffs_data_got_strings(ffs, data, len);
420 if (unlikely(ret < 0))
421 break;
422
423 ret = ffs_epfiles_create(ffs);
424 if (unlikely(ret)) {
425 ffs->state = FFS_CLOSING;
426 break;
427 }
428
429 ffs->state = FFS_ACTIVE;
430 mutex_unlock(&ffs->mutex);
431
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100432 ret = ffs_ready(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200433 if (unlikely(ret < 0)) {
434 ffs->state = FFS_CLOSING;
435 return ret;
436 }
437
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200438 return len;
439 }
440 break;
441
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200442 case FFS_ACTIVE:
443 data = NULL;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100444 /*
445 * We're called from user space, we can use _irq
446	 * rather than _irqsave
447 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200448 spin_lock_irq(&ffs->ev.waitq.lock);
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100449 switch (ffs_setup_state_clear_cancelled(ffs)) {
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100450 case FFS_SETUP_CANCELLED:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200451 ret = -EIDRM;
452 goto done_spin;
453
454 case FFS_NO_SETUP:
455 ret = -ESRCH;
456 goto done_spin;
457
458 case FFS_SETUP_PENDING:
459 break;
460 }
461
462 /* FFS_SETUP_PENDING */
463 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
464 spin_unlock_irq(&ffs->ev.waitq.lock);
465 ret = __ffs_ep0_stall(ffs);
466 break;
467 }
468
469 /* FFS_SETUP_PENDING and not stall */
470 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
471
472 spin_unlock_irq(&ffs->ev.waitq.lock);
473
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +0530474 data = ffs_prepare_buffer(buf, len, gadget->extra_buf_alloc);
Tobias Klauser537baab2010-12-09 15:52:39 +0100475 if (IS_ERR(data)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200476 ret = PTR_ERR(data);
477 break;
478 }
479
480 spin_lock_irq(&ffs->ev.waitq.lock);
481
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100482 /*
483	 * We are guaranteed still to be in the FFS_ACTIVE state,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200484 * but the state of setup could have changed from
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100485 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200486 * to check for that. If that happened we copied data
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100487 * from user space in vain but it's unlikely.
488 *
489 * For sure we are not in FFS_NO_SETUP since this is
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200490 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
491 * transition can be performed and it's protected by
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100492 * mutex.
493 */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100494 if (ffs_setup_state_clear_cancelled(ffs) ==
495 FFS_SETUP_CANCELLED) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200496 ret = -EIDRM;
497done_spin:
498 spin_unlock_irq(&ffs->ev.waitq.lock);
499 } else {
500 /* unlocks spinlock */
501 ret = __ffs_ep0_queue_wait(ffs, data, len);
502 }
503 kfree(data);
504 break;
505
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200506 default:
507 ret = -EBADFD;
508 break;
509 }
510
Hemant Kumarde406b72016-07-28 11:51:07 -0700511 ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret,
512 ffs->state, ffs->setup_state, ffs->flags);
513
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200514 mutex_unlock(&ffs->mutex);
515 return ret;
516}
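/*
 * Illustrative only (user-space view of the write path above): a daemon
 * typically drives ep0 through FFS_READ_DESCRIPTORS and FFS_READ_STRINGS
 * with two writes, roughly:
 *
 *	int ep0 = open("/dev/usb-ffs/adb/ep0", O_RDWR);   // example mount
 *
 *	write(ep0, &descriptors, sizeof(descriptors));    // -> FFS_READ_STRINGS
 *	write(ep0, &strings, sizeof(strings));            // -> FFS_ACTIVE
 *
 * where the two blobs follow the layouts declared in
 * <linux/usb/functionfs.h>.  Once FFS_ACTIVE is reached, further ep0
 * writes answer pending setup requests instead.
 */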
517
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200518/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200519static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
520 size_t n)
521{
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100522 /*
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200523 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
524	 * the size of the ffs->ev.types array (which is four) so that's how much space
525 * we reserve.
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100526 */
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200527 struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
528 const size_t size = n * sizeof *events;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200529 unsigned i = 0;
530
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200531 memset(events, 0, size);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200532
533 do {
534 events[i].type = ffs->ev.types[i];
535 if (events[i].type == FUNCTIONFS_SETUP) {
536 events[i].u.setup = ffs->ev.setup;
537 ffs->setup_state = FFS_SETUP_PENDING;
538 }
539 } while (++i < n);
540
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200541 ffs->ev.count -= n;
542 if (ffs->ev.count)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200543 memmove(ffs->ev.types, ffs->ev.types + n,
544 ffs->ev.count * sizeof *ffs->ev.types);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200545
546 spin_unlock_irq(&ffs->ev.waitq.lock);
Hemant Kumarde406b72016-07-28 11:51:07 -0700547
548 ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
549 ffs->setup_state, ffs->flags, n);
550
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200551 mutex_unlock(&ffs->mutex);
552
Daniel Walter7fe9a932015-11-18 17:15:49 +0100553 return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200554}
555
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200556static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
557 size_t len, loff_t *ptr)
558{
559 struct ffs_data *ffs = file->private_data;
560 char *data = NULL;
561 size_t n;
562 int ret;
563
564 ENTER();
565
Hemant Kumarde406b72016-07-28 11:51:07 -0700566 ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
567 ffs->state, ffs->setup_state, ffs->flags);
568
Liangliang Luececc4e2017-12-27 13:54:33 +0800569 ret = ffs_inst_exist_check(ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800570 if (ret < 0)
571 return ret;
572
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200573 /* Fast check if setup was canceled */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100574 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200575 return -EIDRM;
576
577 /* Acquire mutex */
578 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
579 if (unlikely(ret < 0))
580 return ret;
581
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200582 /* Check state */
583 if (ffs->state != FFS_ACTIVE) {
584 ret = -EBADFD;
585 goto done_mutex;
586 }
587
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100588 /*
589	 * We're called from user space, we can use _irq rather than
590 * _irqsave
591 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200592 spin_lock_irq(&ffs->ev.waitq.lock);
593
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100594 switch (ffs_setup_state_clear_cancelled(ffs)) {
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100595 case FFS_SETUP_CANCELLED:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200596 ret = -EIDRM;
597 break;
598
599 case FFS_NO_SETUP:
600 n = len / sizeof(struct usb_functionfs_event);
601 if (unlikely(!n)) {
602 ret = -EINVAL;
603 break;
604 }
605
606 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
607 ret = -EAGAIN;
608 break;
609 }
610
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100611 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
612 ffs->ev.count)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200613 ret = -EINTR;
614 break;
615 }
616
617 return __ffs_ep0_read_events(ffs, buf,
618 min(n, (size_t)ffs->ev.count));
619
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200620 case FFS_SETUP_PENDING:
621 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
622 spin_unlock_irq(&ffs->ev.waitq.lock);
623 ret = __ffs_ep0_stall(ffs);
624 goto done_mutex;
625 }
626
627 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
628
629 spin_unlock_irq(&ffs->ev.waitq.lock);
630
631 if (likely(len)) {
632 data = kmalloc(len, GFP_KERNEL);
633 if (unlikely(!data)) {
634 ret = -ENOMEM;
635 goto done_mutex;
636 }
637 }
638
639 spin_lock_irq(&ffs->ev.waitq.lock);
640
641 /* See ffs_ep0_write() */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100642 if (ffs_setup_state_clear_cancelled(ffs) ==
643 FFS_SETUP_CANCELLED) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200644 ret = -EIDRM;
645 break;
646 }
647
648 /* unlocks spinlock */
649 ret = __ffs_ep0_queue_wait(ffs, data, len);
Daniel Walter7fe9a932015-11-18 17:15:49 +0100650 if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200651 ret = -EFAULT;
652 goto done_mutex;
653
654 default:
655 ret = -EBADFD;
656 break;
657 }
658
659 spin_unlock_irq(&ffs->ev.waitq.lock);
660done_mutex:
Hemant Kumarde406b72016-07-28 11:51:07 -0700661 ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
662 ffs->state, ffs->setup_state, ffs->flags);
663
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200664 mutex_unlock(&ffs->mutex);
665 kfree(data);
Hemant Kumarde406b72016-07-28 11:51:07 -0700666
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200667 return ret;
668}
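/*
 * Illustrative only (user-space view of the read path above): once the
 * function is active, ep0 reads return queued usb_functionfs_event
 * records, e.g.:
 *
 *	struct usb_functionfs_event event;
 *
 *	if (read(ep0, &event, sizeof(event)) == sizeof(event)) {
 *		switch (event.type) {
 *		case FUNCTIONFS_ENABLE:		// start endpoint I/O
 *			break;
 *		case FUNCTIONFS_SETUP:		// event.u.setup holds the request
 *			break;
 *		}
 *	}
 *
 * While a setup is pending, a read or write on ep0 carries the data
 * stage; answering in the "wrong" direction stalls it, as handled by
 * __ffs_ep0_stall() above.
 */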
669
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200670static int ffs_ep0_open(struct inode *inode, struct file *file)
671{
672 struct ffs_data *ffs = inode->i_private;
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800673 int ret;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200674
675 ENTER();
676
Hemant Kumarde406b72016-07-28 11:51:07 -0700677 ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
678 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
679
Liangliang Luececc4e2017-12-27 13:54:33 +0800680 ret = ffs_inst_exist_check(ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800681 if (ret < 0)
682 return ret;
683
Mayank Rana758d7522016-11-11 10:23:13 -0800684 /* to get updated opened atomic variable value */
685 smp_mb__before_atomic();
Saket Saurabh8ee75a12016-11-11 10:16:42 -0800686 if (atomic_read(&ffs->opened))
687 return -EBUSY;
688
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200689 if (unlikely(ffs->state == FFS_CLOSING))
690 return -EBUSY;
691
692 file->private_data = ffs;
693 ffs_data_opened(ffs);
694
695 return 0;
696}
697
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200698static int ffs_ep0_release(struct inode *inode, struct file *file)
699{
700 struct ffs_data *ffs = file->private_data;
701
702 ENTER();
703
Hemant Kumarde406b72016-07-28 11:51:07 -0700704 ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
705 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
706
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200707 ffs_data_closed(ffs);
708
709 return 0;
710}
711
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200712static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
713{
714 struct ffs_data *ffs = file->private_data;
715 struct usb_gadget *gadget = ffs->gadget;
716 long ret;
717
718 ENTER();
719
Hemant Kumarde406b72016-07-28 11:51:07 -0700720 ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
721 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
722
Liangliang Luececc4e2017-12-27 13:54:33 +0800723 ret = ffs_inst_exist_check(ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800724 if (ret < 0)
725 return ret;
726
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200727 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
728 struct ffs_function *func = ffs->func;
729 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
Andrzej Pietrasiewicz92b0abf2012-03-28 09:30:50 +0200730 } else if (gadget && gadget->ops->ioctl) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200731 ret = gadget->ops->ioctl(gadget, code, value);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200732 } else {
733 ret = -ENOTTY;
734 }
735
736 return ret;
737}
738
Robert Baldyga23de91e2014-02-10 10:42:43 +0100739static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
740{
741 struct ffs_data *ffs = file->private_data;
742 unsigned int mask = POLLWRNORM;
743 int ret;
744
Hemant Kumarde406b72016-07-28 11:51:07 -0700745 ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
746 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
747
Liangliang Luececc4e2017-12-27 13:54:33 +0800748 ret = ffs_inst_exist_check(ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +0800749 if (ret < 0)
750 return ret;
751
Robert Baldyga23de91e2014-02-10 10:42:43 +0100752 poll_wait(file, &ffs->ev.waitq, wait);
753
754 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
755 if (unlikely(ret < 0))
756 return mask;
757
758 switch (ffs->state) {
759 case FFS_READ_DESCRIPTORS:
760 case FFS_READ_STRINGS:
761 mask |= POLLOUT;
762 break;
763
764 case FFS_ACTIVE:
765 switch (ffs->setup_state) {
766 case FFS_NO_SETUP:
767 if (ffs->ev.count)
768 mask |= POLLIN;
769 break;
770
771 case FFS_SETUP_PENDING:
772 case FFS_SETUP_CANCELLED:
773 mask |= (POLLIN | POLLOUT);
774 break;
775 }
776 case FFS_CLOSING:
777 break;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +0100778 case FFS_DEACTIVATED:
779 break;
Robert Baldyga23de91e2014-02-10 10:42:43 +0100780 }
781
Hemant Kumarde406b72016-07-28 11:51:07 -0700782 ffs_log("exit: mask %u", mask);
783
Robert Baldyga23de91e2014-02-10 10:42:43 +0100784 mutex_unlock(&ffs->mutex);
785
786 return mask;
787}
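/*
 * Illustrative only: user space can avoid blocking on ep0 by polling it.
 * Per the handler above, POLLIN means an event (or pending setup) can be
 * read and POLLOUT that descriptors/strings or a setup data stage may be
 * written:
 *
 *	struct usb_functionfs_event event;
 *	struct pollfd pfd = { .fd = ep0, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ep0, &event, sizeof(event));
 */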
788
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200789static const struct file_operations ffs_ep0_operations = {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200790 .llseek = no_llseek,
791
792 .open = ffs_ep0_open,
793 .write = ffs_ep0_write,
794 .read = ffs_ep0_read,
795 .release = ffs_ep0_release,
796 .unlocked_ioctl = ffs_ep0_ioctl,
Robert Baldyga23de91e2014-02-10 10:42:43 +0100797 .poll = ffs_ep0_poll,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200798};
799
800
801/* "Normal" endpoints operations ********************************************/
802
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200803static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
804{
Sujeet Kumarbc66ea42016-11-11 10:08:49 -0800805 struct ffs_ep *ep = _ep->driver_data;
806
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200807 ENTER();
Sujeet Kumarbc66ea42016-11-11 10:08:49 -0800808
809 /* req may be freed during unbind */
810 if (ep && ep->req && likely(req->context)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200811 struct ffs_ep *ep = _ep->driver_data;
812 ep->status = req->status ? req->status : req->actual;
Chandana Kishori Chiluveru7f5670a2017-10-28 23:05:45 +0530813 ffs_log("ep status %d for req %pK", ep->status, req);
Tarun Guptabccaa562016-11-11 10:32:17 -0800814 /* Set is_busy false to indicate completion of last request */
815 ep->is_busy = false;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200816 complete(req->context);
817 }
818}
819
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200820static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
821{
822 ssize_t ret = copy_to_iter(data, data_len, iter);
823 if (likely(ret == data_len))
824 return ret;
825
826 if (unlikely(iov_iter_count(iter)))
827 return -EFAULT;
828
829 /*
830 * Dear user space developer!
831 *
832 * TL;DR: To stop getting below error message in your kernel log, change
833 * user space code using functionfs to align read buffers to a max
834 * packet size.
835 *
836 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
837	 * packet size. When an unaligned buffer is passed to functionfs, it
838 * internally uses a larger, aligned buffer so that such UDCs are happy.
839 *
840 * Unfortunately, this means that host may send more data than was
841 * requested in read(2) system call. f_fs doesn’t know what to do with
842 * that excess data so it simply drops it.
843 *
844	 * Had the buffer been aligned in the first place, no such problem
845	 * would have happened.
846 *
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200847 * Data may be dropped only in AIO reads. Synchronous reads are handled
848 * by splitting a request into multiple parts. This splitting may still
849	 * be a problem, though, so it's likely best to align the buffer
850	 * regardless of whether it is AIO or not.
851 *
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200852 * This only affects OUT endpoints, i.e. reading data with a read(2),
853 * aio_read(2) etc. system calls. Writing data to an IN endpoint is not
854 * affected.
855 */
856 pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
857 "Align read buffer size to max packet size to avoid the problem.\n",
858 data_len, ret);
859
860 return ret;
861}
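/*
 * Illustrative only: the warning above disappears if user space rounds
 * its OUT-endpoint reads up to a multiple of wMaxPacketSize, e.g. for a
 * 512-byte high-speed bulk endpoint (sizes are examples):
 *
 *	#define MAX_PACKET 512			// example wMaxPacketSize
 *	char buf[4 * MAX_PACKET];		// multiple of the packet size
 *
 *	ssize_t n = read(ep_out_fd, buf, sizeof(buf));
 *
 * so the UDC is never asked to receive more than the request can hold.
 */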
862
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100863static void ffs_user_copy_worker(struct work_struct *work)
864{
865 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
866 work);
867 int ret = io_data->req->status ? io_data->req->status :
868 io_data->req->actual;
Lars-Peter Clausen38740a52016-04-14 17:01:17 +0200869 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100870
Hemant Kumarde406b72016-07-28 11:51:07 -0700871 ffs_log("enter: ret %d", ret);
872
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100873 if (io_data->read && ret > 0) {
Lars-Peter Clausend85f74f2018-01-12 11:05:02 +0100874 mm_segment_t oldfs = get_fs();
875
876 set_fs(USER_DS);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100877 use_mm(io_data->mm);
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200878 ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100879 unuse_mm(io_data->mm);
Lars-Peter Clausend85f74f2018-01-12 11:05:02 +0100880 set_fs(oldfs);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100881 }
882
Christoph Hellwig04b2fa92015-02-02 14:49:06 +0100883 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100884
Lars-Peter Clausen38740a52016-04-14 17:01:17 +0200885 if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
Robert Baldyga5e33f6f2015-01-23 13:41:01 +0100886 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
887
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100888 usb_ep_free_request(io_data->ep, io_data->req);
889
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100890 if (io_data->read)
Al Viroc993c392015-01-31 23:23:35 -0500891 kfree(io_data->to_free);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100892 kfree(io_data->buf);
893 kfree(io_data);
Hemant Kumarde406b72016-07-28 11:51:07 -0700894
895 ffs_log("exit");
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100896}
897
898static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
899 struct usb_request *req)
900{
901 struct ffs_io_data *io_data = req->context;
902
903 ENTER();
904
Hemant Kumarde406b72016-07-28 11:51:07 -0700905 ffs_log("enter");
906
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100907 INIT_WORK(&io_data->work, ffs_user_copy_worker);
908 schedule_work(&io_data->work);
Hemant Kumarde406b72016-07-28 11:51:07 -0700909
910 ffs_log("exit");
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100911}
912
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200913static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
914{
915 /*
916 * See comment in struct ffs_epfile for full read_buffer pointer
917 * synchronisation story.
918 */
919 struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
920 if (buf && buf != READ_BUFFER_DROP)
921 kfree(buf);
922}
923
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200924/* Assumes epfile->mutex is held. */
925static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
926 struct iov_iter *iter)
927{
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200928 /*
929 * Null out epfile->read_buffer so ffs_func_eps_disable does not free
930 * the buffer while we are using it. See comment in struct ffs_epfile
931 * for full read_buffer pointer synchronisation story.
932 */
933 struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200934 ssize_t ret;
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200935 if (!buf || buf == READ_BUFFER_DROP)
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200936 return 0;
937
938 ret = copy_to_iter(buf->data, buf->length, iter);
939 if (buf->length == ret) {
940 kfree(buf);
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200941 return ret;
942 }
943
944 if (unlikely(iov_iter_count(iter))) {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200945 ret = -EFAULT;
946 } else {
947 buf->length -= ret;
948 buf->data += ret;
949 }
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200950
951 if (cmpxchg(&epfile->read_buffer, NULL, buf))
952 kfree(buf);
953
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200954 return ret;
955}
956
957/* Assumes epfile->mutex is held. */
958static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
959 void *data, int data_len,
960 struct iov_iter *iter)
961{
962 struct ffs_buffer *buf;
963
964 ssize_t ret = copy_to_iter(data, data_len, iter);
965 if (likely(data_len == ret))
966 return ret;
967
968 if (unlikely(iov_iter_count(iter)))
969 return -EFAULT;
970
971 /* See ffs_copy_to_iter for more context. */
972 pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
973 data_len, ret);
974
975 data_len -= ret;
976 buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
Dan Carpenter44963d62016-06-24 15:23:16 +0300977 if (!buf)
978 return -ENOMEM;
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200979 buf->length = data_len;
980 buf->data = buf->storage;
981 memcpy(buf->storage, data + ret, data_len);
Michal Nazarewicza9e6f832016-10-04 02:07:34 +0200982
983 /*
984 * At this point read_buffer is NULL or READ_BUFFER_DROP (if
985 * ffs_func_eps_disable has been called in the meanwhile). See comment
986 * in struct ffs_epfile for full read_buffer pointer synchronisation
987 * story.
988 */
989 if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
990 kfree(buf);
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200991
992 return ret;
993}
994
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100995static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200996{
997 struct ffs_epfile *epfile = file->private_data;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100998 struct usb_request *req;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200999 struct ffs_ep *ep;
1000 char *data = NULL;
David Cohenc0d31b32014-10-13 11:15:54 -07001001 ssize_t ret, data_len = -EINVAL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001002 int halt;
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +05301003 size_t extra_buf_alloc = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001004
Vijayavardhan Vennapusade58029b2016-11-11 09:51:37 -08001005 ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name,
1006 atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE");
1007
Liangliang Luececc4e2017-12-27 13:54:33 +08001008 ret = ffs_inst_exist_check(epfile->ffs->dev_name);
1009 if (ret < 0)
1010 return ret;
1011
Mayank Rana758d7522016-11-11 10:23:13 -08001012 /* to get updated error atomic variable value */
1013 smp_mb__before_atomic();
Vijayavardhan Vennapusade58029b2016-11-11 09:51:37 -08001014 if (atomic_read(&epfile->error))
1015 return -ENODEV;
Hemant Kumarde406b72016-07-28 11:51:07 -07001016
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001017 /* Are we still active? */
Michal Nazarewiczb3591f62016-01-04 20:58:12 +01001018 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1019 return -ENODEV;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001020
1021 /* Wait for endpoint to be enabled */
1022 ep = epfile->ep;
1023 if (!ep) {
Michal Nazarewiczb3591f62016-01-04 20:58:12 +01001024 if (file->f_flags & O_NONBLOCK)
1025 return -EAGAIN;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001026
Vijayavardhan Vennapusa2b92dce2016-11-11 09:26:57 -08001027 /* Don't wait on write if device is offline */
1028 if (!io_data->read)
Sujeet Kumarba857d42016-11-11 09:34:52 -08001029 return -ENODEV;
Vijayavardhan Vennapusa2b92dce2016-11-11 09:26:57 -08001030
Mayank Rana758d7522016-11-11 10:23:13 -08001031 /* to get updated error atomic variable value */
1032 smp_mb__before_atomic();
1033
Sujeet Kumarba857d42016-11-11 09:34:52 -08001034 /*
1035	 * If the ep is disabled, this fails all current I/Os
1036	 * and waits for the next epfile open to happen.
1037 */
1038 if (!atomic_read(&epfile->error)) {
1039 ret = wait_event_interruptible(epfile->wait,
1040 (ep = epfile->ep));
1041 if (ret < 0)
1042 return -EINTR;
1043 }
1044
1045 if (!ep)
1046 return -ENODEV;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001047 }
1048
1049 /* Do we halt? */
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001050 halt = (!io_data->read == !epfile->in);
Michal Nazarewiczb3591f62016-01-04 20:58:12 +01001051 if (halt && epfile->isoc)
1052 return -EINVAL;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001053
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001054 /* We will be using request and read_buffer */
1055 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
1056 if (unlikely(ret))
1057 goto error;
1058
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001059 /* Allocate & copy */
1060 if (!halt) {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001061 struct usb_gadget *gadget;
1062
1063 /*
1064 * Do we have buffered data from previous partial read? Check
1065 * that for synchronous case only because we do not have
1066 * facility to ‘wake up’ a pending asynchronous read and push
1067 * buffered data to it which we would need to make things behave
1068 * consistently.
1069 */
1070 if (!io_data->aio && io_data->read) {
1071 ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
1072 if (ret)
1073 goto error_mutex;
1074 }
1075
Michal Nazarewicz219580e2013-12-09 15:55:37 -08001076 /*
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +01001077 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001078 * before the waiting completes, so do not assign to 'gadget'
1079 * earlier
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +01001080 */
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001081 gadget = epfile->ffs->gadget;
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +01001082
Chao Bi97839ca2014-04-14 11:19:53 +08001083 spin_lock_irq(&epfile->ffs->eps_lock);
1084 /* In the meantime, endpoint got disabled or changed. */
1085 if (epfile->ep != ep) {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001086 ret = -ESHUTDOWN;
1087 goto error_lock;
Chao Bi97839ca2014-04-14 11:19:53 +08001088 }
Al Viroc993c392015-01-31 23:23:35 -05001089 data_len = iov_iter_count(&io_data->data);
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +01001090 /*
Michal Nazarewicz219580e2013-12-09 15:55:37 -08001091 * Controller may require buffer size to be aligned to
1092 * maxpacketsize of an out endpoint.
1093 */
Al Viroc993c392015-01-31 23:23:35 -05001094 if (io_data->read)
1095 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
Chao Bi97839ca2014-04-14 11:19:53 +08001096 spin_unlock_irq(&epfile->ffs->eps_lock);
Michal Nazarewicz219580e2013-12-09 15:55:37 -08001097
Vijayavardhan Vennapusaf2eff0e2018-06-27 16:21:48 +05301098 extra_buf_alloc = gadget->extra_buf_alloc;
Vijayavardhan Vennapusaeecc5672016-03-09 18:32:45 +05301099 if (!io_data->read)
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +05301100 data = kmalloc(data_len + extra_buf_alloc,
1101 GFP_KERNEL);
1102 else
1103 data = kmalloc(data_len, GFP_KERNEL);
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001104 if (unlikely(!data)) {
1105 ret = -ENOMEM;
1106 goto error_mutex;
1107 }
1108 if (!io_data->read &&
1109 copy_from_iter(data, data_len, &io_data->data) != data_len) {
1110 ret = -EFAULT;
1111 goto error_mutex;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001112 }
1113 }
1114
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001115 spin_lock_irq(&epfile->ffs->eps_lock);
1116
1117 if (epfile->ep != ep) {
1118 /* In the meantime, endpoint got disabled or changed. */
1119 ret = -ESHUTDOWN;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -08001120 } else if (halt) {
1121 /* Halt */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001122 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
1123 usb_ep_set_halt(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001124 ret = -EBADMSG;
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001125 } else if (unlikely(data_len == -EINVAL)) {
David Cohenc0d31b32014-10-13 11:15:54 -07001126 /*
1127 * Sanity Check: even though data_len can't be used
1128 * uninitialized at the time I write this comment, some
1129 * compilers complain about this situation.
1130	 * In order to keep the code free of warnings, data_len is
1131	 * initialized to -EINVAL at its declaration, which means we can
1132	 * no longer rely on the compiler to warn us if a future change
1133	 * results in data_len being used uninitialized.  For that
1134	 * reason, we're adding this redundant sanity check here.
1136 */
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001137 WARN(1, "%s: data_len == -EINVAL\n", __func__);
1138 ret = -EINVAL;
1139 } else if (!io_data->aio) {
Sujeet Kumarbc66ea42016-11-11 10:08:49 -08001140 struct completion *done;
Du, Changbinef150882015-12-29 14:36:58 +08001141 bool interrupted = false;
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001142
1143 req = ep->req;
1144 req->buf = data;
1145 req->length = data_len;
Tarun Guptabccaa562016-11-11 10:32:17 -08001146 ret = 0;
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001147 req->complete = ffs_epfile_io_complete;
1148
Sujeet Kumarbc66ea42016-11-11 10:08:49 -08001149 if (io_data->read) {
1150 reinit_completion(&epfile->ffs->epout_completion);
1151 done = &epfile->ffs->epout_completion;
1152 } else {
1153 reinit_completion(&epfile->ffs->epin_completion);
1154 done = &epfile->ffs->epin_completion;
1155 }
1156
1157 req->context = done;
Tarun Guptabccaa562016-11-11 10:32:17 -08001158 /*
1159 * Don't queue another read request if previous is
1160 * still busy.
1161 */
1162 if (!(io_data->read && ep->is_busy)) {
1163 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1164 ep->is_busy = true;
1165 }
1166
Sujeet Kumarba857d42016-11-11 09:34:52 -08001167 if (unlikely(ret < 0)) {
1168 ret = -EIO;
Tarun Guptabccaa562016-11-11 10:32:17 -08001169 ep->is_busy = false;
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001170 goto error_lock;
Sujeet Kumarba857d42016-11-11 09:34:52 -08001171 }
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001172
1173 spin_unlock_irq(&epfile->ffs->eps_lock);
1174
Sujeet Kumarbc66ea42016-11-11 10:08:49 -08001175 if (unlikely(wait_for_completion_interruptible(done))) {
Du, Changbinef150882015-12-29 14:36:58 +08001176 /*
1177 * To avoid race condition with ffs_epfile_io_complete,
1178 * dequeue the request first then check
1179 * status. usb_ep_dequeue API should guarantee no race
1180 * condition with req->complete callback.
1181 */
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001182 spin_lock_irq(&epfile->ffs->eps_lock);
1183 interrupted = true;
Manu Gautamb484e382016-11-11 09:58:16 -08001184 /*
1185	 * While we were acquiring the lock, did the endpoint get
1186	 * disabled (disconnect) or changed
1187	 * (composition switch)?
1188 */
1189 if (epfile->ep == ep) {
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001190 usb_ep_dequeue(ep->ep, req);
jianzhou685434d2019-05-30 13:40:07 +08001191 spin_unlock_irq(&epfile->ffs->eps_lock);
1192 wait_for_completion(done);
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001193 interrupted = ep->status < 0;
jianzhou685434d2019-05-30 13:40:07 +08001194 } else {
1195 spin_unlock_irq(&epfile->ffs->eps_lock);
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001196 }
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001197 }
1198
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001199 if (interrupted) {
Michal Nazarewiczc662a312016-05-21 20:47:34 +02001200 ret = -EINTR;
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001201 goto error_mutex;
1202 }
1203
1204 ret = -ENODEV;
1205 spin_lock_irq(&epfile->ffs->eps_lock);
Manu Gautamb484e382016-11-11 09:58:16 -08001206 /*
1207	 * While we were acquiring the lock, did the endpoint get
1208	 * disabled (disconnect) or changed
1209	 * (composition switch)?
1210 */
1211 if (epfile->ep == ep)
Sujeet Kumarf9dc7622016-11-10 18:41:38 -08001212 ret = ep->status;
1213 spin_unlock_irq(&epfile->ffs->eps_lock);
1214 if (io_data->read && ret > 0)
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001215 ret = __ffs_epfile_read_data(epfile, data, ep->status,
1216 &io_data->data);
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001217 goto error_mutex;
Vincent Pelletier16648cb2017-11-26 06:52:53 +00001218 } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001219 ret = -ENOMEM;
1220 } else {
1221 req->buf = data;
1222 req->length = data_len;
1223
1224 io_data->buf = data;
1225 io_data->ep = ep->ep;
1226 io_data->req = req;
1227 io_data->ffs = epfile->ffs;
1228
1229 req->context = io_data;
1230 req->complete = ffs_epfile_async_io_complete;
1231
1232 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
1233 if (unlikely(ret)) {
1234 usb_ep_free_request(ep->ep, req);
David Cohenc0d31b32014-10-13 11:15:54 -07001235 goto error_lock;
1236 }
1237
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001238 ret = -EIOCBQUEUED;
1239 /*
1240 * Do not kfree the buffer in this function. It will be freed
1241 * by ffs_user_copy_worker.
1242 */
1243 data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001244 }
1245
Robert Baldyga48968f82014-03-10 09:33:37 +01001246error_lock:
1247 spin_unlock_irq(&epfile->ffs->eps_lock);
Michal Nazarewiczae76e132016-01-04 21:05:59 +01001248error_mutex:
Robert Baldyga48968f82014-03-10 09:33:37 +01001249 mutex_unlock(&epfile->mutex);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001250error:
1251 kfree(data);
Hemant Kumarde406b72016-07-28 11:51:07 -07001252
1253 ffs_log("exit: ret %zu", ret);
1254
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001255 return ret;
1256}
1257
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001258static int
1259ffs_epfile_open(struct inode *inode, struct file *file)
1260{
1261 struct ffs_epfile *epfile = inode->i_private;
Liangliang Lu55ba19d2017-12-07 14:42:55 +08001262 int ret;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001263
1264 ENTER();
1265
Hemant Kumarde406b72016-07-28 11:51:07 -07001266 ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
1267 epfile->ffs->setup_state, epfile->ffs->flags);
1268
Liangliang Luececc4e2017-12-27 13:54:33 +08001269 ret = ffs_inst_exist_check(epfile->ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08001270 if (ret < 0)
1271 return ret;
1272
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001273 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1274 return -ENODEV;
1275
Mayank Ranaea6ae442016-11-11 10:40:05 -08001276 /* to get updated opened atomic variable value */
1277 smp_mb__before_atomic();
1278 if (atomic_read(&epfile->opened)) {
1279 pr_err("%s(): ep(%s) is already opened.\n",
1280 __func__, epfile->name);
1281 return -EBUSY;
1282 }
1283
1284 atomic_set(&epfile->opened, 1);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001285 file->private_data = epfile;
1286 ffs_data_opened(epfile->ffs);
Sujeet Kumarba857d42016-11-11 09:34:52 -08001287 atomic_set(&epfile->error, 0);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001288
Hemant Kumarde406b72016-07-28 11:51:07 -07001289 ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state,
1290 epfile->ffs->setup_state, epfile->ffs->flags);
1291
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001292 return 0;
1293}
1294
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001295static int ffs_aio_cancel(struct kiocb *kiocb)
1296{
1297 struct ffs_io_data *io_data = kiocb->private;
1298 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
Lars-Peter Clausenf0db53e2020-01-16 15:29:01 +02001299 unsigned long flags;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001300 int value;
1301
1302 ENTER();
1303
Hemant Kumarde406b72016-07-28 11:51:07 -07001304 ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
1305 epfile->ffs->setup_state, epfile->ffs->flags);
1306
Lars-Peter Clausenf0db53e2020-01-16 15:29:01 +02001307 spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001308
1309 if (likely(io_data && io_data->ep && io_data->req))
1310 value = usb_ep_dequeue(io_data->ep, io_data->req);
1311 else
1312 value = -EINVAL;
1313
Lars-Peter Clausenf0db53e2020-01-16 15:29:01 +02001314 spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001315
Hemant Kumarde406b72016-07-28 11:51:07 -07001316 ffs_log("exit: value %d", value);
1317
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001318 return value;
1319}
1320
Al Viro70e60d92015-01-31 23:55:39 -05001321static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001322{
Al Viro70e60d92015-01-31 23:55:39 -05001323 struct ffs_io_data io_data, *p = &io_data;
Al Virode2080d2015-01-31 23:42:34 -05001324 ssize_t res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001325
1326 ENTER();
1327
Hemant Kumarde406b72016-07-28 11:51:07 -07001328 ffs_log("enter");
1329
Al Viro70e60d92015-01-31 23:55:39 -05001330 if (!is_sync_kiocb(kiocb)) {
Andrzej Pietrasiewiczd5855892019-06-03 19:05:28 +02001331 p = kzalloc(sizeof(io_data), GFP_KERNEL);
Al Viro70e60d92015-01-31 23:55:39 -05001332 if (unlikely(!p))
1333 return -ENOMEM;
1334 p->aio = true;
1335 } else {
Andrzej Pietrasiewiczd5855892019-06-03 19:05:28 +02001336 memset(p, 0, sizeof(*p));
Al Viro70e60d92015-01-31 23:55:39 -05001337 p->aio = false;
1338 }
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001339
Al Viro70e60d92015-01-31 23:55:39 -05001340 p->read = false;
1341 p->kiocb = kiocb;
1342 p->data = *from;
1343 p->mm = current->mm;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001344
Al Viro70e60d92015-01-31 23:55:39 -05001345 kiocb->private = p;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001346
Rui Miguel Silva4088acf2015-05-18 16:02:07 +01001347 if (p->aio)
1348 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001349
Al Viro70e60d92015-01-31 23:55:39 -05001350 res = ffs_epfile_io(kiocb->ki_filp, p);
1351 if (res == -EIOCBQUEUED)
1352 return res;
1353 if (p->aio)
1354 kfree(p);
1355 else
1356 *from = p->data;
Hemant Kumarde406b72016-07-28 11:51:07 -07001357
1358 ffs_log("exit");
1359
Al Virode2080d2015-01-31 23:42:34 -05001360 return res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001361}
1362
Al Viro70e60d92015-01-31 23:55:39 -05001363static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001364{
Al Viro70e60d92015-01-31 23:55:39 -05001365 struct ffs_io_data io_data, *p = &io_data;
Al Virode2080d2015-01-31 23:42:34 -05001366 ssize_t res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001367
1368 ENTER();
1369
Hemant Kumarde406b72016-07-28 11:51:07 -07001370 ffs_log("enter");
1371
Al Viro70e60d92015-01-31 23:55:39 -05001372 if (!is_sync_kiocb(kiocb)) {
Andrzej Pietrasiewiczd5855892019-06-03 19:05:28 +02001373 p = kzalloc(sizeof(io_data), GFP_KERNEL);
Al Viro70e60d92015-01-31 23:55:39 -05001374 if (unlikely(!p))
1375 return -ENOMEM;
1376 p->aio = true;
1377 } else {
Andrzej Pietrasiewiczd5855892019-06-03 19:05:28 +02001378 memset(p, 0, sizeof(*p));
Al Viro70e60d92015-01-31 23:55:39 -05001379 p->aio = false;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001380 }
1381
Al Viro70e60d92015-01-31 23:55:39 -05001382 p->read = true;
1383 p->kiocb = kiocb;
1384 if (p->aio) {
1385 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1386 if (!p->to_free) {
1387 kfree(p);
1388 return -ENOMEM;
1389 }
1390 } else {
1391 p->data = *to;
1392 p->to_free = NULL;
1393 }
1394 p->mm = current->mm;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001395
Al Viro70e60d92015-01-31 23:55:39 -05001396 kiocb->private = p;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001397
Rui Miguel Silva4088acf2015-05-18 16:02:07 +01001398 if (p->aio)
1399 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001400
Al Viro70e60d92015-01-31 23:55:39 -05001401 res = ffs_epfile_io(kiocb->ki_filp, p);
1402 if (res == -EIOCBQUEUED)
1403 return res;
1404
1405 if (p->aio) {
1406 kfree(p->to_free);
1407 kfree(p);
1408 } else {
1409 *to = p->data;
Al Virode2080d2015-01-31 23:42:34 -05001410 }
Hemant Kumarde406b72016-07-28 11:51:07 -07001411
Ajay Agarwaldd11a242017-04-11 12:43:56 +05301412 ffs_log("exit");
Hemant Kumarde406b72016-07-28 11:51:07 -07001413
Al Virode2080d2015-01-31 23:42:34 -05001414 return res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001415}
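/*
 * Note (illustrative sketch, not from the original sources): the
 * read_iter/write_iter handlers above back both plain read()/write()
 * and native Linux AIO on the endpoint files.  With libaio the
 * asynchronous path (p->aio == true) is reached roughly like this,
 * assuming an already opened endpoint descriptor ep_fd, a buffer buf,
 * and the usual io_setup()/io_getevents() bookkeeping around it:
 *
 *	struct iocb iocb, *iocbp = &iocb;
 *	io_prep_pread(&iocb, ep_fd, buf, sizeof(buf), 0);
 *	io_submit(ctx, 1, &iocbp);	/* ends up in ffs_epfile_read_iter() */
 *
 * Cancelling the iocb before completion goes through ffs_aio_cancel(),
 * which dequeues the underlying usb_request.
 */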
1416
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001417static int
1418ffs_epfile_release(struct inode *inode, struct file *file)
1419{
1420 struct ffs_epfile *epfile = inode->i_private;
1421
1422 ENTER();
1423
Mayank Ranaea6ae442016-11-11 10:40:05 -08001424 atomic_set(&epfile->opened, 0);
Michal Nazarewicza9e6f832016-10-04 02:07:34 +02001425 __ffs_epfile_read_buffer_free(epfile);
Hemant Kumarde406b72016-07-28 11:51:07 -07001426 ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
1427 epfile->ffs->setup_state, epfile->ffs->flags);
Sujeet Kumarba857d42016-11-11 09:34:52 -08001428 atomic_set(&epfile->error, 1);
Vijayavardhan Vennapusade58029b2016-11-11 09:51:37 -08001429 ffs_data_closed(epfile->ffs);
1430 file->private_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001431
Hemant Kumarde406b72016-07-28 11:51:07 -07001432 ffs_log("exit");
1433
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001434 return 0;
1435}
1436
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001437static long ffs_epfile_ioctl(struct file *file, unsigned code,
1438 unsigned long value)
1439{
1440 struct ffs_epfile *epfile = file->private_data;
1441 int ret;
1442
1443 ENTER();
1444
Hemant Kumarde406b72016-07-28 11:51:07 -07001445 ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
1446 epfile->ffs->setup_state, epfile->ffs->flags);
1447
Liangliang Luececc4e2017-12-27 13:54:33 +08001448 ret = ffs_inst_exist_check(epfile->ffs->dev_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08001449 if (ret < 0)
1450 return ret;
1451
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001452 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1453 return -ENODEV;
1454
1455 spin_lock_irq(&epfile->ffs->eps_lock);
1456 if (likely(epfile->ep)) {
1457 switch (code) {
1458 case FUNCTIONFS_FIFO_STATUS:
1459 ret = usb_ep_fifo_status(epfile->ep->ep);
1460 break;
1461 case FUNCTIONFS_FIFO_FLUSH:
1462 usb_ep_fifo_flush(epfile->ep->ep);
1463 ret = 0;
1464 break;
1465 case FUNCTIONFS_CLEAR_HALT:
1466 ret = usb_ep_clear_halt(epfile->ep->ep);
1467 break;
1468 case FUNCTIONFS_ENDPOINT_REVMAP:
1469 ret = epfile->ep->num;
1470 break;
Robert Baldygac559a352014-09-09 08:23:16 +02001471 case FUNCTIONFS_ENDPOINT_DESC:
1472 {
1473 int desc_idx;
1474 struct usb_endpoint_descriptor *desc;
1475
1476 switch (epfile->ffs->gadget->speed) {
1477 case USB_SPEED_SUPER:
1478 desc_idx = 2;
1479 break;
1480 case USB_SPEED_HIGH:
1481 desc_idx = 1;
1482 break;
1483 default:
1484 desc_idx = 0;
1485 }
1486 desc = epfile->ep->descs[desc_idx];
1487
1488 spin_unlock_irq(&epfile->ffs->eps_lock);
1489 ret = copy_to_user((void *)value, desc, sizeof(*desc));
1490 if (ret)
1491 ret = -EFAULT;
1492 return ret;
1493 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001494 default:
1495 ret = -ENOTTY;
1496 }
1497 } else {
1498 ret = -ENODEV;
1499 }
1500 spin_unlock_irq(&epfile->ffs->eps_lock);
1501
Hemant Kumarde406b72016-07-28 11:51:07 -07001502 ffs_log("exit:ret %d", ret);
1503
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001504 return ret;
1505}
1506
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001507static const struct file_operations ffs_epfile_operations = {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001508 .llseek = no_llseek,
1509
1510 .open = ffs_epfile_open,
Al Viro70e60d92015-01-31 23:55:39 -05001511 .write_iter = ffs_epfile_write_iter,
1512 .read_iter = ffs_epfile_read_iter,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001513 .release = ffs_epfile_release,
1514 .unlocked_ioctl = ffs_epfile_ioctl,
1515};
1516
1517
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001518/* File system and super block operations ***********************************/
1519
1520/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001521 * Mounting the file system creates a controller file, used first for
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001522 * function configuration and later for event monitoring.
1523 */
1524
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001525static struct inode *__must_check
1526ffs_sb_make_inode(struct super_block *sb, void *data,
1527 const struct file_operations *fops,
1528 const struct inode_operations *iops,
1529 struct ffs_file_perms *perms)
1530{
1531 struct inode *inode;
1532
1533 ENTER();
1534
Hemant Kumarde406b72016-07-28 11:51:07 -07001535 ffs_log("enter");
1536
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001537 inode = new_inode(sb);
1538
1539 if (likely(inode)) {
Deepa Dinamani078cd822016-09-14 07:48:04 -07001540 struct timespec ts = current_time(inode);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001541
Al Viro12ba8d12010-10-27 04:19:36 +01001542 inode->i_ino = get_next_ino();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001543 inode->i_mode = perms->mode;
1544 inode->i_uid = perms->uid;
1545 inode->i_gid = perms->gid;
Deepa Dinamani078cd822016-09-14 07:48:04 -07001546 inode->i_atime = ts;
1547 inode->i_mtime = ts;
1548 inode->i_ctime = ts;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001549 inode->i_private = data;
1550 if (fops)
1551 inode->i_fop = fops;
1552 if (iops)
1553 inode->i_op = iops;
1554 }
1555
Hemant Kumarde406b72016-07-28 11:51:07 -07001556 ffs_log("exit");
1557
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001558 return inode;
1559}
1560
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001561/* Create "regular" file */
Al Viro1bb27ca2014-09-03 13:32:19 -04001562static struct dentry *ffs_sb_create_file(struct super_block *sb,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001563 const char *name, void *data,
Al Viro1bb27ca2014-09-03 13:32:19 -04001564 const struct file_operations *fops)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001565{
1566 struct ffs_data *ffs = sb->s_fs_info;
1567 struct dentry *dentry;
1568 struct inode *inode;
1569
1570 ENTER();
1571
Hemant Kumarde406b72016-07-28 11:51:07 -07001572 ffs_log("enter");
1573
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001574 dentry = d_alloc_name(sb->s_root, name);
1575 if (unlikely(!dentry))
1576 return NULL;
1577
1578 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1579 if (unlikely(!inode)) {
1580 dput(dentry);
1581 return NULL;
1582 }
1583
1584 d_add(dentry, inode);
Hemant Kumarde406b72016-07-28 11:51:07 -07001585
1586 ffs_log("exit");
1587
Al Viro1bb27ca2014-09-03 13:32:19 -04001588 return dentry;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001589}
1590
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001591/* Super block */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001592static const struct super_operations ffs_sb_operations = {
1593 .statfs = simple_statfs,
1594 .drop_inode = generic_delete_inode,
1595};
1596
1597struct ffs_sb_fill_data {
1598 struct ffs_file_perms perms;
1599 umode_t root_mode;
1600 const char *dev_name;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001601 bool no_disconnect;
Al Viro2606b282013-09-20 17:14:21 +01001602 struct ffs_data *ffs_data;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001603};
1604
1605static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1606{
1607 struct ffs_sb_fill_data *data = _data;
1608 struct inode *inode;
Al Viro2606b282013-09-20 17:14:21 +01001609 struct ffs_data *ffs = data->ffs_data;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001610
1611 ENTER();
1612
Hemant Kumarde406b72016-07-28 11:51:07 -07001613 ffs_log("enter");
1614
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001615 ffs->sb = sb;
Al Viro2606b282013-09-20 17:14:21 +01001616 data->ffs_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001617 sb->s_fs_info = ffs;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001618 sb->s_blocksize = PAGE_SIZE;
1619 sb->s_blocksize_bits = PAGE_SHIFT;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001620 sb->s_magic = FUNCTIONFS_MAGIC;
1621 sb->s_op = &ffs_sb_operations;
1622 sb->s_time_gran = 1;
1623
1624 /* Root inode */
1625 data->perms.mode = data->root_mode;
1626 inode = ffs_sb_make_inode(sb, NULL,
1627 &simple_dir_operations,
1628 &simple_dir_inode_operations,
1629 &data->perms);
Al Viro48fde702012-01-08 22:15:13 -05001630 sb->s_root = d_make_root(inode);
1631 if (unlikely(!sb->s_root))
Al Viro2606b282013-09-20 17:14:21 +01001632 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001633
1634 /* EP0 file */
1635 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
Al Viro1bb27ca2014-09-03 13:32:19 -04001636 &ffs_ep0_operations)))
Al Viro2606b282013-09-20 17:14:21 +01001637 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001638
Hemant Kumarde406b72016-07-28 11:51:07 -07001639 ffs_log("exit");
1640
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001641 return 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001642}
1643
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001644static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1645{
1646 ENTER();
1647
Hemant Kumarde406b72016-07-28 11:51:07 -07001648 ffs_log("enter");
1649
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001650 if (!opts || !*opts)
1651 return 0;
1652
1653 for (;;) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001654 unsigned long value;
Michal Nazarewiczafd2e182013-01-09 10:17:47 +01001655 char *eq, *comma;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001656
1657 /* Option limit */
1658 comma = strchr(opts, ',');
1659 if (comma)
1660 *comma = 0;
1661
1662 /* Value limit */
1663 eq = strchr(opts, '=');
1664 if (unlikely(!eq)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001665 pr_err("'=' missing in %s\n", opts);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001666 return -EINVAL;
1667 }
1668 *eq = 0;
1669
1670 /* Parse value */
Michal Nazarewiczafd2e182013-01-09 10:17:47 +01001671 if (kstrtoul(eq + 1, 0, &value)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001672 pr_err("%s: invalid value: %s\n", opts, eq + 1);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001673 return -EINVAL;
1674 }
1675
1676 /* Interpret option */
1677 switch (eq - opts) {
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001678 case 13:
1679 if (!memcmp(opts, "no_disconnect", 13))
1680 data->no_disconnect = !!value;
1681 else
1682 goto invalid;
1683 break;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001684 case 5:
1685 if (!memcmp(opts, "rmode", 5))
1686 data->root_mode = (value & 0555) | S_IFDIR;
1687 else if (!memcmp(opts, "fmode", 5))
1688 data->perms.mode = (value & 0666) | S_IFREG;
1689 else
1690 goto invalid;
1691 break;
1692
1693 case 4:
1694 if (!memcmp(opts, "mode", 4)) {
1695 data->root_mode = (value & 0555) | S_IFDIR;
1696 data->perms.mode = (value & 0666) | S_IFREG;
1697 } else {
1698 goto invalid;
1699 }
1700 break;
1701
1702 case 3:
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001703 if (!memcmp(opts, "uid", 3)) {
1704 data->perms.uid = make_kuid(current_user_ns(), value);
1705 if (!uid_valid(data->perms.uid)) {
1706 pr_err("%s: unmapped value: %lu\n", opts, value);
1707 return -EINVAL;
1708 }
Benoit Gobyb8100752013-01-08 19:57:09 -08001709 } else if (!memcmp(opts, "gid", 3)) {
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001710 data->perms.gid = make_kgid(current_user_ns(), value);
1711 if (!gid_valid(data->perms.gid)) {
1712 pr_err("%s: unmapped value: %lu\n", opts, value);
1713 return -EINVAL;
1714 }
Benoit Gobyb8100752013-01-08 19:57:09 -08001715 } else {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001716 goto invalid;
Benoit Gobyb8100752013-01-08 19:57:09 -08001717 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001718 break;
1719
1720 default:
1721invalid:
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001722 pr_err("%s: invalid option\n", opts);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001723 return -EINVAL;
1724 }
1725
1726 /* Next iteration */
1727 if (!comma)
1728 break;
1729 opts = comma + 1;
1730 }
1731
Hemant Kumarde406b72016-07-28 11:51:07 -07001732 ffs_log("exit");
1733
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001734 return 0;
1735}
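/*
 * Illustrative example (assumed typical usage, not from the original
 * sources): the option string parsed above comes from the -o argument
 * of mount(8), so an instance named "inst" (usually created beforehand
 * through configfs as ffs.inst) would be mounted along the lines of:
 *
 *	mkdir -p /dev/usb-ffs/inst
 *	mount -t functionfs \
 *		-o uid=2000,gid=2000,rmode=0770,fmode=0660,no_disconnect=1 \
 *		inst /dev/usb-ffs/inst
 *
 * rmode/fmode (or the combined "mode") are masked with 0555/0666 and
 * applied to the root directory and the ep files respectively.
 */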
1736
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001737/* "mount -t functionfs dev_name /dev/function" ends up here */
1738
Al Virofc14f2f2010-07-25 01:48:30 +04001739static struct dentry *
1740ffs_fs_mount(struct file_system_type *t, int flags,
1741 const char *dev_name, void *opts)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001742{
1743 struct ffs_sb_fill_data data = {
1744 .perms = {
1745 .mode = S_IFREG | 0600,
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001746 .uid = GLOBAL_ROOT_UID,
1747 .gid = GLOBAL_ROOT_GID,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001748 },
1749 .root_mode = S_IFDIR | 0500,
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001750 .no_disconnect = false,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001751 };
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001752 struct dentry *rv;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001753 int ret;
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001754 void *ffs_dev;
Al Viro2606b282013-09-20 17:14:21 +01001755 struct ffs_data *ffs;
Liangliang Luececc4e2017-12-27 13:54:33 +08001756 struct ffs_inst_status *inst_status;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001757
1758 ENTER();
1759
Hemant Kumarde406b72016-07-28 11:51:07 -07001760 ffs_log("enter");
1761
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001762 ret = ffs_fs_parse_opts(&data, opts);
1763 if (unlikely(ret < 0))
Al Virofc14f2f2010-07-25 01:48:30 +04001764 return ERR_PTR(ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001765
Al Viro2606b282013-09-20 17:14:21 +01001766 ffs = ffs_data_new();
1767 if (unlikely(!ffs))
1768 return ERR_PTR(-ENOMEM);
1769 ffs->file_perms = data.perms;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001770 ffs->no_disconnect = data.no_disconnect;
Al Viro2606b282013-09-20 17:14:21 +01001771
1772 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1773 if (unlikely(!ffs->dev_name)) {
1774 ffs_data_put(ffs);
1775 return ERR_PTR(-ENOMEM);
1776 }
1777
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001778 ffs_dev = ffs_acquire_dev(dev_name);
Al Viro2606b282013-09-20 17:14:21 +01001779 if (IS_ERR(ffs_dev)) {
1780 ffs_data_put(ffs);
1781 return ERR_CAST(ffs_dev);
1782 }
1783 ffs->private_data = ffs_dev;
1784 data.ffs_data = ffs;
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001785
Liangliang Luececc4e2017-12-27 13:54:33 +08001786 inst_status = name_to_inst_status(ffs->dev_name, false);
 1787	if (IS_ERR(inst_status)) {
 1788		ffs_log("failed to find instance (%s)\n",
 1789				ffs->dev_name);
		/* release the ffs_dev and ffs_data acquired above before bailing out */
		ffs_release_dev(ffs);
		ffs_data_put(ffs);
 1790		return ERR_PTR(-EINVAL);
 1791	}
1792
1793 /* Store ffs to global status structure */
1794 ffs_dev_lock();
1795 inst_status->ffs_data = ffs;
1796 ffs_dev_unlock();
1797
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001798 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
Al Viro2606b282013-09-20 17:14:21 +01001799 if (IS_ERR(rv) && data.ffs_data) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001800 ffs_release_dev(data.ffs_data);
Al Viro2606b282013-09-20 17:14:21 +01001801 ffs_data_put(data.ffs_data);
1802 }
Hemant Kumarde406b72016-07-28 11:51:07 -07001803
1804 ffs_log("exit");
1805
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001806 return rv;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001807}
1808
1809static void
1810ffs_fs_kill_sb(struct super_block *sb)
1811{
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001812 ENTER();
1813
Hemant Kumarde406b72016-07-28 11:51:07 -07001814 ffs_log("enter");
1815
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001816 kill_litter_super(sb);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001817 if (sb->s_fs_info) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001818 ffs_release_dev(sb->s_fs_info);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001819 ffs_data_closed(sb->s_fs_info);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001820 }
Hemant Kumarde406b72016-07-28 11:51:07 -07001821
1822 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001823}
1824
1825static struct file_system_type ffs_fs_type = {
1826 .owner = THIS_MODULE,
1827 .name = "functionfs",
Al Virofc14f2f2010-07-25 01:48:30 +04001828 .mount = ffs_fs_mount,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001829 .kill_sb = ffs_fs_kill_sb,
1830};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08001831MODULE_ALIAS_FS("functionfs");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001832
1833
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001834/* Driver's main init/cleanup functions *************************************/
1835
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001836static int functionfs_init(void)
1837{
1838 int ret;
1839
1840 ENTER();
1841
1842 ret = register_filesystem(&ffs_fs_type);
1843 if (likely(!ret))
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001844 pr_info("file system registered\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001845 else
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001846 pr_err("failed registering file system (%d)\n", ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001847
Hemant Kumarde406b72016-07-28 11:51:07 -07001848 ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0);
Hemant Kumar624d3f22017-02-28 18:15:48 -08001849 if (IS_ERR_OR_NULL(ffs_ipc_log))
1850 ffs_ipc_log = NULL;
Hemant Kumarde406b72016-07-28 11:51:07 -07001851
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001852 return ret;
1853}
1854
1855static void functionfs_cleanup(void)
1856{
1857 ENTER();
1858
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001859 pr_info("unloading\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001860 unregister_filesystem(&ffs_fs_type);
Hemant Kumar624d3f22017-02-28 18:15:48 -08001861
1862 if (ffs_ipc_log) {
1863 ipc_log_context_destroy(ffs_ipc_log);
1864 ffs_ipc_log = NULL;
1865 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001866}
1867
1868
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001869/* ffs_data and ffs_function construction and destruction code **************/
1870
1871static void ffs_data_clear(struct ffs_data *ffs);
1872static void ffs_data_reset(struct ffs_data *ffs);
1873
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001874static void ffs_data_get(struct ffs_data *ffs)
1875{
1876 ENTER();
1877
Hemant Kumarde406b72016-07-28 11:51:07 -07001878 ffs_log("enter");
1879
Mayank Rana758d7522016-11-11 10:23:13 -08001880 /* to get updated ref atomic variable value */
1881 smp_mb__before_atomic();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001882 atomic_inc(&ffs->ref);
Hemant Kumarde406b72016-07-28 11:51:07 -07001883
1884 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001885}
1886
1887static void ffs_data_opened(struct ffs_data *ffs)
1888{
1889 ENTER();
1890
Hemant Kumarde406b72016-07-28 11:51:07 -07001891 ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
1892 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
1893
Mayank Rana758d7522016-11-11 10:23:13 -08001894 /* to get updated ref atomic variable value */
1895 smp_mb__before_atomic();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001896 atomic_inc(&ffs->ref);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001897 if (atomic_add_return(1, &ffs->opened) == 1 &&
1898 ffs->state == FFS_DEACTIVATED) {
1899 ffs->state = FFS_CLOSING;
1900 ffs_data_reset(ffs);
1901 }
Hemant Kumarde406b72016-07-28 11:51:07 -07001902
1903 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
1904 ffs->setup_state, ffs->flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001905}
1906
1907static void ffs_data_put(struct ffs_data *ffs)
1908{
Liangliang Luececc4e2017-12-27 13:54:33 +08001909 struct ffs_inst_status *inst_status;
1910 const char *dev_name;
1911
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001912 ENTER();
1913
Hemant Kumarde406b72016-07-28 11:51:07 -07001914 ffs_log("enter");
1915
Mayank Rana758d7522016-11-11 10:23:13 -08001916 /* to get updated ref atomic variable value */
1917 smp_mb__before_atomic();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001918 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001919 pr_info("%s(): freeing\n", __func__);
Liangliang Luececc4e2017-12-27 13:54:33 +08001920 /* Clear ffs from global structure */
1921 inst_status = name_to_inst_status(ffs->dev_name, false);
1922 if (!IS_ERR(inst_status)) {
1923 ffs_dev_lock();
1924 inst_status->ffs_data = NULL;
1925 ffs_dev_unlock();
1926 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001927 ffs_data_clear(ffs);
Andi Kleen647d5582012-03-16 12:01:02 -07001928 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001929 waitqueue_active(&ffs->ep0req_completion.wait));
Liangliang Luececc4e2017-12-27 13:54:33 +08001930 dev_name = ffs->dev_name;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001931 kfree(ffs);
Liangliang Luececc4e2017-12-27 13:54:33 +08001932 ffs_inst_clean_delay(dev_name);
1933 kfree(dev_name);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001934 }
Hemant Kumarde406b72016-07-28 11:51:07 -07001935
1936 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001937}
1938
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001939static void ffs_data_closed(struct ffs_data *ffs)
1940{
1941 ENTER();
1942
Hemant Kumarde406b72016-07-28 11:51:07 -07001943 ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
1944 ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
1945
Mayank Rana758d7522016-11-11 10:23:13 -08001946 /* to get updated opened atomic variable value */
1947 smp_mb__before_atomic();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001948 if (atomic_dec_and_test(&ffs->opened)) {
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001949 if (ffs->no_disconnect) {
1950 ffs->state = FFS_DEACTIVATED;
1951 if (ffs->epfiles) {
1952 ffs_epfiles_destroy(ffs->epfiles,
1953 ffs->eps_count);
1954 ffs->epfiles = NULL;
1955 }
1956 if (ffs->setup_state == FFS_SETUP_PENDING)
1957 __ffs_ep0_stall(ffs);
1958 } else {
1959 ffs->state = FFS_CLOSING;
1960 ffs_data_reset(ffs);
1961 }
1962 }
Mayank Rana758d7522016-11-11 10:23:13 -08001963
1964 /* to get updated opened atomic variable value */
1965 smp_mb__before_atomic();
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001966 if (atomic_read(&ffs->opened) < 0) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001967 ffs->state = FFS_CLOSING;
1968 ffs_data_reset(ffs);
1969 }
1970
Hemant Kumarde406b72016-07-28 11:51:07 -07001971 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
1972 ffs->setup_state, ffs->flags);
1973
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001974 ffs_data_put(ffs);
1975}
1976
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001977static struct ffs_data *ffs_data_new(void)
1978{
1979 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1980 if (unlikely(!ffs))
Felipe Balbif8800d42013-12-12 12:15:43 -06001981 return NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001982
1983 ENTER();
1984
Hemant Kumarde406b72016-07-28 11:51:07 -07001985 ffs_log("enter");
1986
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001987 atomic_set(&ffs->ref, 1);
1988 atomic_set(&ffs->opened, 0);
1989 ffs->state = FFS_READ_DESCRIPTORS;
1990 mutex_init(&ffs->mutex);
1991 spin_lock_init(&ffs->eps_lock);
1992 init_waitqueue_head(&ffs->ev.waitq);
1993 init_completion(&ffs->ep0req_completion);
Sujeet Kumarbc66ea42016-11-11 10:08:49 -08001994 init_completion(&ffs->epout_completion);
1995 init_completion(&ffs->epin_completion);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001996
1997 /* XXX REVISIT need to update it in some places, or do we? */
1998 ffs->ev.can_stall = 1;
1999
Hemant Kumarde406b72016-07-28 11:51:07 -07002000 ffs_log("exit");
2001
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002002 return ffs;
2003}
2004
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002005static void ffs_data_clear(struct ffs_data *ffs)
2006{
2007 ENTER();
2008
Hemant Kumarde406b72016-07-28 11:51:07 -07002009 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
2010 ffs->setup_state, ffs->flags);
2011
Chandana Kishori Chiluveru7f5670a2017-10-28 23:05:45 +05302012 pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
Hemant Kumarde406b72016-07-28 11:51:07 -07002013 __func__, ffs->gadget, ffs->flags);
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02002014 ffs_closed(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002015
2016 BUG_ON(ffs->gadget);
2017
2018 if (ffs->epfiles)
2019 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
2020
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002021 if (ffs->ffs_eventfd)
2022 eventfd_ctx_put(ffs->ffs_eventfd);
2023
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302024 kfree(ffs->raw_descs_data);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002025 kfree(ffs->raw_strings);
2026 kfree(ffs->stringtabs);
Hemant Kumarde406b72016-07-28 11:51:07 -07002027
2028 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
2029 ffs->setup_state, ffs->flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002030}
2031
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002032static void ffs_data_reset(struct ffs_data *ffs)
2033{
2034 ENTER();
2035
Hemant Kumarde406b72016-07-28 11:51:07 -07002036 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
2037 ffs->setup_state, ffs->flags);
2038
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002039 ffs_data_clear(ffs);
2040
2041 ffs->epfiles = NULL;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302042 ffs->raw_descs_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002043 ffs->raw_descs = NULL;
2044 ffs->raw_strings = NULL;
2045 ffs->stringtabs = NULL;
2046
2047 ffs->raw_descs_length = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002048 ffs->fs_descs_count = 0;
2049 ffs->hs_descs_count = 0;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302050 ffs->ss_descs_count = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002051
2052 ffs->strings_count = 0;
2053 ffs->interfaces_count = 0;
2054 ffs->eps_count = 0;
2055
2056 ffs->ev.count = 0;
2057
2058 ffs->state = FFS_READ_DESCRIPTORS;
2059 ffs->setup_state = FFS_NO_SETUP;
2060 ffs->flags = 0;
Hemant Kumarde406b72016-07-28 11:51:07 -07002061
2062 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
2063 ffs->setup_state, ffs->flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002064}
2065
2066
2067static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
2068{
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02002069 struct usb_gadget_strings **lang;
2070 int first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002071
2072 ENTER();
2073
Hemant Kumarde406b72016-07-28 11:51:07 -07002074 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
2075 ffs->setup_state, ffs->flags);
2076
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002077 if (WARN_ON(ffs->state != FFS_ACTIVE
2078 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
2079 return -EBADFD;
2080
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02002081 first_id = usb_string_ids_n(cdev, ffs->strings_count);
2082 if (unlikely(first_id < 0))
2083 return first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002084
2085 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
2086 if (unlikely(!ffs->ep0req))
2087 return -ENOMEM;
2088 ffs->ep0req->complete = ffs_ep0_complete;
2089 ffs->ep0req->context = ffs;
2090
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02002091 lang = ffs->stringtabs;
Michal Nazarewiczf0688c82014-06-17 17:47:41 +02002092 if (lang) {
2093 for (; *lang; ++lang) {
2094 struct usb_string *str = (*lang)->strings;
2095 int id = first_id;
2096 for (; str->s; ++id, ++str)
2097 str->id = id;
2098 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002099 }
2100
2101 ffs->gadget = cdev->gadget;
Hemant Kumarde406b72016-07-28 11:51:07 -07002102
Chandana Kishori Chiluveru7f5670a2017-10-28 23:05:45 +05302103 ffs_log("exit: state %d setup_state %d flag %lu gadget %pK\n",
Hemant Kumarde406b72016-07-28 11:51:07 -07002104 ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
2105
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02002106 ffs_data_get(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002107 return 0;
2108}
2109
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002110static void functionfs_unbind(struct ffs_data *ffs)
2111{
2112 ENTER();
2113
2114 if (!WARN_ON(!ffs->gadget)) {
2115 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
2116 ffs->ep0req = NULL;
2117 ffs->gadget = NULL;
Andrzej Pietrasiewicze2190a92012-03-12 12:55:41 +01002118 clear_bit(FFS_FL_BOUND, &ffs->flags);
Chandana Kishori Chiluveru7f5670a2017-10-28 23:05:45 +05302119 ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
Hemant Kumarde406b72016-07-28 11:51:07 -07002120 ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
Dan Carpenterdf498992013-08-23 11:16:15 +03002121 ffs_data_put(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002122 }
2123}
2124
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002125static int ffs_epfiles_create(struct ffs_data *ffs)
2126{
2127 struct ffs_epfile *epfile, *epfiles;
2128 unsigned i, count;
2129
2130 ENTER();
2131
Hemant Kumarde406b72016-07-28 11:51:07 -07002132 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
2133 ffs->setup_state, ffs->flags);
2134
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002135 count = ffs->eps_count;
Thomas Meyer9823a522011-11-29 22:08:00 +01002136 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002137 if (!epfiles)
2138 return -ENOMEM;
2139
2140 epfile = epfiles;
2141 for (i = 1; i <= count; ++i, ++epfile) {
2142 epfile->ffs = ffs;
2143 mutex_init(&epfile->mutex);
2144 init_waitqueue_head(&epfile->wait);
Mayank Ranaea6ae442016-11-11 10:40:05 -08002145 atomic_set(&epfile->opened, 0);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002146 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
Mario Schuknechtacba23f2015-01-26 20:40:21 +01002147 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002148 else
Mario Schuknechtacba23f2015-01-26 20:40:21 +01002149 sprintf(epfile->name, "ep%u", i);
2150 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
Al Viro1bb27ca2014-09-03 13:32:19 -04002151 epfile,
2152 &ffs_epfile_operations);
2153 if (unlikely(!epfile->dentry)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002154 ffs_epfiles_destroy(epfiles, i - 1);
2155 return -ENOMEM;
2156 }
2157 }
2158
2159 ffs->epfiles = epfiles;
Hemant Kumarde406b72016-07-28 11:51:07 -07002160
Jack Phambdf16ea2017-03-07 14:34:16 -08002161 ffs_log("exit: eps_count %u state %d setup_state %d flag %lu",
2162 count, ffs->state, ffs->setup_state, ffs->flags);
Hemant Kumarde406b72016-07-28 11:51:07 -07002163
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002164 return 0;
2165}
2166
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002167static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
2168{
2169 struct ffs_epfile *epfile = epfiles;
2170
2171 ENTER();
2172
Jack Phambdf16ea2017-03-07 14:34:16 -08002173 ffs_log("enter: count %u", count);
Hemant Kumarde406b72016-07-28 11:51:07 -07002174
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002175 for (; count; --count, ++epfile) {
2176 BUG_ON(mutex_is_locked(&epfile->mutex) ||
2177 waitqueue_active(&epfile->wait));
2178 if (epfile->dentry) {
2179 d_delete(epfile->dentry);
2180 dput(epfile->dentry);
2181 epfile->dentry = NULL;
2182 }
2183 }
2184
2185 kfree(epfiles);
Hemant Kumarde406b72016-07-28 11:51:07 -07002186
2187 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002188}
2189
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002190static void ffs_func_eps_disable(struct ffs_function *func)
2191{
2192 struct ffs_ep *ep = func->eps;
2193 struct ffs_epfile *epfile = func->ffs->epfiles;
2194 unsigned count = func->ffs->eps_count;
2195 unsigned long flags;
2196
Hemant Kumarde406b72016-07-28 11:51:07 -07002197 ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
2198 func->ffs->setup_state, func->ffs->flags);
2199
Michal Nazarewicza9e6f832016-10-04 02:07:34 +02002200 spin_lock_irqsave(&func->ffs->eps_lock, flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002201 do {
Vijayavardhan Vennapusade58029b2016-11-11 09:51:37 -08002202
2203 if (epfile)
2204 atomic_set(&epfile->error, 1);
2205
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002206 /* pending requests get nuked */
2207 if (likely(ep->ep))
2208 usb_ep_disable(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002209 ++ep;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01002210
2211 if (epfile) {
2212 epfile->ep = NULL;
Michal Nazarewicza9e6f832016-10-04 02:07:34 +02002213 __ffs_epfile_read_buffer_free(epfile);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01002214 ++epfile;
2215 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002216 } while (--count);
Michal Nazarewicza9e6f832016-10-04 02:07:34 +02002217 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
Hemant Kumarde406b72016-07-28 11:51:07 -07002218
2219 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002220}
2221
2222static int ffs_func_eps_enable(struct ffs_function *func)
2223{
2224 struct ffs_data *ffs = func->ffs;
2225 struct ffs_ep *ep = func->eps;
2226 struct ffs_epfile *epfile = ffs->epfiles;
2227 unsigned count = ffs->eps_count;
2228 unsigned long flags;
2229 int ret = 0;
2230
Hemant Kumarde406b72016-07-28 11:51:07 -07002231 ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
2232 func->ffs->setup_state, func->ffs->flags);
2233
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002234 spin_lock_irqsave(&func->ffs->eps_lock, flags);
2235 do {
2236 struct usb_endpoint_descriptor *ds;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302237 int desc_idx;
2238
2239 if (ffs->gadget->speed == USB_SPEED_SUPER)
2240 desc_idx = 2;
2241 else if (ffs->gadget->speed == USB_SPEED_HIGH)
2242 desc_idx = 1;
2243 else
2244 desc_idx = 0;
2245
2246 /* fall-back to lower speed if desc missing for current speed */
2247 do {
2248 ds = ep->descs[desc_idx];
2249 } while (!ds && --desc_idx >= 0);
2250
2251 if (!ds) {
2252 ret = -EINVAL;
2253 break;
2254 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002255
2256 ep->ep->driver_data = ep;
Tatyana Brokhman72c973d2011-06-28 16:33:48 +03002257 ep->ep->desc = ds;
Mayank Rana09bc7252016-11-08 14:49:27 -08002258
2259 ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
2260 if (ret) {
2261 pr_err("%s(): config_ep_by_speed(%d) err for %s\n",
2262 __func__, ret, ep->ep->name);
2263 break;
2264 }
2265
Tatyana Brokhman72c973d2011-06-28 16:33:48 +03002266 ret = usb_ep_enable(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002267 if (likely(!ret)) {
2268 epfile->ep = ep;
2269 epfile->in = usb_endpoint_dir_in(ds);
2270 epfile->isoc = usb_endpoint_xfer_isoc(ds);
Hemant Kumarde406b72016-07-28 11:51:07 -07002271 ffs_log("usb_ep_enable %s", ep->ep->name);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002272 } else {
2273 break;
2274 }
2275
2276 wake_up(&epfile->wait);
2277
2278 ++ep;
2279 ++epfile;
2280 } while (--count);
2281 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
2282
Hemant Kumarde406b72016-07-28 11:51:07 -07002283 ffs_log("exit: ret %d", ret);
2284
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002285 return ret;
2286}
2287
2288
2289/* Parsing and building descriptors and strings *****************************/
2290
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002291/*
 2292 * This validates whether the data pointed to by data is a valid USB
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002293 * descriptor and records how many interfaces, endpoints and strings are
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002294 * required by the given configuration.  Returns the number of bytes
 2295 * consumed or a negative errno if the data is invalid.
2296 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002297
2298enum ffs_entity_type {
2299 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
2300};
2301
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002302enum ffs_os_desc_type {
2303 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
2304};
2305
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002306typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
2307 u8 *valuep,
2308 struct usb_descriptor_header *desc,
2309 void *priv);
2310
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002311typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
2312 struct usb_os_desc_header *h, void *data,
2313 unsigned len, void *priv);
2314
Andrzej Pietrasiewiczf96cbd12014-07-09 12:20:06 +02002315static int __must_check ffs_do_single_desc(char *data, unsigned len,
2316 ffs_entity_callback entity,
2317 void *priv)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002318{
2319 struct usb_descriptor_header *_ds = (void *)data;
2320 u8 length;
2321 int ret;
2322
2323 ENTER();
2324
Hemant Kumarde406b72016-07-28 11:51:07 -07002325 ffs_log("enter: len %u", len);
2326
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002327 /* At least two bytes are required: length and type */
2328 if (len < 2) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002329 pr_vdebug("descriptor too short\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002330 return -EINVAL;
2331 }
2332
 2333	/* Do we have at least as many bytes as the descriptor takes? */
2334 length = _ds->bLength;
2335 if (len < length) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002336		pr_vdebug("descriptor longer than available data\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002337 return -EINVAL;
2338 }
2339
2340#define __entity_check_INTERFACE(val) 1
2341#define __entity_check_STRING(val) (val)
2342#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
2343#define __entity(type, val) do { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002344 pr_vdebug("entity " #type "(%02x)\n", (val)); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002345 if (unlikely(!__entity_check_ ##type(val))) { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002346 pr_vdebug("invalid entity's value\n"); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002347 return -EINVAL; \
2348 } \
2349 ret = entity(FFS_ ##type, &val, _ds, priv); \
2350 if (unlikely(ret < 0)) { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002351 pr_debug("entity " #type "(%02x); ret = %d\n", \
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01002352 (val), ret); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002353 return ret; \
2354 } \
2355 } while (0)
2356
2357 /* Parse descriptor depending on type. */
2358 switch (_ds->bDescriptorType) {
2359 case USB_DT_DEVICE:
2360 case USB_DT_CONFIG:
2361 case USB_DT_STRING:
2362 case USB_DT_DEVICE_QUALIFIER:
2363 /* function can't have any of those */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002364 pr_vdebug("descriptor reserved for gadget: %d\n",
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002365 _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002366 return -EINVAL;
2367
2368 case USB_DT_INTERFACE: {
2369 struct usb_interface_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002370 pr_vdebug("interface descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002371 if (length != sizeof *ds)
2372 goto inv_length;
2373
2374 __entity(INTERFACE, ds->bInterfaceNumber);
2375 if (ds->iInterface)
2376 __entity(STRING, ds->iInterface);
2377 }
2378 break;
2379
2380 case USB_DT_ENDPOINT: {
2381 struct usb_endpoint_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002382 pr_vdebug("endpoint descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002383 if (length != USB_DT_ENDPOINT_SIZE &&
2384 length != USB_DT_ENDPOINT_AUDIO_SIZE)
2385 goto inv_length;
2386 __entity(ENDPOINT, ds->bEndpointAddress);
2387 }
2388 break;
2389
Koen Beel560f1182012-05-30 20:43:37 +02002390 case HID_DT_HID:
2391 pr_vdebug("hid descriptor\n");
2392 if (length != sizeof(struct hid_descriptor))
2393 goto inv_length;
2394 break;
2395
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002396 case USB_DT_OTG:
2397 if (length != sizeof(struct usb_otg_descriptor))
2398 goto inv_length;
2399 break;
2400
2401 case USB_DT_INTERFACE_ASSOCIATION: {
2402 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002403 pr_vdebug("interface association descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002404 if (length != sizeof *ds)
2405 goto inv_length;
2406 if (ds->iFunction)
2407 __entity(STRING, ds->iFunction);
2408 }
2409 break;
2410
Manu Gautam8d4e8972014-02-28 16:50:22 +05302411 case USB_DT_SS_ENDPOINT_COMP:
2412 pr_vdebug("EP SS companion descriptor\n");
2413 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
2414 goto inv_length;
2415 break;
2416
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002417 case USB_DT_OTHER_SPEED_CONFIG:
2418 case USB_DT_INTERFACE_POWER:
2419 case USB_DT_DEBUG:
2420 case USB_DT_SECURITY:
2421 case USB_DT_CS_RADIO_CONTROL:
2422 /* TODO */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002423 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002424 return -EINVAL;
2425
2426 default:
2427 /* We should never be here */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002428 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002429 return -EINVAL;
2430
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002431inv_length:
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002432 pr_vdebug("invalid length: %d (descriptor %d)\n",
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01002433 _ds->bLength, _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002434 return -EINVAL;
2435 }
2436
2437#undef __entity
2438#undef __entity_check_DESCRIPTOR
2439#undef __entity_check_INTERFACE
2440#undef __entity_check_STRING
2441#undef __entity_check_ENDPOINT
2442
Hemant Kumarde406b72016-07-28 11:51:07 -07002443 ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
2444
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002445 return length;
2446}
2447
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002448static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
2449 ffs_entity_callback entity, void *priv)
2450{
2451 const unsigned _len = len;
2452 unsigned long num = 0;
2453
2454 ENTER();
2455
Hemant Kumarde406b72016-07-28 11:51:07 -07002456 ffs_log("enter: len %u", len);
2457
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002458 for (;;) {
2459 int ret;
2460
2461 if (num == count)
2462 data = NULL;
2463
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002464 /* Record "descriptor" entity */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002465 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
2466 if (unlikely(ret < 0)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002467 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01002468 num, ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002469 return ret;
2470 }
2471
2472 if (!data)
2473 return _len - len;
2474
Andrzej Pietrasiewiczf96cbd12014-07-09 12:20:06 +02002475 ret = ffs_do_single_desc(data, len, entity, priv);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002476 if (unlikely(ret < 0)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002477 pr_debug("%s returns %d\n", __func__, ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002478 return ret;
2479 }
2480
2481 len -= ret;
2482 data += ret;
2483 ++num;
2484 }
Hemant Kumarde406b72016-07-28 11:51:07 -07002485
2486 ffs_log("exit: len %u", len);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002487}
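/*
 * Illustrative sketch (hypothetical userspace structure, not part of
 * this file): the "data" walked by ffs_do_descs()/ffs_do_single_desc()
 * is the raw descriptor block userspace writes to ep0, e.g. one
 * vendor-specific interface with two bulk endpoints for a single
 * speed (endpoint initialisers omitted for brevity):
 *
 *	static const struct {
 *		struct usb_interface_descriptor intf;
 *		struct usb_endpoint_descriptor_no_audio sink;
 *		struct usb_endpoint_descriptor_no_audio source;
 *	} __attribute__((packed)) fs_descs = {
 *		.intf = {
 *			.bLength = sizeof(fs_descs.intf),
 *			.bDescriptorType = USB_DT_INTERFACE,
 *			.bNumEndpoints = 2,
 *			.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
 *		},
 *	};
 *
 * Each descriptor is length/type checked and reported to the entity
 * callback so strings_count, interfaces_count and eps_count can be
 * sized before anything is bound to the gadget.
 */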
2488
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002489static int __ffs_data_do_entity(enum ffs_entity_type type,
2490 u8 *valuep, struct usb_descriptor_header *desc,
2491 void *priv)
2492{
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002493 struct ffs_desc_helper *helper = priv;
2494 struct usb_endpoint_descriptor *d;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002495
2496 ENTER();
2497
Hemant Kumarde406b72016-07-28 11:51:07 -07002498 ffs_log("enter: type %u", type);
2499
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002500 switch (type) {
2501 case FFS_DESCRIPTOR:
2502 break;
2503
2504 case FFS_INTERFACE:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002505 /*
2506 * Interfaces are indexed from zero so if we
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002507		 * encounter interface "n" then there are at least
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002508 * "n+1" interfaces.
2509 */
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002510 if (*valuep >= helper->interfaces_count)
2511 helper->interfaces_count = *valuep + 1;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002512 break;
2513
2514 case FFS_STRING:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002515 /*
2516 * Strings are indexed from 1 (0 is magic ;) reserved
2517 * for languages list or some such)
2518 */
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002519 if (*valuep > helper->ffs->strings_count)
2520 helper->ffs->strings_count = *valuep;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002521 break;
2522
2523 case FFS_ENDPOINT:
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002524 d = (void *)desc;
2525 helper->eps_count++;
2526 if (helper->eps_count >= 15)
2527 return -EINVAL;
2528 /* Check if descriptors for any speed were already parsed */
2529 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
2530 helper->ffs->eps_addrmap[helper->eps_count] =
2531 d->bEndpointAddress;
2532 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
2533 d->bEndpointAddress)
2534 return -EINVAL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002535 break;
2536 }
2537
Hemant Kumarde406b72016-07-28 11:51:07 -07002538 ffs_log("exit");
2539
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002540 return 0;
2541}
2542
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002543static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
2544 struct usb_os_desc_header *desc)
2545{
2546 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
2547 u16 w_index = le16_to_cpu(desc->wIndex);
2548
Hemant Kumarde406b72016-07-28 11:51:07 -07002549 ffs_log("enter");
2550
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002551 if (bcd_version != 1) {
2552 pr_vdebug("unsupported os descriptors version: %d",
2553 bcd_version);
2554 return -EINVAL;
2555 }
2556 switch (w_index) {
2557 case 0x4:
2558 *next_type = FFS_OS_DESC_EXT_COMPAT;
2559 break;
2560 case 0x5:
2561 *next_type = FFS_OS_DESC_EXT_PROP;
2562 break;
2563 default:
2564 pr_vdebug("unsupported os descriptor type: %d", w_index);
2565 return -EINVAL;
2566 }
2567
Hemant Kumarde406b72016-07-28 11:51:07 -07002568 ffs_log("exit: size of desc %zu", sizeof(*desc));
2569
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002570 return sizeof(*desc);
2571}
2572
2573/*
2574 * Process all extended compatibility/extended property descriptors
2575 * of a feature descriptor
2576 */
2577static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2578 enum ffs_os_desc_type type,
2579 u16 feature_count,
2580 ffs_os_desc_callback entity,
2581 void *priv,
2582 struct usb_os_desc_header *h)
2583{
2584 int ret;
2585 const unsigned _len = len;
2586
2587 ENTER();
2588
Hemant Kumarde406b72016-07-28 11:51:07 -07002589 ffs_log("enter: len %u os desc type %d", len, type);
2590
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002591 /* loop over all ext compat/ext prop descriptors */
2592 while (feature_count--) {
2593 ret = entity(type, h, data, len, priv);
2594 if (unlikely(ret < 0)) {
2595 pr_debug("bad OS descriptor, type: %d\n", type);
2596 return ret;
2597 }
2598 data += ret;
2599 len -= ret;
2600 }
Hemant Kumarde406b72016-07-28 11:51:07 -07002601
2602 ffs_log("exit");
2603
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002604 return _len - len;
2605}
2606
2607/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2608static int __must_check ffs_do_os_descs(unsigned count,
2609 char *data, unsigned len,
2610 ffs_os_desc_callback entity, void *priv)
2611{
2612 const unsigned _len = len;
2613 unsigned long num = 0;
2614
2615 ENTER();
2616
Hemant Kumarde406b72016-07-28 11:51:07 -07002617 ffs_log("enter: len %u", len);
2618
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002619 for (num = 0; num < count; ++num) {
2620 int ret;
2621 enum ffs_os_desc_type type;
2622 u16 feature_count;
2623 struct usb_os_desc_header *desc = (void *)data;
2624
2625 if (len < sizeof(*desc))
2626 return -EINVAL;
2627
2628 /*
2629 * Record "descriptor" entity.
2630 * Process dwLength, bcdVersion, wIndex, get b/wCount.
2631 * Move the data pointer to the beginning of extended
2632 * compatibilities proper or extended properties proper
2633 * portions of the data
2634 */
2635 if (le32_to_cpu(desc->dwLength) > len)
2636 return -EINVAL;
2637
2638 ret = __ffs_do_os_desc_header(&type, desc);
2639 if (unlikely(ret < 0)) {
2640 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2641 num, ret);
2642 return ret;
2643 }
2644 /*
2645 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2646 */
2647 feature_count = le16_to_cpu(desc->wCount);
2648 if (type == FFS_OS_DESC_EXT_COMPAT &&
2649 (feature_count > 255 || desc->Reserved))
2650 return -EINVAL;
2651 len -= ret;
2652 data += ret;
2653
2654 /*
2655 * Process all function/property descriptors
2656 * of this Feature Descriptor
2657 */
2658 ret = ffs_do_single_os_desc(data, len, type,
2659 feature_count, entity, priv, desc);
2660 if (unlikely(ret < 0)) {
2661 pr_debug("%s returns %d\n", __func__, ret);
2662 return ret;
2663 }
2664
2665 len -= ret;
2666 data += ret;
2667 }
Hemant Kumarde406b72016-07-28 11:51:07 -07002668
2669 ffs_log("exit");
2670
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002671 return _len - len;
2672}
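/*
 * Layout note (wording added here, derived from the parsing above):
 * the OS descriptor area handed in by userspace is a sequence of
 * usb_os_desc_header entries, each immediately followed by its
 * feature descriptors:
 *
 *	header: interface, dwLength, bcdVersion (must be 1),
 *	        wIndex (0x4 = extended compat ID, 0x5 = extended
 *	        properties), wCount
 *	body:   wCount extended compat ID entries (wIndex == 0x4), or
 *	        wCount extended property blocks   (wIndex == 0x5)
 *
 * __ffs_do_os_desc_header() checks the header, ffs_do_single_os_desc()
 * then loops over the wCount feature descriptors.
 */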
2673
2674/**
2675 * Validate contents of the buffer from userspace related to OS descriptors.
2676 */
2677static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2678 struct usb_os_desc_header *h, void *data,
2679 unsigned len, void *priv)
2680{
2681 struct ffs_data *ffs = priv;
2682 u8 length;
2683
2684 ENTER();
2685
Hemant Kumarde406b72016-07-28 11:51:07 -07002686 ffs_log("enter: len %u", len);
2687
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002688 switch (type) {
2689 case FFS_OS_DESC_EXT_COMPAT: {
2690 struct usb_ext_compat_desc *d = data;
2691 int i;
2692
2693 if (len < sizeof(*d) ||
John Keeping112b8a82017-11-27 18:15:40 +00002694 d->bFirstInterfaceNumber >= ffs->interfaces_count)
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002695 return -EINVAL;
John Keeping112b8a82017-11-27 18:15:40 +00002696 if (d->Reserved1 != 1) {
2697 /*
2698 * According to the spec, Reserved1 must be set to 1
2699 * but older kernels incorrectly rejected non-zero
2700 * values. We fix it here to avoid returning EINVAL
2701 * in response to values we used to accept.
2702 */
2703 pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2704 d->Reserved1 = 1;
2705 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002706 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2707 if (d->Reserved2[i])
2708 return -EINVAL;
2709
2710 length = sizeof(struct usb_ext_compat_desc);
2711 }
2712 break;
2713 case FFS_OS_DESC_EXT_PROP: {
2714 struct usb_ext_prop_desc *d = data;
2715 u32 type, pdl;
2716 u16 pnl;
2717
2718 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2719 return -EINVAL;
2720 length = le32_to_cpu(d->dwSize);
Vincent Pelletier12a9c112017-01-18 00:57:44 +00002721 if (len < length)
2722 return -EINVAL;
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002723 type = le32_to_cpu(d->dwPropertyDataType);
2724 if (type < USB_EXT_PROP_UNICODE ||
2725 type > USB_EXT_PROP_UNICODE_MULTI) {
2726 pr_vdebug("unsupported os descriptor property type: %d",
2727 type);
2728 return -EINVAL;
2729 }
2730 pnl = le16_to_cpu(d->wPropertyNameLength);
Vincent Pelletier12a9c112017-01-18 00:57:44 +00002731 if (length < 14 + pnl) {
2732 pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2733 length, pnl, type);
2734 return -EINVAL;
2735 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002736 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2737 if (length != 14 + pnl + pdl) {
2738 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2739 length, pnl, pdl, type);
2740 return -EINVAL;
2741 }
2742 ++ffs->ms_os_descs_ext_prop_count;
2743 /* property name reported to the host as "WCHAR"s */
2744 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2745 ffs->ms_os_descs_ext_prop_data_len += pdl;
2746 }
2747 break;
2748 default:
2749 pr_vdebug("unknown descriptor: %d\n", type);
2750 return -EINVAL;
2751 }
Hemant Kumarde406b72016-07-28 11:51:07 -07002752
2753 ffs_log("exit");
2754
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002755 return length;
2756}
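/*
 * Illustrative example (assumed values, not from the original sources):
 * a single extended compat ID entry announcing a WinUSB compatible ID
 * for interface 0, as validated by the FFS_OS_DESC_EXT_COMPAT branch
 * above:
 *
 *	struct usb_ext_compat_desc winusb = {
 *		.bFirstInterfaceNumber	= 0,
 *		.Reserved1		= 1,
 *		.CompatibleID		= "WINUSB",
 *	};
 *
 * Reserved1 is forced to 1 if userspace passed a different value, while
 * any non-zero Reserved2 byte makes the whole descriptor block be
 * rejected with -EINVAL.
 */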
2757
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002758static int __ffs_data_got_descs(struct ffs_data *ffs,
2759 char *const _data, size_t len)
2760{
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302761 char *data = _data, *raw_descs;
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002762 unsigned os_descs_count = 0, counts[3], flags;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302763 int ret = -EINVAL, i;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002764 struct ffs_desc_helper helper;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002765
2766 ENTER();
2767
Hemant Kumarde406b72016-07-28 11:51:07 -07002768 ffs_log("enter: len %zu", len);
2769
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302770 if (get_unaligned_le32(data + 4) != len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002771 goto error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002772
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302773 switch (get_unaligned_le32(data)) {
2774 case FUNCTIONFS_DESCRIPTORS_MAGIC:
2775 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302776 data += 8;
2777 len -= 8;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302778 break;
2779 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2780 flags = get_unaligned_le32(data + 8);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002781 ffs->user_flags = flags;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302782 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2783 FUNCTIONFS_HAS_HS_DESC |
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002784 FUNCTIONFS_HAS_SS_DESC |
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002785 FUNCTIONFS_HAS_MS_OS_DESC |
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002786 FUNCTIONFS_VIRTUAL_ADDR |
Felix Hädicke54dfce62016-06-22 01:12:07 +02002787 FUNCTIONFS_EVENTFD |
Felix Hädicke4368c282016-06-22 01:12:09 +02002788 FUNCTIONFS_ALL_CTRL_RECIP |
2789 FUNCTIONFS_CONFIG0_SETUP)) {
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302790 ret = -ENOSYS;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002791 goto error;
2792 }
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302793 data += 12;
2794 len -= 12;
2795 break;
2796 default:
2797 goto error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002798 }
2799
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002800 if (flags & FUNCTIONFS_EVENTFD) {
2801 if (len < 4)
2802 goto error;
2803 ffs->ffs_eventfd =
2804 eventfd_ctx_fdget((int)get_unaligned_le32(data));
2805 if (IS_ERR(ffs->ffs_eventfd)) {
2806 ret = PTR_ERR(ffs->ffs_eventfd);
2807 ffs->ffs_eventfd = NULL;
2808 goto error;
2809 }
2810 data += 4;
2811 len -= 4;
2812 }
2813
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302814 /* Read fs_count, hs_count and ss_count (if present) */
2815 for (i = 0; i < 3; ++i) {
2816 if (!(flags & (1 << i))) {
2817 counts[i] = 0;
2818 } else if (len < 4) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002819 goto error;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302820 } else {
2821 counts[i] = get_unaligned_le32(data);
2822 data += 4;
2823 len -= 4;
2824 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002825 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002826 if (flags & (1 << i)) {
Vincent Pelletier12a9c112017-01-18 00:57:44 +00002827 if (len < 4) {
2828 goto error;
2829 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002830 os_descs_count = get_unaligned_le32(data);
2831 data += 4;
2832 len -= 4;
2833	}
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002834
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302835 /* Read descriptors */
2836 raw_descs = data;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002837 helper.ffs = ffs;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302838 for (i = 0; i < 3; ++i) {
2839 if (!counts[i])
2840 continue;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002841 helper.interfaces_count = 0;
2842 helper.eps_count = 0;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302843 ret = ffs_do_descs(counts[i], data, len,
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002844 __ffs_data_do_entity, &helper);
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302845 if (ret < 0)
2846 goto error;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002847 if (!ffs->eps_count && !ffs->interfaces_count) {
2848 ffs->eps_count = helper.eps_count;
2849 ffs->interfaces_count = helper.interfaces_count;
2850 } else {
2851 if (ffs->eps_count != helper.eps_count) {
2852 ret = -EINVAL;
2853 goto error;
2854 }
2855 if (ffs->interfaces_count != helper.interfaces_count) {
2856 ret = -EINVAL;
2857 goto error;
2858 }
2859 }
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302860 data += ret;
2861 len -= ret;
2862 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002863 if (os_descs_count) {
2864 ret = ffs_do_os_descs(os_descs_count, data, len,
2865 __ffs_data_do_os_desc, ffs);
2866 if (ret < 0)
2867 goto error;
2868 data += ret;
2869 len -= ret;
2870 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002871
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302872 if (raw_descs == data || len) {
2873 ret = -EINVAL;
2874 goto error;
2875 }
2876
2877 ffs->raw_descs_data = _data;
2878 ffs->raw_descs = raw_descs;
2879 ffs->raw_descs_length = data - raw_descs;
2880 ffs->fs_descs_count = counts[0];
2881 ffs->hs_descs_count = counts[1];
2882 ffs->ss_descs_count = counts[2];
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002883 ffs->ms_os_descs_count = os_descs_count;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002884
Hemant Kumarde406b72016-07-28 11:51:07 -07002885 ffs_log("exit");
2886
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002887 return 0;
2888
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002889error:
2890 kfree(_data);
Hemant Kumarde406b72016-07-28 11:51:07 -07002891 ffs_log("exit: ret %d", ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002892 return ret;
2893}
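/*
 * Illustrative sketch (not part of the driver): the byte stream that
 * __ffs_data_got_descs() parses above is what user space writes to ep0.
 * For the FUNCTIONFS_DESCRIPTORS_MAGIC_V2 format it is laid out roughly as
 * below; the struct name is hypothetical, the authoritative definitions
 * live in include/uapi/linux/usb/functionfs.h.
 *
 *	struct ffs_descs_v2_example {	// hypothetical name
 *		__le32 magic;		// FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 *		__le32 length;		// must equal the total write length
 *		__le32 flags;		// FUNCTIONFS_HAS_FS_DESC | ...
 *		// __le32 eventfd;	// present only with FUNCTIONFS_EVENTFD
 *		// __le32 fs_count, hs_count, ss_count;
 *		//			// only the counts whose HAS_*_DESC
 *		//			// flag is set are present
 *		// __le32 os_desc_count;	// only with FUNCTIONFS_HAS_MS_OS_DESC
 *		// raw descriptors follow: full speed, then high speed, then
 *		// super speed, then the OS descriptors
 *	};
 */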
2894
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002895static int __ffs_data_got_strings(struct ffs_data *ffs,
2896 char *const _data, size_t len)
2897{
2898 u32 str_count, needed_count, lang_count;
2899 struct usb_gadget_strings **stringtabs, *t;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002900 const char *data = _data;
Michal Nazarewicz872ce512016-05-31 14:17:21 +02002901 struct usb_string *s;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002902
2903 ENTER();
2904
Hemant Kumarde406b72016-07-28 11:51:07 -07002905 ffs_log("enter: len %zu", len);
2906
Vincent Pelletier12a9c112017-01-18 00:57:44 +00002907 if (unlikely(len < 16 ||
2908 get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002909 get_unaligned_le32(data + 4) != len))
2910 goto error;
2911 str_count = get_unaligned_le32(data + 8);
2912 lang_count = get_unaligned_le32(data + 12);
2913
2914 /* if one is zero the other must be zero */
2915 if (unlikely(!str_count != !lang_count))
2916 goto error;
2917
2918 /* Do we have at least as many strings as descriptors need? */
2919 needed_count = ffs->strings_count;
2920 if (unlikely(str_count < needed_count))
2921 goto error;
2922
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002923 /*
2924 * If we don't need any strings just return and free all
2925 * memory.
2926 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002927 if (!needed_count) {
2928 kfree(_data);
2929 return 0;
2930 }
2931
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002932 /* Allocate everything in one chunk so there's less maintenance. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002933 {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002934 unsigned i = 0;
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002935 vla_group(d);
2936 vla_item(d, struct usb_gadget_strings *, stringtabs,
2937 lang_count + 1);
2938 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2939 vla_item(d, struct usb_string, strings,
2940 lang_count*(needed_count+1));
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002941
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002942 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2943
2944 if (unlikely(!vlabuf)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002945 kfree(_data);
2946 return -ENOMEM;
2947 }
2948
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002949 /* Initialize the VLA pointers */
2950 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2951 t = vla_ptr(vlabuf, d, stringtab);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002952 i = lang_count;
2953 do {
2954 *stringtabs++ = t++;
2955 } while (--i);
2956 *stringtabs = NULL;
2957
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002958 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2959 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2960 t = vla_ptr(vlabuf, d, stringtab);
2961 s = vla_ptr(vlabuf, d, strings);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002962 }
2963
2964 /* For each language */
2965 data += 16;
2966 len -= 16;
2967
2968 do { /* lang_count > 0 so we can use do-while */
2969 unsigned needed = needed_count;
2970
2971 if (unlikely(len < 3))
2972 goto error_free;
2973 t->language = get_unaligned_le16(data);
2974 t->strings = s;
2975 ++t;
2976
2977 data += 2;
2978 len -= 2;
2979
2980 /* For each string */
2981 do { /* str_count > 0 so we can use do-while */
2982 size_t length = strnlen(data, len);
2983
2984 if (unlikely(length == len))
2985 goto error_free;
2986
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002987 /*
2988 * User may provide more strings then we need,
2989			 * User may provide more strings than we need,
2990 * rest
2991 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002992 if (likely(needed)) {
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002993 /*
2994 * s->id will be set while adding
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002995 * function to configuration so for
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002996 * now just leave garbage here.
2997 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002998 s->s = data;
2999 --needed;
3000 ++s;
3001 }
3002
3003 data += length + 1;
3004 len -= length + 1;
3005 } while (--str_count);
3006
3007 s->id = 0; /* terminator */
3008 s->s = NULL;
3009 ++s;
3010
3011 } while (--lang_count);
3012
3013 /* Some garbage left? */
3014 if (unlikely(len))
3015 goto error_free;
3016
3017 /* Done! */
3018 ffs->stringtabs = stringtabs;
3019 ffs->raw_strings = _data;
3020
Hemant Kumarde406b72016-07-28 11:51:07 -07003021 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003022 return 0;
3023
3024error_free:
3025 kfree(stringtabs);
3026error:
3027 kfree(_data);
Hemant Kumarde406b72016-07-28 11:51:07 -07003028 ffs_log("exit: -EINVAL");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003029 return -EINVAL;
3030}
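/*
 * Illustrative sketch (not part of the driver): judging from the checks in
 * __ffs_data_got_strings() above, the strings blob is a 16-byte header --
 * FUNCTIONFS_STRINGS_MAGIC, total length, str_count and lang_count, each a
 * __le32 -- followed, for every language, by a __le16 language code and
 * str_count NUL-terminated UTF-8 strings.
 */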
3031
3032
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003033/* Events handling and management *******************************************/
3034
3035static void __ffs_event_add(struct ffs_data *ffs,
3036 enum usb_functionfs_event_type type)
3037{
3038 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
3039 int neg = 0;
3040
Hemant Kumarde406b72016-07-28 11:51:07 -07003041 ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
3042 ffs->state, ffs->setup_state, ffs->flags);
3043
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003044 /*
3045 * Abort any unhandled setup
3046 *
3047 * We do not need to worry about some cmpxchg() changing value
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003048 * of ffs->setup_state without holding the lock because when
3049 * state is FFS_SETUP_PENDING cmpxchg() in several places in
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003050 * the source does nothing.
3051 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003052 if (ffs->setup_state == FFS_SETUP_PENDING)
Michal Nazarewicze46318a2014-02-10 10:42:40 +01003053 ffs->setup_state = FFS_SETUP_CANCELLED;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003054
Michal Nazarewicz67913bb2014-09-10 17:50:24 +02003055 /*
3056 * Logic of this function guarantees that there are at most four pending
3057	 * events on ffs->ev.types queue. This is important because the queue
3058 * has space for four elements only and __ffs_ep0_read_events function
3059 * depends on that limit as well. If more event types are added, those
3060 * limits have to be revisited or guaranteed to still hold.
3061 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003062 switch (type) {
3063 case FUNCTIONFS_RESUME:
3064 rem_type2 = FUNCTIONFS_SUSPEND;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003065 /* FALL THROUGH */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003066 case FUNCTIONFS_SUSPEND:
3067 case FUNCTIONFS_SETUP:
3068 rem_type1 = type;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003069 /* Discard all similar events */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003070 break;
3071
3072 case FUNCTIONFS_BIND:
3073 case FUNCTIONFS_UNBIND:
3074 case FUNCTIONFS_DISABLE:
3075 case FUNCTIONFS_ENABLE:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003076		/* Discard everything other than power management. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003077 rem_type1 = FUNCTIONFS_SUSPEND;
3078 rem_type2 = FUNCTIONFS_RESUME;
3079 neg = 1;
3080 break;
3081
3082 default:
Michal Nazarewiczfe00bcb2014-09-11 18:52:49 +02003083 WARN(1, "%d: unknown event, this should not happen\n", type);
3084 return;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003085 }
3086
3087 {
3088 u8 *ev = ffs->ev.types, *out = ev;
3089 unsigned n = ffs->ev.count;
3090 for (; n; --n, ++ev)
3091 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
3092 *out++ = *ev;
3093 else
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003094 pr_vdebug("purging event %d\n", *ev);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003095 ffs->ev.count = out - ffs->ev.types;
3096 }
3097
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003098 pr_vdebug("adding event %d\n", type);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003099 ffs->ev.types[ffs->ev.count++] = type;
3100 wake_up_locked(&ffs->ev.waitq);
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01003101 if (ffs->ffs_eventfd)
3102 eventfd_signal(ffs->ffs_eventfd, 1);
Hemant Kumarde406b72016-07-28 11:51:07 -07003103
3104 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
3105 ffs->setup_state, ffs->flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003106}
3107
3108static void ffs_event_add(struct ffs_data *ffs,
3109 enum usb_functionfs_event_type type)
3110{
3111 unsigned long flags;
3112 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3113 __ffs_event_add(ffs, type);
3114 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3115}
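/*
 * Usage note (illustrative, assuming the standard FunctionFS user API):
 * the events queued by ffs_event_add() are delivered to whichever daemon
 * holds ep0 open.  A minimal reader could look like the commented sketch
 * below; ep0_fd is a hypothetical file descriptor for the ep0 file.
 *
 *	// struct usb_functionfs_event ev;
 *	// if (read(ep0_fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *	//	switch (ev.type) {
 *	//	case FUNCTIONFS_ENABLE:	// start I/O on the ep files
 *	//		break;
 *	//	case FUNCTIONFS_SETUP:	// handle ev.u.setup
 *	//		break;
 *	//	default:
 *	//		break;
 *	//	}
 *	// }
 */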
3116
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003117/* Bind/unbind USB function hooks *******************************************/
3118
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02003119static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
3120{
3121 int i;
3122
3123 for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
3124 if (ffs->eps_addrmap[i] == endpoint_address)
3125 return i;
3126 return -ENOENT;
3127}
3128
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003129static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
3130 struct usb_descriptor_header *desc,
3131 void *priv)
3132{
3133 struct usb_endpoint_descriptor *ds = (void *)desc;
3134 struct ffs_function *func = priv;
3135 struct ffs_ep *ffs_ep;
Dan Carpenter85b06f52014-09-09 15:06:09 +03003136 unsigned ep_desc_id;
3137 int idx;
Manu Gautam8d4e8972014-02-28 16:50:22 +05303138 static const char *speed_names[] = { "full", "high", "super" };
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003139
Hemant Kumarde406b72016-07-28 11:51:07 -07003140 ffs_log("enter");
3141
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003142 if (type != FFS_DESCRIPTOR)
3143 return 0;
3144
Manu Gautam8d4e8972014-02-28 16:50:22 +05303145 /*
3146 * If ss_descriptors is not NULL, we are reading super speed
3147 * descriptors; if hs_descriptors is not NULL, we are reading high
3148 * speed descriptors; otherwise, we are reading full speed
3149 * descriptors.
3150 */
3151 if (func->function.ss_descriptors) {
3152 ep_desc_id = 2;
3153 func->function.ss_descriptors[(long)valuep] = desc;
3154 } else if (func->function.hs_descriptors) {
3155 ep_desc_id = 1;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003156 func->function.hs_descriptors[(long)valuep] = desc;
Manu Gautam8d4e8972014-02-28 16:50:22 +05303157 } else {
3158 ep_desc_id = 0;
Sebastian Andrzej Siewior10287ba2012-10-22 22:15:06 +02003159 func->function.fs_descriptors[(long)valuep] = desc;
Manu Gautam8d4e8972014-02-28 16:50:22 +05303160 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003161
3162 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
3163 return 0;
3164
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02003165 idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
3166 if (idx < 0)
3167 return idx;
3168
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003169 ffs_ep = func->eps + idx;
3170
Manu Gautam8d4e8972014-02-28 16:50:22 +05303171 if (unlikely(ffs_ep->descs[ep_desc_id])) {
3172 pr_err("two %sspeed descriptors for EP %d\n",
3173 speed_names[ep_desc_id],
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01003174 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003175 return -EINVAL;
3176 }
Manu Gautam8d4e8972014-02-28 16:50:22 +05303177 ffs_ep->descs[ep_desc_id] = ds;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003178
3179 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
3180 if (ffs_ep->ep) {
3181 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
3182 if (!ds->wMaxPacketSize)
3183 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
3184 } else {
3185 struct usb_request *req;
3186 struct usb_ep *ep;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02003187 u8 bEndpointAddress;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003188
Robert Baldyga1b0bf882014-09-09 08:23:17 +02003189 /*
3190 * We back up bEndpointAddress because autoconfig overwrites
3191 * it with physical endpoint address.
3192 */
3193 bEndpointAddress = ds->bEndpointAddress;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003194 pr_vdebug("autoconfig\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003195 ep = usb_ep_autoconfig(func->gadget, ds);
3196 if (unlikely(!ep))
3197 return -ENOTSUPP;
Joe Perchescc7e6052010-11-14 19:04:49 -08003198 ep->driver_data = func->eps + idx;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003199
3200 req = usb_ep_alloc_request(ep, GFP_KERNEL);
3201 if (unlikely(!req))
3202 return -ENOMEM;
3203
3204 ffs_ep->ep = ep;
3205 ffs_ep->req = req;
3206 func->eps_revmap[ds->bEndpointAddress &
3207 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02003208 /*
3209 * If we use virtual address mapping, we restore
3210 * original bEndpointAddress value.
3211 */
3212 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3213 ds->bEndpointAddress = bEndpointAddress;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003214 }
3215 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
3216
Hemant Kumarde406b72016-07-28 11:51:07 -07003217 ffs_log("exit");
3218
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003219 return 0;
3220}
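/*
 * Side note (illustrative): when user space sets FUNCTIONFS_VIRTUAL_ADDR,
 * the function above restores the logical bEndpointAddress it supplied
 * (e.g. 0x81) even though usb_ep_autoconfig() may have picked a different
 * physical endpoint; ffs_func_setup() later translates wIndex back through
 * eps_addrmap so the daemon keeps seeing its own numbering.
 */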
3221
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003222static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
3223 struct usb_descriptor_header *desc,
3224 void *priv)
3225{
3226 struct ffs_function *func = priv;
3227 unsigned idx;
3228 u8 newValue;
3229
Hemant Kumarde406b72016-07-28 11:51:07 -07003230 ffs_log("enter: type %d", type);
3231
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003232 switch (type) {
3233 default:
3234 case FFS_DESCRIPTOR:
3235 /* Handled in previous pass by __ffs_func_bind_do_descs() */
3236 return 0;
3237
3238 case FFS_INTERFACE:
3239 idx = *valuep;
3240 if (func->interfaces_nums[idx] < 0) {
3241 int id = usb_interface_id(func->conf, &func->function);
3242 if (unlikely(id < 0))
3243 return id;
3244 func->interfaces_nums[idx] = id;
3245 }
3246 newValue = func->interfaces_nums[idx];
3247 break;
3248
3249 case FFS_STRING:
3250		/* String IDs are allocated when ffs_data is bound to cdev */
3251 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
3252 break;
3253
3254 case FFS_ENDPOINT:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003255 /*
3256 * USB_DT_ENDPOINT are handled in
3257 * __ffs_func_bind_do_descs().
3258 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003259 if (desc->bDescriptorType == USB_DT_ENDPOINT)
3260 return 0;
3261
3262 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
3263 if (unlikely(!func->eps[idx].ep))
3264 return -EINVAL;
3265
3266 {
3267 struct usb_endpoint_descriptor **descs;
3268 descs = func->eps[idx].descs;
3269 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
3270 }
3271 break;
3272 }
3273
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003274 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003275 *valuep = newValue;
Hemant Kumarde406b72016-07-28 11:51:07 -07003276
3277 ffs_log("exit: newValue %d", newValue);
3278
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003279 return 0;
3280}
3281
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003282static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
3283 struct usb_os_desc_header *h, void *data,
3284 unsigned len, void *priv)
3285{
3286 struct ffs_function *func = priv;
3287 u8 length = 0;
3288
Hemant Kumarde406b72016-07-28 11:51:07 -07003289 ffs_log("enter: type %d", type);
3290
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003291 switch (type) {
3292 case FFS_OS_DESC_EXT_COMPAT: {
3293 struct usb_ext_compat_desc *desc = data;
3294 struct usb_os_desc_table *t;
3295
3296 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
3297 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
3298 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
3299 ARRAY_SIZE(desc->CompatibleID) +
3300 ARRAY_SIZE(desc->SubCompatibleID));
3301 length = sizeof(*desc);
3302 }
3303 break;
3304 case FFS_OS_DESC_EXT_PROP: {
3305 struct usb_ext_prop_desc *desc = data;
3306 struct usb_os_desc_table *t;
3307 struct usb_os_desc_ext_prop *ext_prop;
3308 char *ext_prop_name;
3309 char *ext_prop_data;
3310
3311 t = &func->function.os_desc_table[h->interface];
3312 t->if_id = func->interfaces_nums[h->interface];
3313
3314 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
3315 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
3316
3317 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
3318 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
3319 ext_prop->data_len = le32_to_cpu(*(u32 *)
3320 usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
3321 length = ext_prop->name_len + ext_prop->data_len + 14;
3322
3323 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
3324 func->ffs->ms_os_descs_ext_prop_name_avail +=
3325 ext_prop->name_len;
3326
3327 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
3328 func->ffs->ms_os_descs_ext_prop_data_avail +=
3329 ext_prop->data_len;
3330 memcpy(ext_prop_data,
3331 usb_ext_prop_data_ptr(data, ext_prop->name_len),
3332 ext_prop->data_len);
3333 /* unicode data reported to the host as "WCHAR"s */
3334 switch (ext_prop->type) {
3335 case USB_EXT_PROP_UNICODE:
3336 case USB_EXT_PROP_UNICODE_ENV:
3337 case USB_EXT_PROP_UNICODE_LINK:
3338 case USB_EXT_PROP_UNICODE_MULTI:
3339 ext_prop->data_len *= 2;
3340 break;
3341 }
3342 ext_prop->data = ext_prop_data;
3343
3344 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
3345 ext_prop->name_len);
3346 /* property name reported to the host as "WCHAR"s */
3347 ext_prop->name_len *= 2;
3348 ext_prop->name = ext_prop_name;
3349
3350 t->os_desc->ext_prop_len +=
3351 ext_prop->name_len + ext_prop->data_len + 14;
3352 ++t->os_desc->ext_prop_count;
3353 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
3354 }
3355 break;
3356 default:
3357 pr_vdebug("unknown descriptor: %d\n", type);
3358 }
3359
Hemant Kumarde406b72016-07-28 11:51:07 -07003360 ffs_log("exit");
3361
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003362 return length;
3363}
3364
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003365static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
3366 struct usb_configuration *c)
3367{
3368 struct ffs_function *func = ffs_func_from_usb(f);
3369 struct f_fs_opts *ffs_opts =
3370 container_of(f->fi, struct f_fs_opts, func_inst);
3371 int ret;
3372
3373 ENTER();
3374
Hemant Kumarde406b72016-07-28 11:51:07 -07003375 ffs_log("enter");
3376
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003377 /*
3378 * Legacy gadget triggers binding in functionfs_ready_callback,
3379 * which already uses locking; taking the same lock here would
3380 * cause a deadlock.
3381 *
3382 * Configfs-enabled gadgets however do need ffs_dev_lock.
3383 */
3384 if (!ffs_opts->no_configfs)
3385 ffs_dev_lock();
3386 ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
3387 func->ffs = ffs_opts->dev->ffs_data;
3388 if (!ffs_opts->no_configfs)
3389 ffs_dev_unlock();
3390 if (ret)
3391 return ERR_PTR(ret);
3392
3393 func->conf = c;
3394 func->gadget = c->cdev->gadget;
3395
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003396 /*
3397 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
3398 * configurations are bound in sequence with list_for_each_entry,
3399 * in each configuration its functions are bound in sequence
3400 * with list_for_each_entry, so we assume no race condition
3401 * with regard to ffs_opts->bound access
3402 */
3403 if (!ffs_opts->refcnt) {
3404 ret = functionfs_bind(func->ffs, c->cdev);
3405 if (ret)
3406 return ERR_PTR(ret);
3407 }
3408 ffs_opts->refcnt++;
3409 func->function.strings = func->ffs->stringtabs;
3410
Hemant Kumarde406b72016-07-28 11:51:07 -07003411 ffs_log("exit");
3412
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003413 return ffs_opts;
3414}
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003415
3416static int _ffs_func_bind(struct usb_configuration *c,
3417 struct usb_function *f)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003418{
3419 struct ffs_function *func = ffs_func_from_usb(f);
3420 struct ffs_data *ffs = func->ffs;
3421
3422 const int full = !!func->ffs->fs_descs_count;
Jack Pham8bedacf2018-01-24 00:11:53 -08003423 const int high = !!func->ffs->hs_descs_count;
3424 const int super = !!func->ffs->ss_descs_count;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003425
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003426 int fs_len, hs_len, ss_len, ret, i;
Dan Carpenter0015f912016-05-28 07:48:10 +03003427 struct ffs_ep *eps_ptr;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003428
3429 /* Make it a single chunk, less management later on */
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003430 vla_group(d);
3431 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
3432 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
3433 full ? ffs->fs_descs_count + 1 : 0);
3434 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
3435 high ? ffs->hs_descs_count + 1 : 0);
Manu Gautam8d4e8972014-02-28 16:50:22 +05303436 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
3437 super ? ffs->ss_descs_count + 1 : 0);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003438 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003439 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
3440 c->cdev->use_os_string ? ffs->interfaces_count : 0);
3441 vla_item_with_sz(d, char[16], ext_compat,
3442 c->cdev->use_os_string ? ffs->interfaces_count : 0);
3443 vla_item_with_sz(d, struct usb_os_desc, os_desc,
3444 c->cdev->use_os_string ? ffs->interfaces_count : 0);
3445 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
3446 ffs->ms_os_descs_ext_prop_count);
3447 vla_item_with_sz(d, char, ext_prop_name,
3448 ffs->ms_os_descs_ext_prop_name_len);
3449 vla_item_with_sz(d, char, ext_prop_data,
3450 ffs->ms_os_descs_ext_prop_data_len);
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05303451 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003452 char *vlabuf;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003453
3454 ENTER();
3455
Hemant Kumarde406b72016-07-28 11:51:07 -07003456 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
3457 ffs->setup_state, ffs->flags);
3458
Manu Gautam8d4e8972014-02-28 16:50:22 +05303459 /* Has descriptors only for speeds gadget does not support */
3460 if (unlikely(!(full | high | super)))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003461 return -ENOTSUPP;
3462
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003463 /* Allocate a single chunk, less management later on */
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003464 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003465 if (unlikely(!vlabuf))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003466 return -ENOMEM;
3467
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003468 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
3469 ffs->ms_os_descs_ext_prop_name_avail =
3470 vla_ptr(vlabuf, d, ext_prop_name);
3471 ffs->ms_os_descs_ext_prop_data_avail =
3472 vla_ptr(vlabuf, d, ext_prop_data);
3473
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05303474 /* Copy descriptors */
3475 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
3476 ffs->raw_descs_length);
Manu Gautam8d4e8972014-02-28 16:50:22 +05303477
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003478 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
Dan Carpenter0015f912016-05-28 07:48:10 +03003479 eps_ptr = vla_ptr(vlabuf, d, eps);
3480 for (i = 0; i < ffs->eps_count; i++)
3481 eps_ptr[i].num = -1;
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003482
3483 /* Save pointers
3484 * d_eps == vlabuf, func->eps used to kfree vlabuf later
3485 */
3486 func->eps = vla_ptr(vlabuf, d, eps);
3487 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003488
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003489 /*
3490 * Go through all the endpoint descriptors and allocate
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003491 * endpoints first, so that later we can rewrite the endpoint
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003492	 * numbers without worrying that an endpoint may be described later on.
3493 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003494 if (likely(full)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003495 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
Manu Gautam8d4e8972014-02-28 16:50:22 +05303496 fs_len = ffs_do_descs(ffs->fs_descs_count,
3497 vla_ptr(vlabuf, d, raw_descs),
3498 d_raw_descs__sz,
3499 __ffs_func_bind_do_descs, func);
3500 if (unlikely(fs_len < 0)) {
3501 ret = fs_len;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003502 goto error;
Manu Gautam8d4e8972014-02-28 16:50:22 +05303503 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003504 } else {
Manu Gautam8d4e8972014-02-28 16:50:22 +05303505 fs_len = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003506 }
3507
3508 if (likely(high)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003509 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
Manu Gautam8d4e8972014-02-28 16:50:22 +05303510 hs_len = ffs_do_descs(ffs->hs_descs_count,
3511 vla_ptr(vlabuf, d, raw_descs) + fs_len,
3512 d_raw_descs__sz - fs_len,
3513 __ffs_func_bind_do_descs, func);
3514 if (unlikely(hs_len < 0)) {
3515 ret = hs_len;
3516 goto error;
3517 }
3518 } else {
3519 hs_len = 0;
3520 }
3521
3522 if (likely(super)) {
3523 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003524 ss_len = ffs_do_descs(ffs->ss_descs_count,
Manu Gautam8d4e8972014-02-28 16:50:22 +05303525 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
3526 d_raw_descs__sz - fs_len - hs_len,
3527 __ffs_func_bind_do_descs, func);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003528 if (unlikely(ss_len < 0)) {
3529 ret = ss_len;
Robert Baldyga88548942013-09-27 12:28:54 +02003530 goto error;
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003531 }
3532 } else {
3533 ss_len = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003534 }
3535
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003536 /*
3537 * Now handle interface numbers allocation and interface and
3538 * endpoint numbers rewriting. We can do that in one go
3539 * now.
3540 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003541 ret = ffs_do_descs(ffs->fs_descs_count +
Manu Gautam8d4e8972014-02-28 16:50:22 +05303542 (high ? ffs->hs_descs_count : 0) +
3543 (super ? ffs->ss_descs_count : 0),
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01003544 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003545 __ffs_func_bind_do_nums, func);
3546 if (unlikely(ret < 0))
3547 goto error;
3548
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003549 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
Jim Linc6010c82016-05-13 20:32:16 +08003550 if (c->cdev->use_os_string) {
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003551 for (i = 0; i < ffs->interfaces_count; ++i) {
3552 struct usb_os_desc *desc;
3553
3554 desc = func->function.os_desc_table[i].os_desc =
3555 vla_ptr(vlabuf, d, os_desc) +
3556 i * sizeof(struct usb_os_desc);
3557 desc->ext_compat_id =
3558 vla_ptr(vlabuf, d, ext_compat) + i * 16;
3559 INIT_LIST_HEAD(&desc->ext_prop);
3560 }
Jim Linc6010c82016-05-13 20:32:16 +08003561 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
3562 vla_ptr(vlabuf, d, raw_descs) +
3563 fs_len + hs_len + ss_len,
3564 d_raw_descs__sz - fs_len - hs_len -
3565 ss_len,
3566 __ffs_func_bind_do_os_desc, func);
3567 if (unlikely(ret < 0))
3568 goto error;
3569 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02003570 func->function.os_desc_n =
3571 c->cdev->use_os_string ? ffs->interfaces_count : 0;
3572
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003573 /* And we're done */
3574 ffs_event_add(ffs, FUNCTIONFS_BIND);
Hemant Kumarde406b72016-07-28 11:51:07 -07003575
3576 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
3577 ffs->setup_state, ffs->flags);
3578
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003579 return 0;
3580
3581error:
3582 /* XXX Do we need to release all claimed endpoints here? */
Hemant Kumarde406b72016-07-28 11:51:07 -07003583 ffs_log("exit: ret %d", ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003584 return ret;
3585}
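/*
 * Design note (illustrative): the vla_group()/vla_item_with_sz()/vla_ptr()
 * helpers used in _ffs_func_bind() lay every per-bind array -- the ffs_ep
 * array, descriptor pointer tables, OS descriptor tables and the raw
 * descriptor copy -- out in a single kzalloc'ed buffer, so the unbind path
 * only has to kfree(func->eps) once instead of freeing each array
 * separately.
 */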
3586
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003587static int ffs_func_bind(struct usb_configuration *c,
3588 struct usb_function *f)
3589{
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003590 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
Robert Baldyga55d81122015-07-13 11:03:50 +02003591 struct ffs_function *func = ffs_func_from_usb(f);
3592 int ret;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003593
Hemant Kumarde406b72016-07-28 11:51:07 -07003594 ffs_log("enter");
3595
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003596 if (IS_ERR(ffs_opts))
3597 return PTR_ERR(ffs_opts);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003598
Robert Baldyga55d81122015-07-13 11:03:50 +02003599 ret = _ffs_func_bind(c, f);
3600 if (ret && !--ffs_opts->refcnt)
3601 functionfs_unbind(func->ffs);
3602
Hemant Kumarde406b72016-07-28 11:51:07 -07003603 ffs_log("exit: ret %d", ret);
3604
Robert Baldyga55d81122015-07-13 11:03:50 +02003605 return ret;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003606}
3607
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003608
3609/* Other USB function hooks *************************************************/
3610
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003611static void ffs_reset_work(struct work_struct *work)
3612{
3613 struct ffs_data *ffs = container_of(work,
3614 struct ffs_data, reset_work);
Hemant Kumarde406b72016-07-28 11:51:07 -07003615
3616 ffs_log("enter");
3617
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003618 ffs_data_reset(ffs);
Hemant Kumarde406b72016-07-28 11:51:07 -07003619
3620 ffs_log("exit");
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003621}
3622
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003623static int ffs_func_set_alt(struct usb_function *f,
3624 unsigned interface, unsigned alt)
3625{
3626 struct ffs_function *func = ffs_func_from_usb(f);
3627 struct ffs_data *ffs = func->ffs;
3628 int ret = 0, intf;
3629
Hemant Kumarde406b72016-07-28 11:51:07 -07003630 ffs_log("enter");
3631
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003632 if (alt != (unsigned)-1) {
3633 intf = ffs_func_revmap_intf(func, interface);
3634 if (unlikely(intf < 0))
3635 return intf;
3636 }
3637
Vijayavardhan Vennapusa0b192d22016-11-10 15:29:47 -08003638 if (ffs->func) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003639 ffs_func_eps_disable(ffs->func);
Vijayavardhan Vennapusa0b192d22016-11-10 15:29:47 -08003640 ffs->func = NULL;
Ajay Agarwaldd11a242017-04-11 12:43:56 +05303641 /* matching put to allow LPM on disconnect */
3642 usb_gadget_autopm_put_async(ffs->gadget);
Vijayavardhan Vennapusa0b192d22016-11-10 15:29:47 -08003643 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003644
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003645 if (ffs->state == FFS_DEACTIVATED) {
3646 ffs->state = FFS_CLOSING;
3647 INIT_WORK(&ffs->reset_work, ffs_reset_work);
3648 schedule_work(&ffs->reset_work);
3649 return -ENODEV;
3650 }
3651
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003652 if (ffs->state != FFS_ACTIVE)
3653 return -ENODEV;
3654
3655 if (alt == (unsigned)-1) {
3656 ffs->func = NULL;
3657 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
3658 return 0;
3659 }
3660
3661 ffs->func = func;
3662 ret = ffs_func_eps_enable(func);
Manu Gautam7b615362017-02-06 17:21:12 -08003663 if (likely(ret >= 0)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003664 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
Manu Gautam7b615362017-02-06 17:21:12 -08003665 /* Disable USB LPM later on bus_suspend */
3666 usb_gadget_autopm_get_async(ffs->gadget);
3667 }
Hemant Kumarde406b72016-07-28 11:51:07 -07003668 ffs_log("exit: ret %d", ret);
3669
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003670 return ret;
3671}
3672
3673static void ffs_func_disable(struct usb_function *f)
3674{
Hemant Kumarde406b72016-07-28 11:51:07 -07003675 ffs_log("enter");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003676 ffs_func_set_alt(f, 0, (unsigned)-1);
Ajay Agarwaldd11a242017-04-11 12:43:56 +05303677
Hemant Kumarde406b72016-07-28 11:51:07 -07003678 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003679}
3680
3681static int ffs_func_setup(struct usb_function *f,
3682 const struct usb_ctrlrequest *creq)
3683{
3684 struct ffs_function *func = ffs_func_from_usb(f);
3685 struct ffs_data *ffs = func->ffs;
3686 unsigned long flags;
3687 int ret;
3688
3689 ENTER();
3690
Hemant Kumarde406b72016-07-28 11:51:07 -07003691 ffs_log("enter");
3692
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003693 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
3694 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
3695 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
3696 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
3697 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003698
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003699 /*
3700	 * Most requests directed to an interface go through here
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003701	 * (notable exceptions are set/get interface) so we need to
3702	 * handle them. All others are either handled by composite or
3703	 * passed to usb_configuration->setup() (if one is set). No
3704	 * matter, we will handle requests directed to an endpoint here
Felix Hädicke54dfce62016-06-22 01:12:07 +02003705 * as well (as it's straightforward). Other request recipient
3706 * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
3707 * is being used.
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003708 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003709 if (ffs->state != FFS_ACTIVE)
3710 return -ENODEV;
3711
3712 switch (creq->bRequestType & USB_RECIP_MASK) {
3713 case USB_RECIP_INTERFACE:
3714 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
3715 if (unlikely(ret < 0))
3716 return ret;
3717 break;
3718
3719 case USB_RECIP_ENDPOINT:
3720 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
3721 if (unlikely(ret < 0))
3722 return ret;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02003723 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3724 ret = func->ffs->eps_addrmap[ret];
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003725 break;
3726
3727 default:
Felix Hädicke54dfce62016-06-22 01:12:07 +02003728 if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
3729 ret = le16_to_cpu(creq->wIndex);
3730 else
3731 return -EOPNOTSUPP;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003732 }
3733
3734 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3735 ffs->ev.setup = *creq;
3736 ffs->ev.setup.wIndex = cpu_to_le16(ret);
3737 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3738 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3739
Hemant Kumarde406b72016-07-28 11:51:07 -07003740 ffs_log("exit");
3741
Jerry Zhang9e100432018-07-02 12:48:08 -07003742 return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003743}
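/*
 * Usage note (illustrative, assuming the usual FunctionFS ep0 flow): once
 * the daemon reads the FUNCTIONFS_SETUP event queued above, it handles the
 * data stage on the same ep0 file descriptor -- write() for an IN request,
 * read() for an OUT request -- which completes the control transfer from
 * the driver's point of view.
 */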
3744
Felix Hädicke54dfce62016-06-22 01:12:07 +02003745static bool ffs_func_req_match(struct usb_function *f,
Felix Hädicke1a00b452016-06-22 01:12:08 +02003746 const struct usb_ctrlrequest *creq,
3747 bool config0)
Felix Hädicke54dfce62016-06-22 01:12:07 +02003748{
3749 struct ffs_function *func = ffs_func_from_usb(f);
3750
Liangliang Lubfc6d022017-12-12 17:18:16 +08003751 if (!test_bit(FFS_FL_BOUND, &func->ffs->flags)) {
3752		ffs_log("ffs function not bound yet.\n");
3753 return false;
3754 }
3755
Felix Hädicke4368c282016-06-22 01:12:09 +02003756 if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
Felix Hädicke1a00b452016-06-22 01:12:08 +02003757 return false;
3758
Felix Hädicke54dfce62016-06-22 01:12:07 +02003759 switch (creq->bRequestType & USB_RECIP_MASK) {
3760 case USB_RECIP_INTERFACE:
Felix Hädicke05e78c62016-11-04 00:23:26 +01003761 return (ffs_func_revmap_intf(func,
3762 le16_to_cpu(creq->wIndex)) >= 0);
Felix Hädicke54dfce62016-06-22 01:12:07 +02003763 case USB_RECIP_ENDPOINT:
Felix Hädicke05e78c62016-11-04 00:23:26 +01003764 return (ffs_func_revmap_ep(func,
3765 le16_to_cpu(creq->wIndex)) >= 0);
Felix Hädicke54dfce62016-06-22 01:12:07 +02003766 default:
3767 return (bool) (func->ffs->user_flags &
3768 FUNCTIONFS_ALL_CTRL_RECIP);
3769 }
3770}
3771
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003772static void ffs_func_suspend(struct usb_function *f)
3773{
3774 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07003775
3776 ffs_log("enter");
3777
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003778 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
Hemant Kumarde406b72016-07-28 11:51:07 -07003779
3780 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003781}
3782
3783static void ffs_func_resume(struct usb_function *f)
3784{
3785 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07003786
3787 ffs_log("enter");
3788
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003789 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
Hemant Kumarde406b72016-07-28 11:51:07 -07003790
3791 ffs_log("exit");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003792}
3793
3794
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003795/* Endpoint and interface numbers reverse mapping ***************************/
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003796
3797static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3798{
3799 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3800 return num ? num : -EDOM;
3801}
3802
3803static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3804{
3805 short *nums = func->interfaces_nums;
3806 unsigned count = func->ffs->interfaces_count;
3807
Hemant Kumarde406b72016-07-28 11:51:07 -07003808 ffs_log("enter");
3809
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003810 for (; count; --count, ++nums) {
3811 if (*nums >= 0 && *nums == intf)
3812 return nums - func->interfaces_nums;
3813 }
3814
Hemant Kumarde406b72016-07-28 11:51:07 -07003815 ffs_log("exit");
3816
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003817 return -EDOM;
3818}
3819
3820
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003821/* Devices management *******************************************************/
3822
3823static LIST_HEAD(ffs_devices);
3824
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003825static struct ffs_dev *_ffs_do_find_dev(const char *name)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003826{
3827 struct ffs_dev *dev;
3828
Hemant Kumarde406b72016-07-28 11:51:07 -07003829 ffs_log("enter");
3830
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003831 list_for_each_entry(dev, &ffs_devices, entry) {
3832 if (!dev->name || !name)
3833 continue;
3834 if (strcmp(dev->name, name) == 0)
3835 return dev;
3836 }
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003837
Hemant Kumarde406b72016-07-28 11:51:07 -07003838 ffs_log("exit");
3839
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003840 return NULL;
3841}
3842
3843/*
3844 * ffs_lock must be taken by the caller of this function
3845 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003846static struct ffs_dev *_ffs_get_single_dev(void)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003847{
3848 struct ffs_dev *dev;
3849
Hemant Kumarde406b72016-07-28 11:51:07 -07003850 ffs_log("enter");
3851
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003852 if (list_is_singular(&ffs_devices)) {
3853 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3854 if (dev->single)
3855 return dev;
3856 }
3857
Hemant Kumarde406b72016-07-28 11:51:07 -07003858 ffs_log("exit");
3859
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003860 return NULL;
3861}
3862
3863/*
3864 * ffs_lock must be taken by the caller of this function
3865 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003866static struct ffs_dev *_ffs_find_dev(const char *name)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003867{
3868 struct ffs_dev *dev;
3869
Hemant Kumarde406b72016-07-28 11:51:07 -07003870 ffs_log("enter");
3871
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003872 dev = _ffs_get_single_dev();
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003873 if (dev)
3874 return dev;
3875
Hemant Kumarde406b72016-07-28 11:51:07 -07003876 dev = _ffs_do_find_dev(name);
3877
3878 ffs_log("exit");
3879
3880 return dev;
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003881}
3882
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003883/* Configfs support *********************************************************/
3884
3885static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3886{
3887 return container_of(to_config_group(item), struct f_fs_opts,
3888 func_inst.group);
3889}
3890
3891static void ffs_attr_release(struct config_item *item)
3892{
3893 struct f_fs_opts *opts = to_ffs_opts(item);
3894
3895 usb_put_function_instance(&opts->func_inst);
3896}
3897
3898static struct configfs_item_operations ffs_item_ops = {
3899 .release = ffs_attr_release,
3900};
3901
3902static struct config_item_type ffs_func_type = {
3903 .ct_item_ops = &ffs_item_ops,
3904 .ct_owner = THIS_MODULE,
3905};
3906
3907
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003908/* Function registration interface ******************************************/
3909
Liangliang Luececc4e2017-12-27 13:54:33 +08003910static struct ffs_inst_status *name_to_inst_status(
3911 const char *inst_name, bool create_inst)
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003912{
Liangliang Luececc4e2017-12-27 13:54:33 +08003913 struct ffs_inst_status *inst_status;
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003914
Liangliang Luececc4e2017-12-27 13:54:33 +08003915 list_for_each_entry(inst_status, &inst_list, list) {
3916 if (!strncasecmp(inst_status->inst_name,
3917 inst_name, strlen(inst_name)))
3918 return inst_status;
3919 }
3920
3921 if (!create_inst)
3922 return ERR_PTR(-ENODEV);
3923
3924 inst_status = kzalloc(sizeof(struct ffs_inst_status),
3925 GFP_KERNEL);
3926 if (!inst_status)
3927 return ERR_PTR(-ENOMEM);
3928
3929 mutex_init(&inst_status->ffs_lock);
3930	snprintf(inst_status->inst_name, INST_NAME_SIZE, "%s", inst_name);
3931 list_add_tail(&inst_status->list, &inst_list);
3932
3933 return inst_status;
3934}
3935
3936static int ffs_inst_exist_check(const char *inst_name)
3937{
3938 struct ffs_inst_status *inst_status;
3939
3940 inst_status = name_to_inst_status(inst_name, false);
3941 if (IS_ERR(inst_status)) {
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003942 pr_err_ratelimited(
Liangliang Luececc4e2017-12-27 13:54:33 +08003943 "%s: failed to find instance (%s)\n",
3944 __func__, inst_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003945 return -ENODEV;
3946 }
3947
Liangliang Luececc4e2017-12-27 13:54:33 +08003948 mutex_lock(&inst_status->ffs_lock);
3949
3950 if (unlikely(inst_status->inst_exist == false)) {
3951 mutex_unlock(&inst_status->ffs_lock);
3952 pr_err_ratelimited(
3953 "%s: f_fs instance (%s) has been freed already.\n",
3954 __func__, inst_name);
3955 return -ENODEV;
3956 }
3957
3958 mutex_unlock(&inst_status->ffs_lock);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003959
3960 return 0;
3961}
3962
Liangliang Luececc4e2017-12-27 13:54:33 +08003963static void ffs_inst_clean(struct f_fs_opts *opts,
3964 const char *inst_name)
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003965{
Liangliang Luececc4e2017-12-27 13:54:33 +08003966 struct ffs_inst_status *inst_status;
3967
3968 inst_status = name_to_inst_status(inst_name, false);
3969 if (IS_ERR(inst_status)) {
3970 pr_err_ratelimited(
3971 "%s: failed to find instance (%s)\n",
3972 __func__, inst_name);
3973 return;
3974 }
3975
3976 inst_status->opts = NULL;
3977
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003978 ffs_dev_lock();
3979 _ffs_free_dev(opts->dev);
3980 ffs_dev_unlock();
3981 kfree(opts);
3982}
3983
Liangliang Luececc4e2017-12-27 13:54:33 +08003984static void ffs_inst_clean_delay(const char *inst_name)
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003985{
Liangliang Luececc4e2017-12-27 13:54:33 +08003986 struct ffs_inst_status *inst_status;
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003987
Liangliang Luececc4e2017-12-27 13:54:33 +08003988 inst_status = name_to_inst_status(inst_name, false);
3989 if (IS_ERR(inst_status)) {
3990 pr_err_ratelimited(
3991 "%s: failed to find (%s) instance\n",
3992 __func__, inst_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08003993 return;
3994 }
3995
Liangliang Luececc4e2017-12-27 13:54:33 +08003996 mutex_lock(&inst_status->ffs_lock);
3997
3998 if (unlikely(inst_status->inst_exist == false)) {
3999 if (inst_status->opts) {
4000 ffs_inst_clean(inst_status->opts, inst_name);
4001			pr_err_ratelimited("%s: delayed freeing of instance memory\n",
4002 __func__);
4003 }
4004 mutex_unlock(&inst_status->ffs_lock);
4005 return;
4006 }
4007
4008 mutex_unlock(&inst_status->ffs_lock);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004009}
4010
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004011static void ffs_free_inst(struct usb_function_instance *f)
4012{
4013 struct f_fs_opts *opts;
Liangliang Luececc4e2017-12-27 13:54:33 +08004014 struct ffs_inst_status *inst_status;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004015
4016 opts = to_f_fs_opts(f);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004017
Liangliang Luececc4e2017-12-27 13:54:33 +08004018 inst_status = name_to_inst_status(opts->dev->name, false);
4019 if (IS_ERR(inst_status)) {
4020 ffs_log("failed to find (%s) instance\n",
4021 opts->dev->name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004022 return;
4023 }
4024
Liangliang Luececc4e2017-12-27 13:54:33 +08004025 mutex_lock(&inst_status->ffs_lock);
4026 if (opts->dev->ffs_data
4027 && atomic_read(&opts->dev->ffs_data->opened)) {
4028 inst_status->inst_exist = false;
4029 mutex_unlock(&inst_status->ffs_lock);
4030		ffs_log("Dev is open, will free memory when dev (%s) closes\n",
4031 opts->dev->name);
4032 return;
4033 }
4034
4035 ffs_inst_clean(opts, opts->dev->name);
4036 inst_status->inst_exist = false;
4037 mutex_unlock(&inst_status->ffs_lock);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004038}
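/*
 * Design note (illustrative): the inst_status bookkeeping above lets an
 * instance outlive "rmdir functions/ffs.<name>" while a daemon still holds
 * the ep0 file open; the actual release is deferred to ffs_inst_clean() /
 * ffs_inst_clean_delay() so memory that may still be referenced is not
 * freed early.
 */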
4039
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004040#define MAX_INST_NAME_LEN 40
4041
4042static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
4043{
Liangliang Luececc4e2017-12-27 13:54:33 +08004044 struct f_fs_opts *opts, *opts_prev;
4045 struct ffs_data *ffs_data_tmp;
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004046 char *ptr;
4047 const char *tmp;
4048 int name_len, ret;
Liangliang Luececc4e2017-12-27 13:54:33 +08004049 struct ffs_inst_status *inst_status;
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004050
4051 name_len = strlen(name) + 1;
4052 if (name_len > MAX_INST_NAME_LEN)
4053 return -ENAMETOOLONG;
4054
4055 ptr = kstrndup(name, name_len, GFP_KERNEL);
4056 if (!ptr)
4057 return -ENOMEM;
4058
Liangliang Luececc4e2017-12-27 13:54:33 +08004059 inst_status = name_to_inst_status(ptr, true);
4060 if (IS_ERR(inst_status)) {
4061 ffs_log("failed to create status struct for (%s) instance\n",
4062 ptr);
4063 return -EINVAL;
4064 }
4065
4066 mutex_lock(&inst_status->ffs_lock);
4067 opts_prev = inst_status->opts;
4068 if (opts_prev) {
4069 mutex_unlock(&inst_status->ffs_lock);
4070		ffs_log("instance (%s): previous instance not freed yet\n",
4071 inst_status->inst_name);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004072 return -EBUSY;
4073 }
Liangliang Luececc4e2017-12-27 13:54:33 +08004074 mutex_unlock(&inst_status->ffs_lock);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004075
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004076 opts = to_f_fs_opts(fi);
4077 tmp = NULL;
4078
4079 ffs_dev_lock();
4080
4081 tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
4082 ret = _ffs_name_dev(opts->dev, ptr);
4083 if (ret) {
4084 kfree(ptr);
4085 ffs_dev_unlock();
4086 return ret;
4087 }
4088 opts->dev->name_allocated = true;
4089
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004090 /*
4091	 * If the ffs instance has been freed and re-created, the newly
4092	 * allocated opts->dev needs its ffs_data pointer restored, and
4093	 * ffs_data->private_data must be updated to point at the new
4094	 * opts->dev address.
4095 */
Liangliang Luececc4e2017-12-27 13:54:33 +08004096 ffs_data_tmp = inst_status->ffs_data;
4097 if (ffs_data_tmp)
4098 opts->dev->ffs_data = ffs_data_tmp;
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004099
4100 if (opts->dev->ffs_data)
4101 opts->dev->ffs_data->private_data = opts->dev;
4102
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004103 ffs_dev_unlock();
4104
4105 kfree(tmp);
4106
Liangliang Luececc4e2017-12-27 13:54:33 +08004107 mutex_lock(&inst_status->ffs_lock);
4108 inst_status->inst_exist = true;
4109 inst_status->opts = opts;
4110 mutex_unlock(&inst_status->ffs_lock);
Liangliang Lu55ba19d2017-12-07 14:42:55 +08004111
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004112 return 0;
4113}
4114
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004115static struct usb_function_instance *ffs_alloc_inst(void)
4116{
4117 struct f_fs_opts *opts;
4118 struct ffs_dev *dev;
4119
4120 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
4121 if (!opts)
4122 return ERR_PTR(-ENOMEM);
4123
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004124 opts->func_inst.set_inst_name = ffs_set_inst_name;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004125 opts->func_inst.free_func_inst = ffs_free_inst;
4126 ffs_dev_lock();
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004127 dev = _ffs_alloc_dev();
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004128 ffs_dev_unlock();
4129 if (IS_ERR(dev)) {
4130 kfree(opts);
4131 return ERR_CAST(dev);
4132 }
4133 opts->dev = dev;
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004134 dev->opts = opts;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004135
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004136 config_group_init_type_name(&opts->func_inst.group, "",
4137 &ffs_func_type);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004138 return &opts->func_inst;
4139}
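/*
 * Usage note (illustrative): with the configfs gadget, ffs_alloc_inst()
 * and ffs_set_inst_name() are reached by creating a function directory
 * such as "functions/ffs.adb" under the gadget's configfs tree; the text
 * after the dot becomes the instance name tracked by
 * name_to_inst_status().  The "adb" name is only an example.
 */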
4140
4141static void ffs_free(struct usb_function *f)
4142{
4143 kfree(ffs_func_from_usb(f));
4144}
4145
4146static void ffs_func_unbind(struct usb_configuration *c,
4147 struct usb_function *f)
4148{
4149 struct ffs_function *func = ffs_func_from_usb(f);
4150 struct ffs_data *ffs = func->ffs;
4151 struct f_fs_opts *opts =
4152 container_of(f->fi, struct f_fs_opts, func_inst);
4153 struct ffs_ep *ep = func->eps;
4154 unsigned count = ffs->eps_count;
4155 unsigned long flags;
4156
4157 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07004158
4159 ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
4160 ffs->setup_state, ffs->flags);
4161
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004162 if (ffs->func == func) {
4163 ffs_func_eps_disable(func);
4164 ffs->func = NULL;
4165 }
4166
4167 if (!--opts->refcnt)
4168 functionfs_unbind(ffs);
4169
4170 /* cleanup after autoconfig */
4171 spin_lock_irqsave(&func->ffs->eps_lock, flags);
4172 do {
4173 if (ep->ep && ep->req)
4174 usb_ep_free_request(ep->ep, ep->req);
4175 ep->req = NULL;
Manu Gautamb484e382016-11-11 09:58:16 -08004176 ep->ep = NULL;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004177 ++ep;
4178 } while (--count);
4179 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
4180 kfree(func->eps);
4181 func->eps = NULL;
4182 /*
4183 * eps, descriptors and interfaces_nums are allocated in the
4184 * same chunk so only one free is required.
4185 */
4186 func->function.fs_descriptors = NULL;
4187 func->function.hs_descriptors = NULL;
Manu Gautam8d4e8972014-02-28 16:50:22 +05304188 func->function.ss_descriptors = NULL;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004189 func->interfaces_nums = NULL;
4190
4191 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
Hemant Kumarde406b72016-07-28 11:51:07 -07004192
4193 ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
4194 ffs->setup_state, ffs->flags);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004195}
4196
4197static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
4198{
4199 struct ffs_function *func;
4200
4201 ENTER();
4202
4203 func = kzalloc(sizeof(*func), GFP_KERNEL);
4204 if (unlikely(!func))
4205 return ERR_PTR(-ENOMEM);
4206
4207 func->function.name = "Function FS Gadget";
4208
4209 func->function.bind = ffs_func_bind;
4210 func->function.unbind = ffs_func_unbind;
4211 func->function.set_alt = ffs_func_set_alt;
4212 func->function.disable = ffs_func_disable;
4213 func->function.setup = ffs_func_setup;
Felix Hädicke54dfce62016-06-22 01:12:07 +02004214 func->function.req_match = ffs_func_req_match;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004215 func->function.suspend = ffs_func_suspend;
4216 func->function.resume = ffs_func_resume;
4217 func->function.free_func = ffs_free;
4218
4219 return &func->function;
4220}
4221
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004222/*
4223 * ffs_lock must be taken by the caller of this function
4224 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004225static struct ffs_dev *_ffs_alloc_dev(void)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004226{
4227 struct ffs_dev *dev;
4228 int ret;
4229
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004230 if (_ffs_get_single_dev())
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004231 return ERR_PTR(-EBUSY);
4232
4233 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
4234 if (!dev)
4235 return ERR_PTR(-ENOMEM);
4236
4237 if (list_empty(&ffs_devices)) {
4238 ret = functionfs_init();
4239 if (ret) {
4240 kfree(dev);
4241 return ERR_PTR(ret);
4242 }
4243 }
4244
4245 list_add(&dev->entry, &ffs_devices);
4246
4247 return dev;
4248}
4249
4250/*
4251 * ffs_lock must be taken by the caller of this function
4252 * The caller is responsible for "name" being available whenever f_fs needs it
4253 */
4254static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
4255{
4256 struct ffs_dev *existing;
4257
Hemant Kumarde406b72016-07-28 11:51:07 -07004258 ffs_log("enter");
4259
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004260 existing = _ffs_do_find_dev(name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004261 if (existing)
4262 return -EBUSY;
Andrzej Pietrasiewiczab13cb02014-01-13 16:49:36 +01004263
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004264 dev->name = name;
4265
Hemant Kumarde406b72016-07-28 11:51:07 -07004266 ffs_log("exit");
4267
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004268 return 0;
4269}
4270
4271/*
4272 * The caller is responsible for "name" being available whenever f_fs needs it
4273 */
4274int ffs_name_dev(struct ffs_dev *dev, const char *name)
4275{
4276 int ret;
4277
Hemant Kumarde406b72016-07-28 11:51:07 -07004278 ffs_log("enter");
4279
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004280 ffs_dev_lock();
4281 ret = _ffs_name_dev(dev, name);
4282 ffs_dev_unlock();
4283
Hemant Kumarde406b72016-07-28 11:51:07 -07004284 ffs_log("exit");
4285
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004286 return ret;
4287}
Felipe Balbi0700faa2014-04-01 13:19:32 -05004288EXPORT_SYMBOL_GPL(ffs_name_dev);
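
/*
 * Usage sketch, illustrative only and not part of the original driver:
 * callers of this exported API (e.g. a legacy gadget driver) must keep the
 * name string alive for as long as the ffs_dev exists, for instance a
 * static string or a buffer freed only after the device goes away.  The
 * variables below are hypothetical; to_f_fs_opts() is assumed to be the
 * helper from u_fs.h, and ffs_name_dev() takes ffs_lock itself, so no
 * extra locking is needed here:
 *
 *	struct usb_function_instance *fi = usb_get_function_instance("ffs");
 *	struct f_fs_opts *opts = to_f_fs_opts(fi);
 *	static const char name[] = "ffs.adb";
 *	int err = ffs_name_dev(opts->dev, name);
 *	if (err)
 *		usb_put_function_instance(fi);
 */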
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004289
4290int ffs_single_dev(struct ffs_dev *dev)
4291{
4292 int ret;
4293
Hemant Kumarde406b72016-07-28 11:51:07 -07004294 ffs_log("enter");
4295
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004296 ret = 0;
4297 ffs_dev_lock();
4298
4299 if (!list_is_singular(&ffs_devices))
4300 ret = -EBUSY;
4301 else
4302 dev->single = true;
4303
4304 ffs_dev_unlock();
Hemant Kumarde406b72016-07-28 11:51:07 -07004305
4306 ffs_log("exit");
4307
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004308 return ret;
4309}
Felipe Balbi0700faa2014-04-01 13:19:32 -05004310EXPORT_SYMBOL_GPL(ffs_single_dev);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004311
4312/*
4313 * ffs_lock must be taken by the caller of this function
4314 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004315static void _ffs_free_dev(struct ffs_dev *dev)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004316{
4318 ffs_log("enter");
4319
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004320 list_del(&dev->entry);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004321 if (dev->name_allocated)
4322 kfree(dev->name);
Jim Baxter3262ad82016-09-08 11:18:16 +02004323
4324 /* Clear the private_data pointer to stop incorrect dev access */
4325 if (dev->ffs_data)
4326 dev->ffs_data->private_data = NULL;
4327
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004328 kfree(dev);
4329 if (list_empty(&ffs_devices))
4330 functionfs_cleanup();
Hemant Kumarde406b72016-07-28 11:51:07 -07004331
4332 ffs_log("exit");
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004333}
4334
4335static void *ffs_acquire_dev(const char *dev_name)
4336{
4337 struct ffs_dev *ffs_dev;
4338
4339 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07004340
4341 ffs_log("enter");
4342
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004343 ffs_dev_lock();
4344
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01004345 ffs_dev = _ffs_find_dev(dev_name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004346 if (!ffs_dev)
Krzysztof Opasiakd668b4f2014-05-21 14:05:35 +02004347 ffs_dev = ERR_PTR(-ENOENT);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004348 else if (ffs_dev->mounted)
4349 ffs_dev = ERR_PTR(-EBUSY);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004350 else if (ffs_dev->ffs_acquire_dev_callback &&
4351 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
Krzysztof Opasiakd668b4f2014-05-21 14:05:35 +02004352 ffs_dev = ERR_PTR(-ENOENT);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004353 else
4354 ffs_dev->mounted = true;
4355
4356 ffs_dev_unlock();
Hemant Kumarde406b72016-07-28 11:51:07 -07004357
4358 ffs_log("exit");
4359
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004360 return ffs_dev;
4361}
4362
4363static void ffs_release_dev(struct ffs_data *ffs_data)
4364{
4365 struct ffs_dev *ffs_dev;
4366
4367 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07004368
4369 ffs_log("enter");
4370
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004371 ffs_dev_lock();
4372
4373 ffs_dev = ffs_data->private_data;
Andrzej Pietrasiewiczea365922014-01-13 16:49:35 +01004374 if (ffs_dev) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004375 ffs_dev->mounted = false;
Andrzej Pietrasiewiczea365922014-01-13 16:49:35 +01004376
4377 if (ffs_dev->ffs_release_dev_callback)
4378 ffs_dev->ffs_release_dev_callback(ffs_dev);
4379 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004380
4381 ffs_dev_unlock();
Hemant Kumarde406b72016-07-28 11:51:07 -07004382
4383 ffs_log("exit");
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004384}
4385
4386static int ffs_ready(struct ffs_data *ffs)
4387{
4388 struct ffs_dev *ffs_obj;
4389 int ret = 0;
4390
4391 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07004392
4393 ffs_log("enter");
4394
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004395 ffs_dev_lock();
4396
4397 ffs_obj = ffs->private_data;
4398 if (!ffs_obj) {
4399 ret = -EINVAL;
4400 goto done;
4401 }
4402 if (WARN_ON(ffs_obj->desc_ready)) {
4403 ret = -EBUSY;
4404 goto done;
4405 }
4406
4407 ffs_obj->desc_ready = true;
4408 ffs_obj->ffs_data = ffs;
4409
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02004410 if (ffs_obj->ffs_ready_callback) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004411 ret = ffs_obj->ffs_ready_callback(ffs);
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02004412 if (ret)
4413 goto done;
4414 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004415
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02004416 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004417done:
4418 ffs_dev_unlock();
Hemant Kumarde406b72016-07-28 11:51:07 -07004419
4420 ffs_log("exit");
4421
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004422 return ret;
4423}
4424
4425static void ffs_closed(struct ffs_data *ffs)
4426{
4427 struct ffs_dev *ffs_obj;
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01004428 struct f_fs_opts *opts;
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004429
4430 ENTER();
Hemant Kumarde406b72016-07-28 11:51:07 -07004431
4432 ffs_log("enter");
4433
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004434 ffs_dev_lock();
4435
4436 ffs_obj = ffs->private_data;
Hemant Kumar8e42e642016-08-05 15:37:19 -07004437 if (!ffs_obj) {
4438 ffs_dev_unlock();
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004439 goto done;
Hemant Kumar8e42e642016-08-05 15:37:19 -07004440 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004441
4442 ffs_obj->desc_ready = false;
Andrew Gabbasovfd6a7422017-11-08 10:13:15 -07004443 ffs_obj->ffs_data = NULL;
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004444
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02004445 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
4446 ffs_obj->ffs_closed_callback)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004447 ffs_obj->ffs_closed_callback(ffs);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004448
Hemant Kumar8e42e642016-08-05 15:37:19 -07004449 if (ffs_obj->opts) {
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01004450 opts = ffs_obj->opts;
Hemant Kumar8e42e642016-08-05 15:37:19 -07004451 } else {
4452 ffs_dev_unlock();
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01004453 goto done;
Hemant Kumar8e42e642016-08-05 15:37:19 -07004454 }
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01004455
Mayank Rana758d7522016-11-11 10:23:13 -08004456 /* make sure the refcount read below sees an up-to-date value */
4457 smp_mb__before_atomic();
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01004458 if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
Hemant Kumar8e42e642016-08-05 15:37:19 -07004459 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) {
4460 ffs_dev_unlock();
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004461 goto done;
Hemant Kumar8e42e642016-08-05 15:37:19 -07004462 }
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004463
Hemant Kumar8e42e642016-08-05 15:37:19 -07004464 ffs_dev_unlock();
4465
Hemant Kumar2ca83af2016-08-08 16:20:15 -07004466 if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
4467 unregister_gadget_item(opts->
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01004468 func_inst.group.cg_item.ci_parent->ci_parent);
Hemant Kumar2ca83af2016-08-08 16:20:15 -07004469 ffs_log("unreg gadget done");
4470 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004471done:
Hemant Kumarde406b72016-07-28 11:51:07 -07004472 ffs_log("exit");
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01004473}
4474
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02004475/* Misc helper functions ****************************************************/
4476
4477static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
4478{
4479 return nonblock
4480 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
4481 : mutex_lock_interruptible(mutex);
4482}
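
/*
 * Usage sketch, illustrative only: file operations in this driver pass the
 * caller's O_NONBLOCK bit straight through, so a descriptor opened with
 * O_NONBLOCK gets -EAGAIN instead of sleeping on the mutex.  The
 * surrounding code below is hypothetical:
 *
 *	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
 *	if (unlikely(ret < 0))
 *		return ret;
 *	... critical section ...
 *	mutex_unlock(&ffs->mutex);
 */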
4483
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +05304484/**
4485 * ffs_prepare_buffer() - copy userspace buffer into kernel.
4486 * @buf: userspace buffer
4487 * @len: length of the buffer
4488 * @extra_buf_alloc: extra bytes to allocate beyond @len, if required by the UDC
4489 *
4490 * Return: the copied buffer, NULL when @len is zero, or an ERR_PTR() on failure.
4491 */
4492static char *ffs_prepare_buffer(const char __user *buf, size_t len,
4493 size_t extra_buf_alloc)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02004494{
4495 char *data;
4496
4497 if (unlikely(!len))
4498 return NULL;
4499
ChandanaKishori Chiluverued923f32015-08-10 10:17:52 +05304500 data = kmalloc(len + extra_buf_alloc, GFP_KERNEL);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02004501 if (unlikely(!data))
4502 return ERR_PTR(-ENOMEM);
4503
Daniel Walter7fe9a932015-11-18 17:15:49 +01004504 if (unlikely(copy_from_user(data, buf, len))) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02004505 kfree(data);
4506 return ERR_PTR(-EFAULT);
4507 }
4508
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01004509 pr_vdebug("Buffer from user space:\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02004510 ffs_dump_mem("", data, len);
4511
4512 return data;
4513}
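
/*
 * Usage sketch, illustrative only (the calling code shown is hypothetical):
 * the return value is NULL for a zero-length request, an ERR_PTR() on
 * failure, or a kernel buffer that the caller must kfree() when done:
 *
 *	data = ffs_prepare_buffer(buf, len, extra);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... consume up to len bytes from data ...
 *	kfree(data);
 */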
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004514
Liangliang Luececc4e2017-12-27 13:54:33 +08004515static void __exit ffs_exit(void)
4516{
4517 struct ffs_inst_status *inst_status, *inst_status_tmp = NULL;
4518
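	/*
	 * Entries are freed one step behind the iteration cursor so the node
	 * the loop is currently standing on is never freed under it; the
	 * last entry is released by the check after the loop.
	 */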
4519 list_for_each_entry(inst_status, &inst_list, list) {
4520 if (inst_status_tmp) {
4521 list_del(&inst_status_tmp->list);
4522 kfree(inst_status_tmp);
4523 }
4524 inst_status_tmp = inst_status;
4525 }
4526 if (inst_status_tmp) {
4527 list_del(&inst_status_tmp->list);
4528 kfree(inst_status_tmp);
4529 }
4530}
4531module_exit(ffs_exit);
4532
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01004533DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
4534MODULE_LICENSE("GPL");
4535MODULE_AUTHOR("Michal Nazarewicz");