blob: d7acab873836afb88fa34ffde23c838448e014bf [file] [log] [blame]
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002 * f_fs.c -- user mode file system API for USB composite function controllers
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003 *
4 * Copyright (C) 2010 Samsung Electronics
Michal Nazarewicz54b83602012-01-13 15:05:16 +01005 * Author: Michal Nazarewicz <mina86@mina86.com>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02006 *
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01007 * Based on inode.c (GadgetFS) which was:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02008 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020015 */
16
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/blkdev.h>
Randy Dunlapb0608692010-05-10 10:51:36 -070022#include <linux/pagemap.h>
Paul Gortmakerf940fcd2011-05-27 09:56:31 -040023#include <linux/export.h>
Koen Beel560f1182012-05-30 20:43:37 +020024#include <linux/hid.h>
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +010025#include <linux/module.h>
Christoph Hellwige2e40f22015-02-22 08:58:50 -080026#include <linux/uio.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020027#include <asm/unaligned.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020028
29#include <linux/usb/composite.h>
30#include <linux/usb/functionfs.h>
31
Robert Baldyga2e4c7552014-02-10 10:42:44 +010032#include <linux/aio.h>
33#include <linux/mmu_context.h>
Robert Baldyga23de91e2014-02-10 10:42:43 +010034#include <linux/poll.h>
Robert Baldyga5e33f6f2015-01-23 13:41:01 +010035#include <linux/eventfd.h>
Robert Baldyga23de91e2014-02-10 10:42:43 +010036
Andrzej Pietrasiewicze72c39c2013-12-03 15:15:31 +010037#include "u_fs.h"
Andrzej Pietrasiewicz74d48462014-05-08 14:06:21 +020038#include "u_f.h"
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +020039#include "u_os_desc.h"
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +010040#include "configfs.h"
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020041
42#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
43
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020044/* Reference counter handling */
45static void ffs_data_get(struct ffs_data *ffs);
46static void ffs_data_put(struct ffs_data *ffs);
47/* Creates new ffs_data object. */
48static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
49
50/* Opened counter handling. */
51static void ffs_data_opened(struct ffs_data *ffs);
52static void ffs_data_closed(struct ffs_data *ffs);
53
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +010054/* Called with ffs->mutex held; take over ownership of data. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020055static int __must_check
56__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
57static int __must_check
58__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
59
60
61/* The function structure ***************************************************/
62
63struct ffs_ep;
64
65struct ffs_function {
66 struct usb_configuration *conf;
67 struct usb_gadget *gadget;
68 struct ffs_data *ffs;
69
70 struct ffs_ep *eps;
71 u8 eps_revmap[16];
72 short *interfaces_nums;
73
74 struct usb_function function;
75};
76
77
78static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
79{
80 return container_of(f, struct ffs_function, function);
81}
82
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020083
Michal Nazarewicza7ecf052014-02-10 10:42:41 +010084static inline enum ffs_setup_state
85ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
86{
87 return (enum ffs_setup_state)
88 cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
89}
90
91
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020092static void ffs_func_eps_disable(struct ffs_function *func);
93static int __must_check ffs_func_eps_enable(struct ffs_function *func);
94
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020095static int ffs_func_bind(struct usb_configuration *,
96 struct usb_function *);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020097static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
98static void ffs_func_disable(struct usb_function *);
99static int ffs_func_setup(struct usb_function *,
100 const struct usb_ctrlrequest *);
Felix Hädicke54dfce62016-06-22 01:12:07 +0200101static bool ffs_func_req_match(struct usb_function *,
102 const struct usb_ctrlrequest *);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200103static void ffs_func_suspend(struct usb_function *);
104static void ffs_func_resume(struct usb_function *);
105
106
107static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
108static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
109
110
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200111/* The endpoints structures *************************************************/
112
113struct ffs_ep {
114 struct usb_ep *ep; /* P: ffs->eps_lock */
115 struct usb_request *req; /* P: epfile->mutex */
116
Manu Gautam8d4e8972014-02-28 16:50:22 +0530117 /* [0]: full speed, [1]: high speed, [2]: super speed */
118 struct usb_endpoint_descriptor *descs[3];
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200119
120 u8 num;
121
122 int status; /* P: epfile->mutex */
123};
124
125struct ffs_epfile {
126 /* Protects ep->ep and ep->req. */
127 struct mutex mutex;
128 wait_queue_head_t wait;
129
130 struct ffs_data *ffs;
131 struct ffs_ep *ep; /* P: ffs->eps_lock */
132
133 struct dentry *dentry;
134
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200135 /*
136 * Buffer for holding data from partial reads which may happen since
137 * we’re rounding user read requests to a multiple of a max packet size.
138 */
139 struct ffs_buffer *read_buffer; /* P: epfile->mutex */
140
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200141 char name[5];
142
143 unsigned char in; /* P: ffs->eps_lock */
144 unsigned char isoc; /* P: ffs->eps_lock */
145
146 unsigned char _pad;
147};
148
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200149struct ffs_buffer {
150 size_t length;
151 char *data;
152 char storage[];
153};
154
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100155/* ffs_io_data structure ***************************************************/
156
157struct ffs_io_data {
158 bool aio;
159 bool read;
160
161 struct kiocb *kiocb;
Al Viroc993c392015-01-31 23:23:35 -0500162 struct iov_iter data;
163 const void *to_free;
164 char *buf;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100165
166 struct mm_struct *mm;
167 struct work_struct work;
168
169 struct usb_ep *ep;
170 struct usb_request *req;
Robert Baldyga5e33f6f2015-01-23 13:41:01 +0100171
172 struct ffs_data *ffs;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100173};
174
Robert Baldyga6d5c1c72014-08-25 11:16:27 +0200175struct ffs_desc_helper {
176 struct ffs_data *ffs;
177 unsigned interfaces_count;
178 unsigned eps_count;
179};
180
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200181static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
182static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
183
Al Viro1bb27ca2014-09-03 13:32:19 -0400184static struct dentry *
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200185ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
Al Viro1bb27ca2014-09-03 13:32:19 -0400186 const struct file_operations *fops);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200187
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100188/* Devices management *******************************************************/
189
190DEFINE_MUTEX(ffs_lock);
Felipe Balbi0700faa2014-04-01 13:19:32 -0500191EXPORT_SYMBOL_GPL(ffs_lock);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100192
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +0100193static struct ffs_dev *_ffs_find_dev(const char *name);
194static struct ffs_dev *_ffs_alloc_dev(void);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +0100195static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +0100196static void _ffs_free_dev(struct ffs_dev *dev);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100197static void *ffs_acquire_dev(const char *dev_name);
198static void ffs_release_dev(struct ffs_data *ffs_data);
199static int ffs_ready(struct ffs_data *ffs);
200static void ffs_closed(struct ffs_data *ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200201
202/* Misc helper functions ****************************************************/
203
204static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
205 __attribute__((warn_unused_result, nonnull));
Al Viro260ef312012-09-26 21:43:45 -0400206static char *ffs_prepare_buffer(const char __user *buf, size_t len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200207 __attribute__((warn_unused_result, nonnull));
208
209
210/* Control file aka ep0 *****************************************************/
211
212static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
213{
214 struct ffs_data *ffs = req->context;
215
216 complete_all(&ffs->ep0req_completion);
217}
218
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200219static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
220{
221 struct usb_request *req = ffs->ep0req;
222 int ret;
223
224 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
225
226 spin_unlock_irq(&ffs->ev.waitq.lock);
227
228 req->buf = data;
229 req->length = len;
230
Marek Szyprowskice1fd352011-01-28 13:55:36 +0100231 /*
232 * UDC layer requires to provide a buffer even for ZLP, but should
233 * not use it at all. Let's provide some poisoned pointer to catch
234 * possible bug in the driver.
235 */
236 if (req->buf == NULL)
237 req->buf = (void *)0xDEADBABE;
238
Wolfram Sang16735d02013-11-14 14:32:02 -0800239 reinit_completion(&ffs->ep0req_completion);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200240
241 ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
242 if (unlikely(ret < 0))
243 return ret;
244
245 ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
246 if (unlikely(ret)) {
247 usb_ep_dequeue(ffs->gadget->ep0, req);
248 return -EINTR;
249 }
250
251 ffs->setup_state = FFS_NO_SETUP;
Robert Baldyga0a7b1f82014-02-10 10:42:42 +0100252 return req->status ? req->status : req->actual;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200253}
254
255static int __ffs_ep0_stall(struct ffs_data *ffs)
256{
257 if (ffs->ev.can_stall) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100258 pr_vdebug("ep0 stall\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200259 usb_ep_set_halt(ffs->gadget->ep0);
260 ffs->setup_state = FFS_NO_SETUP;
261 return -EL2HLT;
262 } else {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100263 pr_debug("bogus ep0 stall!\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200264 return -ESRCH;
265 }
266}
267
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200268static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
269 size_t len, loff_t *ptr)
270{
271 struct ffs_data *ffs = file->private_data;
272 ssize_t ret;
273 char *data;
274
275 ENTER();
276
277 /* Fast check if setup was canceled */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100278 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200279 return -EIDRM;
280
281 /* Acquire mutex */
282 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
283 if (unlikely(ret < 0))
284 return ret;
285
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200286 /* Check state */
287 switch (ffs->state) {
288 case FFS_READ_DESCRIPTORS:
289 case FFS_READ_STRINGS:
290 /* Copy data */
291 if (unlikely(len < 16)) {
292 ret = -EINVAL;
293 break;
294 }
295
296 data = ffs_prepare_buffer(buf, len);
Tobias Klauser537baab2010-12-09 15:52:39 +0100297 if (IS_ERR(data)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200298 ret = PTR_ERR(data);
299 break;
300 }
301
302 /* Handle data */
303 if (ffs->state == FFS_READ_DESCRIPTORS) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100304 pr_info("read descriptors\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200305 ret = __ffs_data_got_descs(ffs, data, len);
306 if (unlikely(ret < 0))
307 break;
308
309 ffs->state = FFS_READ_STRINGS;
310 ret = len;
311 } else {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100312 pr_info("read strings\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200313 ret = __ffs_data_got_strings(ffs, data, len);
314 if (unlikely(ret < 0))
315 break;
316
317 ret = ffs_epfiles_create(ffs);
318 if (unlikely(ret)) {
319 ffs->state = FFS_CLOSING;
320 break;
321 }
322
323 ffs->state = FFS_ACTIVE;
324 mutex_unlock(&ffs->mutex);
325
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100326 ret = ffs_ready(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200327 if (unlikely(ret < 0)) {
328 ffs->state = FFS_CLOSING;
329 return ret;
330 }
331
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200332 return len;
333 }
334 break;
335
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200336 case FFS_ACTIVE:
337 data = NULL;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100338 /*
339 * We're called from user space, we can use _irq
340 * rather then _irqsave
341 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200342 spin_lock_irq(&ffs->ev.waitq.lock);
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100343 switch (ffs_setup_state_clear_cancelled(ffs)) {
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100344 case FFS_SETUP_CANCELLED:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200345 ret = -EIDRM;
346 goto done_spin;
347
348 case FFS_NO_SETUP:
349 ret = -ESRCH;
350 goto done_spin;
351
352 case FFS_SETUP_PENDING:
353 break;
354 }
355
356 /* FFS_SETUP_PENDING */
357 if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
358 spin_unlock_irq(&ffs->ev.waitq.lock);
359 ret = __ffs_ep0_stall(ffs);
360 break;
361 }
362
363 /* FFS_SETUP_PENDING and not stall */
364 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
365
366 spin_unlock_irq(&ffs->ev.waitq.lock);
367
368 data = ffs_prepare_buffer(buf, len);
Tobias Klauser537baab2010-12-09 15:52:39 +0100369 if (IS_ERR(data)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200370 ret = PTR_ERR(data);
371 break;
372 }
373
374 spin_lock_irq(&ffs->ev.waitq.lock);
375
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100376 /*
377 * We are guaranteed to be still in FFS_ACTIVE state
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200378 * but the state of setup could have changed from
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100379 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200380 * to check for that. If that happened we copied data
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100381 * from user space in vain but it's unlikely.
382 *
383 * For sure we are not in FFS_NO_SETUP since this is
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200384 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
385 * transition can be performed and it's protected by
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100386 * mutex.
387 */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100388 if (ffs_setup_state_clear_cancelled(ffs) ==
389 FFS_SETUP_CANCELLED) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200390 ret = -EIDRM;
391done_spin:
392 spin_unlock_irq(&ffs->ev.waitq.lock);
393 } else {
394 /* unlocks spinlock */
395 ret = __ffs_ep0_queue_wait(ffs, data, len);
396 }
397 kfree(data);
398 break;
399
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200400 default:
401 ret = -EBADFD;
402 break;
403 }
404
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200405 mutex_unlock(&ffs->mutex);
406 return ret;
407}
408
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200409/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200410static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
411 size_t n)
412{
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100413 /*
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200414 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
415 * size of ffs->ev.types array (which is four) so that's how much space
416 * we reserve.
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100417 */
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200418 struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
419 const size_t size = n * sizeof *events;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200420 unsigned i = 0;
421
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200422 memset(events, 0, size);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200423
424 do {
425 events[i].type = ffs->ev.types[i];
426 if (events[i].type == FUNCTIONFS_SETUP) {
427 events[i].u.setup = ffs->ev.setup;
428 ffs->setup_state = FFS_SETUP_PENDING;
429 }
430 } while (++i < n);
431
Michal Nazarewicz67913bb2014-09-10 17:50:24 +0200432 ffs->ev.count -= n;
433 if (ffs->ev.count)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200434 memmove(ffs->ev.types, ffs->ev.types + n,
435 ffs->ev.count * sizeof *ffs->ev.types);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200436
437 spin_unlock_irq(&ffs->ev.waitq.lock);
438 mutex_unlock(&ffs->mutex);
439
Daniel Walter7fe9a932015-11-18 17:15:49 +0100440 return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200441}
442
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200443static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
444 size_t len, loff_t *ptr)
445{
446 struct ffs_data *ffs = file->private_data;
447 char *data = NULL;
448 size_t n;
449 int ret;
450
451 ENTER();
452
453 /* Fast check if setup was canceled */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100454 if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200455 return -EIDRM;
456
457 /* Acquire mutex */
458 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
459 if (unlikely(ret < 0))
460 return ret;
461
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200462 /* Check state */
463 if (ffs->state != FFS_ACTIVE) {
464 ret = -EBADFD;
465 goto done_mutex;
466 }
467
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100468 /*
469 * We're called from user space, we can use _irq rather then
470 * _irqsave
471 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200472 spin_lock_irq(&ffs->ev.waitq.lock);
473
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100474 switch (ffs_setup_state_clear_cancelled(ffs)) {
Michal Nazarewicze46318a2014-02-10 10:42:40 +0100475 case FFS_SETUP_CANCELLED:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200476 ret = -EIDRM;
477 break;
478
479 case FFS_NO_SETUP:
480 n = len / sizeof(struct usb_functionfs_event);
481 if (unlikely(!n)) {
482 ret = -EINVAL;
483 break;
484 }
485
486 if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
487 ret = -EAGAIN;
488 break;
489 }
490
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100491 if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
492 ffs->ev.count)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200493 ret = -EINTR;
494 break;
495 }
496
497 return __ffs_ep0_read_events(ffs, buf,
498 min(n, (size_t)ffs->ev.count));
499
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200500 case FFS_SETUP_PENDING:
501 if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
502 spin_unlock_irq(&ffs->ev.waitq.lock);
503 ret = __ffs_ep0_stall(ffs);
504 goto done_mutex;
505 }
506
507 len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
508
509 spin_unlock_irq(&ffs->ev.waitq.lock);
510
511 if (likely(len)) {
512 data = kmalloc(len, GFP_KERNEL);
513 if (unlikely(!data)) {
514 ret = -ENOMEM;
515 goto done_mutex;
516 }
517 }
518
519 spin_lock_irq(&ffs->ev.waitq.lock);
520
521 /* See ffs_ep0_write() */
Michal Nazarewicza7ecf052014-02-10 10:42:41 +0100522 if (ffs_setup_state_clear_cancelled(ffs) ==
523 FFS_SETUP_CANCELLED) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200524 ret = -EIDRM;
525 break;
526 }
527
528 /* unlocks spinlock */
529 ret = __ffs_ep0_queue_wait(ffs, data, len);
Daniel Walter7fe9a932015-11-18 17:15:49 +0100530 if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200531 ret = -EFAULT;
532 goto done_mutex;
533
534 default:
535 ret = -EBADFD;
536 break;
537 }
538
539 spin_unlock_irq(&ffs->ev.waitq.lock);
540done_mutex:
541 mutex_unlock(&ffs->mutex);
542 kfree(data);
543 return ret;
544}
545
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200546static int ffs_ep0_open(struct inode *inode, struct file *file)
547{
548 struct ffs_data *ffs = inode->i_private;
549
550 ENTER();
551
552 if (unlikely(ffs->state == FFS_CLOSING))
553 return -EBUSY;
554
555 file->private_data = ffs;
556 ffs_data_opened(ffs);
557
558 return 0;
559}
560
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200561static int ffs_ep0_release(struct inode *inode, struct file *file)
562{
563 struct ffs_data *ffs = file->private_data;
564
565 ENTER();
566
567 ffs_data_closed(ffs);
568
569 return 0;
570}
571
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200572static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
573{
574 struct ffs_data *ffs = file->private_data;
575 struct usb_gadget *gadget = ffs->gadget;
576 long ret;
577
578 ENTER();
579
580 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
581 struct ffs_function *func = ffs->func;
582 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
Andrzej Pietrasiewicz92b0abf2012-03-28 09:30:50 +0200583 } else if (gadget && gadget->ops->ioctl) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200584 ret = gadget->ops->ioctl(gadget, code, value);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200585 } else {
586 ret = -ENOTTY;
587 }
588
589 return ret;
590}
591
Robert Baldyga23de91e2014-02-10 10:42:43 +0100592static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
593{
594 struct ffs_data *ffs = file->private_data;
595 unsigned int mask = POLLWRNORM;
596 int ret;
597
598 poll_wait(file, &ffs->ev.waitq, wait);
599
600 ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
601 if (unlikely(ret < 0))
602 return mask;
603
604 switch (ffs->state) {
605 case FFS_READ_DESCRIPTORS:
606 case FFS_READ_STRINGS:
607 mask |= POLLOUT;
608 break;
609
610 case FFS_ACTIVE:
611 switch (ffs->setup_state) {
612 case FFS_NO_SETUP:
613 if (ffs->ev.count)
614 mask |= POLLIN;
615 break;
616
617 case FFS_SETUP_PENDING:
618 case FFS_SETUP_CANCELLED:
619 mask |= (POLLIN | POLLOUT);
620 break;
621 }
622 case FFS_CLOSING:
623 break;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +0100624 case FFS_DEACTIVATED:
625 break;
Robert Baldyga23de91e2014-02-10 10:42:43 +0100626 }
627
628 mutex_unlock(&ffs->mutex);
629
630 return mask;
631}
632
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200633static const struct file_operations ffs_ep0_operations = {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200634 .llseek = no_llseek,
635
636 .open = ffs_ep0_open,
637 .write = ffs_ep0_write,
638 .read = ffs_ep0_read,
639 .release = ffs_ep0_release,
640 .unlocked_ioctl = ffs_ep0_ioctl,
Robert Baldyga23de91e2014-02-10 10:42:43 +0100641 .poll = ffs_ep0_poll,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200642};
643
644
645/* "Normal" endpoints operations ********************************************/
646
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200647static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
648{
649 ENTER();
650 if (likely(req->context)) {
651 struct ffs_ep *ep = _ep->driver_data;
652 ep->status = req->status ? req->status : req->actual;
653 complete(req->context);
654 }
655}
656
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200657static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
658{
659 ssize_t ret = copy_to_iter(data, data_len, iter);
660 if (likely(ret == data_len))
661 return ret;
662
663 if (unlikely(iov_iter_count(iter)))
664 return -EFAULT;
665
666 /*
667 * Dear user space developer!
668 *
669 * TL;DR: To stop getting below error message in your kernel log, change
670 * user space code using functionfs to align read buffers to a max
671 * packet size.
672 *
673 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
674 * packet size. When unaligned buffer is passed to functionfs, it
675 * internally uses a larger, aligned buffer so that such UDCs are happy.
676 *
677 * Unfortunately, this means that host may send more data than was
678 * requested in read(2) system call. f_fs doesn’t know what to do with
679 * that excess data so it simply drops it.
680 *
681 * Was the buffer aligned in the first place, no such problem would
682 * happen.
683 *
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200684 * Data may be dropped only in AIO reads. Synchronous reads are handled
685 * by splitting a request into multiple parts. This splitting may still
686 * be a problem though so it’s likely best to align the buffer
687 * regardless of it being AIO or not..
688 *
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200689 * This only affects OUT endpoints, i.e. reading data with a read(2),
690 * aio_read(2) etc. system calls. Writing data to an IN endpoint is not
691 * affected.
692 */
693 pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
694 "Align read buffer size to max packet size to avoid the problem.\n",
695 data_len, ret);
696
697 return ret;
698}
699
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100700static void ffs_user_copy_worker(struct work_struct *work)
701{
702 struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
703 work);
704 int ret = io_data->req->status ? io_data->req->status :
705 io_data->req->actual;
Lars-Peter Clausen38740a52016-04-14 17:01:17 +0200706 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100707
708 if (io_data->read && ret > 0) {
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100709 use_mm(io_data->mm);
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200710 ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100711 unuse_mm(io_data->mm);
712 }
713
Christoph Hellwig04b2fa92015-02-02 14:49:06 +0100714 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100715
Lars-Peter Clausen38740a52016-04-14 17:01:17 +0200716 if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
Robert Baldyga5e33f6f2015-01-23 13:41:01 +0100717 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
718
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100719 usb_ep_free_request(io_data->ep, io_data->req);
720
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100721 if (io_data->read)
Al Viroc993c392015-01-31 23:23:35 -0500722 kfree(io_data->to_free);
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100723 kfree(io_data->buf);
724 kfree(io_data);
725}
726
727static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
728 struct usb_request *req)
729{
730 struct ffs_io_data *io_data = req->context;
731
732 ENTER();
733
734 INIT_WORK(&io_data->work, ffs_user_copy_worker);
735 schedule_work(&io_data->work);
736}
737
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200738/* Assumes epfile->mutex is held. */
739static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
740 struct iov_iter *iter)
741{
742 struct ffs_buffer *buf = epfile->read_buffer;
743 ssize_t ret;
744 if (!buf)
745 return 0;
746
747 ret = copy_to_iter(buf->data, buf->length, iter);
748 if (buf->length == ret) {
749 kfree(buf);
750 epfile->read_buffer = NULL;
751 } else if (unlikely(iov_iter_count(iter))) {
752 ret = -EFAULT;
753 } else {
754 buf->length -= ret;
755 buf->data += ret;
756 }
757 return ret;
758}
759
760/* Assumes epfile->mutex is held. */
761static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
762 void *data, int data_len,
763 struct iov_iter *iter)
764{
765 struct ffs_buffer *buf;
766
767 ssize_t ret = copy_to_iter(data, data_len, iter);
768 if (likely(data_len == ret))
769 return ret;
770
771 if (unlikely(iov_iter_count(iter)))
772 return -EFAULT;
773
774 /* See ffs_copy_to_iter for more context. */
775 pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
776 data_len, ret);
777
778 data_len -= ret;
779 buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
Dan Carpenter44963d62016-06-24 15:23:16 +0300780 if (!buf)
781 return -ENOMEM;
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200782 buf->length = data_len;
783 buf->data = buf->storage;
784 memcpy(buf->storage, data + ret, data_len);
785 epfile->read_buffer = buf;
786
787 return ret;
788}
789
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100790static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200791{
792 struct ffs_epfile *epfile = file->private_data;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100793 struct usb_request *req;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200794 struct ffs_ep *ep;
795 char *data = NULL;
David Cohenc0d31b32014-10-13 11:15:54 -0700796 ssize_t ret, data_len = -EINVAL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200797 int halt;
798
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800799 /* Are we still active? */
Michal Nazarewiczb3591f62016-01-04 20:58:12 +0100800 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
801 return -ENODEV;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800802
803 /* Wait for endpoint to be enabled */
804 ep = epfile->ep;
805 if (!ep) {
Michal Nazarewiczb3591f62016-01-04 20:58:12 +0100806 if (file->f_flags & O_NONBLOCK)
807 return -EAGAIN;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800808
809 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
Michal Nazarewiczb3591f62016-01-04 20:58:12 +0100810 if (ret)
811 return -EINTR;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800812 }
813
814 /* Do we halt? */
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100815 halt = (!io_data->read == !epfile->in);
Michal Nazarewiczb3591f62016-01-04 20:58:12 +0100816 if (halt && epfile->isoc)
817 return -EINVAL;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800818
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200819 /* We will be using request and read_buffer */
820 ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
821 if (unlikely(ret))
822 goto error;
823
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800824 /* Allocate & copy */
825 if (!halt) {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200826 struct usb_gadget *gadget;
827
828 /*
829 * Do we have buffered data from previous partial read? Check
830 * that for synchronous case only because we do not have
831 * facility to ‘wake up’ a pending asynchronous read and push
832 * buffered data to it which we would need to make things behave
833 * consistently.
834 */
835 if (!io_data->aio && io_data->read) {
836 ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
837 if (ret)
838 goto error_mutex;
839 }
840
Michal Nazarewicz219580e2013-12-09 15:55:37 -0800841 /*
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +0100842 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100843 * before the waiting completes, so do not assign to 'gadget'
844 * earlier
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +0100845 */
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200846 gadget = epfile->ffs->gadget;
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +0100847
Chao Bi97839ca2014-04-14 11:19:53 +0800848 spin_lock_irq(&epfile->ffs->eps_lock);
849 /* In the meantime, endpoint got disabled or changed. */
850 if (epfile->ep != ep) {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200851 ret = -ESHUTDOWN;
852 goto error_lock;
Chao Bi97839ca2014-04-14 11:19:53 +0800853 }
Al Viroc993c392015-01-31 23:23:35 -0500854 data_len = iov_iter_count(&io_data->data);
Andrzej Pietrasiewiczf0f42202014-01-20 08:33:50 +0100855 /*
Michal Nazarewicz219580e2013-12-09 15:55:37 -0800856 * Controller may require buffer size to be aligned to
857 * maxpacketsize of an out endpoint.
858 */
Al Viroc993c392015-01-31 23:23:35 -0500859 if (io_data->read)
860 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
Chao Bi97839ca2014-04-14 11:19:53 +0800861 spin_unlock_irq(&epfile->ffs->eps_lock);
Michal Nazarewicz219580e2013-12-09 15:55:37 -0800862
863 data = kmalloc(data_len, GFP_KERNEL);
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200864 if (unlikely(!data)) {
865 ret = -ENOMEM;
866 goto error_mutex;
867 }
868 if (!io_data->read &&
869 copy_from_iter(data, data_len, &io_data->data) != data_len) {
870 ret = -EFAULT;
871 goto error_mutex;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800872 }
873 }
874
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800875 spin_lock_irq(&epfile->ffs->eps_lock);
876
877 if (epfile->ep != ep) {
878 /* In the meantime, endpoint got disabled or changed. */
879 ret = -ESHUTDOWN;
Michal Nazarewicz7fa68032013-12-09 15:55:36 -0800880 } else if (halt) {
881 /* Halt */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200882 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
883 usb_ep_set_halt(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200884 ret = -EBADMSG;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100885 } else if (unlikely(data_len == -EINVAL)) {
David Cohenc0d31b32014-10-13 11:15:54 -0700886 /*
887 * Sanity Check: even though data_len can't be used
888 * uninitialized at the time I write this comment, some
889 * compilers complain about this situation.
890 * In order to keep the code clean from warnings, data_len is
891 * being initialized to -EINVAL during its declaration, which
892 * means we can't rely on compiler anymore to warn no future
893 * changes won't result in data_len being used uninitialized.
894 * For such reason, we're adding this redundant sanity check
895 * here.
896 */
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100897 WARN(1, "%s: data_len == -EINVAL\n", __func__);
898 ret = -EINVAL;
899 } else if (!io_data->aio) {
900 DECLARE_COMPLETION_ONSTACK(done);
Du, Changbinef150882015-12-29 14:36:58 +0800901 bool interrupted = false;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100902
903 req = ep->req;
904 req->buf = data;
905 req->length = data_len;
906
907 req->context = &done;
908 req->complete = ffs_epfile_io_complete;
909
910 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
911 if (unlikely(ret < 0))
912 goto error_lock;
913
914 spin_unlock_irq(&epfile->ffs->eps_lock);
915
916 if (unlikely(wait_for_completion_interruptible(&done))) {
Du, Changbinef150882015-12-29 14:36:58 +0800917 /*
918 * To avoid race condition with ffs_epfile_io_complete,
919 * dequeue the request first then check
920 * status. usb_ep_dequeue API should guarantee no race
921 * condition with req->complete callback.
922 */
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100923 usb_ep_dequeue(ep->ep, req);
Du, Changbinef150882015-12-29 14:36:58 +0800924 interrupted = ep->status < 0;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100925 }
926
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200927 if (interrupted)
928 ret = -EINTR;
929 else if (io_data->read && ep->status > 0)
Michal Nazarewicz9353afb2016-05-21 20:47:35 +0200930 ret = __ffs_epfile_read_data(epfile, data, ep->status,
931 &io_data->data);
Michal Nazarewiczc662a312016-05-21 20:47:34 +0200932 else
933 ret = ep->status;
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100934 goto error_mutex;
935 } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
936 ret = -ENOMEM;
937 } else {
938 req->buf = data;
939 req->length = data_len;
940
941 io_data->buf = data;
942 io_data->ep = ep->ep;
943 io_data->req = req;
944 io_data->ffs = epfile->ffs;
945
946 req->context = io_data;
947 req->complete = ffs_epfile_async_io_complete;
948
949 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
950 if (unlikely(ret)) {
951 usb_ep_free_request(ep->ep, req);
David Cohenc0d31b32014-10-13 11:15:54 -0700952 goto error_lock;
953 }
954
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100955 ret = -EIOCBQUEUED;
956 /*
957 * Do not kfree the buffer in this function. It will be freed
958 * by ffs_user_copy_worker.
959 */
960 data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200961 }
962
Robert Baldyga48968f82014-03-10 09:33:37 +0100963error_lock:
964 spin_unlock_irq(&epfile->ffs->eps_lock);
Michal Nazarewiczae76e132016-01-04 21:05:59 +0100965error_mutex:
Robert Baldyga48968f82014-03-10 09:33:37 +0100966 mutex_unlock(&epfile->mutex);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200967error:
968 kfree(data);
969 return ret;
970}
971
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200972static int
973ffs_epfile_open(struct inode *inode, struct file *file)
974{
975 struct ffs_epfile *epfile = inode->i_private;
976
977 ENTER();
978
979 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
980 return -ENODEV;
981
982 file->private_data = epfile;
983 ffs_data_opened(epfile->ffs);
984
985 return 0;
986}
987
Robert Baldyga2e4c7552014-02-10 10:42:44 +0100988static int ffs_aio_cancel(struct kiocb *kiocb)
989{
990 struct ffs_io_data *io_data = kiocb->private;
991 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
992 int value;
993
994 ENTER();
995
996 spin_lock_irq(&epfile->ffs->eps_lock);
997
998 if (likely(io_data && io_data->ep && io_data->req))
999 value = usb_ep_dequeue(io_data->ep, io_data->req);
1000 else
1001 value = -EINVAL;
1002
1003 spin_unlock_irq(&epfile->ffs->eps_lock);
1004
1005 return value;
1006}
1007
Al Viro70e60d92015-01-31 23:55:39 -05001008static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001009{
Al Viro70e60d92015-01-31 23:55:39 -05001010 struct ffs_io_data io_data, *p = &io_data;
Al Virode2080d2015-01-31 23:42:34 -05001011 ssize_t res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001012
1013 ENTER();
1014
Al Viro70e60d92015-01-31 23:55:39 -05001015 if (!is_sync_kiocb(kiocb)) {
1016 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1017 if (unlikely(!p))
1018 return -ENOMEM;
1019 p->aio = true;
1020 } else {
1021 p->aio = false;
1022 }
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001023
Al Viro70e60d92015-01-31 23:55:39 -05001024 p->read = false;
1025 p->kiocb = kiocb;
1026 p->data = *from;
1027 p->mm = current->mm;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001028
Al Viro70e60d92015-01-31 23:55:39 -05001029 kiocb->private = p;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001030
Rui Miguel Silva4088acf2015-05-18 16:02:07 +01001031 if (p->aio)
1032 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001033
Al Viro70e60d92015-01-31 23:55:39 -05001034 res = ffs_epfile_io(kiocb->ki_filp, p);
1035 if (res == -EIOCBQUEUED)
1036 return res;
1037 if (p->aio)
1038 kfree(p);
1039 else
1040 *from = p->data;
Al Virode2080d2015-01-31 23:42:34 -05001041 return res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001042}
1043
Al Viro70e60d92015-01-31 23:55:39 -05001044static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001045{
Al Viro70e60d92015-01-31 23:55:39 -05001046 struct ffs_io_data io_data, *p = &io_data;
Al Virode2080d2015-01-31 23:42:34 -05001047 ssize_t res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001048
1049 ENTER();
1050
Al Viro70e60d92015-01-31 23:55:39 -05001051 if (!is_sync_kiocb(kiocb)) {
1052 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1053 if (unlikely(!p))
1054 return -ENOMEM;
1055 p->aio = true;
1056 } else {
1057 p->aio = false;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001058 }
1059
Al Viro70e60d92015-01-31 23:55:39 -05001060 p->read = true;
1061 p->kiocb = kiocb;
1062 if (p->aio) {
1063 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1064 if (!p->to_free) {
1065 kfree(p);
1066 return -ENOMEM;
1067 }
1068 } else {
1069 p->data = *to;
1070 p->to_free = NULL;
1071 }
1072 p->mm = current->mm;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001073
Al Viro70e60d92015-01-31 23:55:39 -05001074 kiocb->private = p;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001075
Rui Miguel Silva4088acf2015-05-18 16:02:07 +01001076 if (p->aio)
1077 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001078
Al Viro70e60d92015-01-31 23:55:39 -05001079 res = ffs_epfile_io(kiocb->ki_filp, p);
1080 if (res == -EIOCBQUEUED)
1081 return res;
1082
1083 if (p->aio) {
1084 kfree(p->to_free);
1085 kfree(p);
1086 } else {
1087 *to = p->data;
Al Virode2080d2015-01-31 23:42:34 -05001088 }
1089 return res;
Robert Baldyga2e4c7552014-02-10 10:42:44 +01001090}
1091
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001092static int
1093ffs_epfile_release(struct inode *inode, struct file *file)
1094{
1095 struct ffs_epfile *epfile = inode->i_private;
1096
1097 ENTER();
1098
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001099 kfree(epfile->read_buffer);
1100 epfile->read_buffer = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001101 ffs_data_closed(epfile->ffs);
1102
1103 return 0;
1104}
1105
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001106static long ffs_epfile_ioctl(struct file *file, unsigned code,
1107 unsigned long value)
1108{
1109 struct ffs_epfile *epfile = file->private_data;
1110 int ret;
1111
1112 ENTER();
1113
1114 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
1115 return -ENODEV;
1116
1117 spin_lock_irq(&epfile->ffs->eps_lock);
1118 if (likely(epfile->ep)) {
1119 switch (code) {
1120 case FUNCTIONFS_FIFO_STATUS:
1121 ret = usb_ep_fifo_status(epfile->ep->ep);
1122 break;
1123 case FUNCTIONFS_FIFO_FLUSH:
1124 usb_ep_fifo_flush(epfile->ep->ep);
1125 ret = 0;
1126 break;
1127 case FUNCTIONFS_CLEAR_HALT:
1128 ret = usb_ep_clear_halt(epfile->ep->ep);
1129 break;
1130 case FUNCTIONFS_ENDPOINT_REVMAP:
1131 ret = epfile->ep->num;
1132 break;
Robert Baldygac559a352014-09-09 08:23:16 +02001133 case FUNCTIONFS_ENDPOINT_DESC:
1134 {
1135 int desc_idx;
1136 struct usb_endpoint_descriptor *desc;
1137
1138 switch (epfile->ffs->gadget->speed) {
1139 case USB_SPEED_SUPER:
1140 desc_idx = 2;
1141 break;
1142 case USB_SPEED_HIGH:
1143 desc_idx = 1;
1144 break;
1145 default:
1146 desc_idx = 0;
1147 }
1148 desc = epfile->ep->descs[desc_idx];
1149
1150 spin_unlock_irq(&epfile->ffs->eps_lock);
1151 ret = copy_to_user((void *)value, desc, sizeof(*desc));
1152 if (ret)
1153 ret = -EFAULT;
1154 return ret;
1155 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001156 default:
1157 ret = -ENOTTY;
1158 }
1159 } else {
1160 ret = -ENODEV;
1161 }
1162 spin_unlock_irq(&epfile->ffs->eps_lock);
1163
1164 return ret;
1165}
1166
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001167static const struct file_operations ffs_epfile_operations = {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001168 .llseek = no_llseek,
1169
1170 .open = ffs_epfile_open,
Al Viro70e60d92015-01-31 23:55:39 -05001171 .write_iter = ffs_epfile_write_iter,
1172 .read_iter = ffs_epfile_read_iter,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001173 .release = ffs_epfile_release,
1174 .unlocked_ioctl = ffs_epfile_ioctl,
1175};
1176
1177
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001178/* File system and super block operations ***********************************/
1179
1180/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001181 * Mounting the file system creates a controller file, used first for
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001182 * function configuration then later for event monitoring.
1183 */
1184
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001185static struct inode *__must_check
1186ffs_sb_make_inode(struct super_block *sb, void *data,
1187 const struct file_operations *fops,
1188 const struct inode_operations *iops,
1189 struct ffs_file_perms *perms)
1190{
1191 struct inode *inode;
1192
1193 ENTER();
1194
1195 inode = new_inode(sb);
1196
1197 if (likely(inode)) {
1198 struct timespec current_time = CURRENT_TIME;
1199
Al Viro12ba8d12010-10-27 04:19:36 +01001200 inode->i_ino = get_next_ino();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001201 inode->i_mode = perms->mode;
1202 inode->i_uid = perms->uid;
1203 inode->i_gid = perms->gid;
1204 inode->i_atime = current_time;
1205 inode->i_mtime = current_time;
1206 inode->i_ctime = current_time;
1207 inode->i_private = data;
1208 if (fops)
1209 inode->i_fop = fops;
1210 if (iops)
1211 inode->i_op = iops;
1212 }
1213
1214 return inode;
1215}
1216
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001217/* Create "regular" file */
Al Viro1bb27ca2014-09-03 13:32:19 -04001218static struct dentry *ffs_sb_create_file(struct super_block *sb,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001219 const char *name, void *data,
Al Viro1bb27ca2014-09-03 13:32:19 -04001220 const struct file_operations *fops)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001221{
1222 struct ffs_data *ffs = sb->s_fs_info;
1223 struct dentry *dentry;
1224 struct inode *inode;
1225
1226 ENTER();
1227
1228 dentry = d_alloc_name(sb->s_root, name);
1229 if (unlikely(!dentry))
1230 return NULL;
1231
1232 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1233 if (unlikely(!inode)) {
1234 dput(dentry);
1235 return NULL;
1236 }
1237
1238 d_add(dentry, inode);
Al Viro1bb27ca2014-09-03 13:32:19 -04001239 return dentry;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001240}
1241
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001242/* Super block */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001243static const struct super_operations ffs_sb_operations = {
1244 .statfs = simple_statfs,
1245 .drop_inode = generic_delete_inode,
1246};
1247
1248struct ffs_sb_fill_data {
1249 struct ffs_file_perms perms;
1250 umode_t root_mode;
1251 const char *dev_name;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001252 bool no_disconnect;
Al Viro2606b282013-09-20 17:14:21 +01001253 struct ffs_data *ffs_data;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001254};
1255
1256static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1257{
1258 struct ffs_sb_fill_data *data = _data;
1259 struct inode *inode;
Al Viro2606b282013-09-20 17:14:21 +01001260 struct ffs_data *ffs = data->ffs_data;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001261
1262 ENTER();
1263
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001264 ffs->sb = sb;
Al Viro2606b282013-09-20 17:14:21 +01001265 data->ffs_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001266 sb->s_fs_info = ffs;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001267 sb->s_blocksize = PAGE_SIZE;
1268 sb->s_blocksize_bits = PAGE_SHIFT;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001269 sb->s_magic = FUNCTIONFS_MAGIC;
1270 sb->s_op = &ffs_sb_operations;
1271 sb->s_time_gran = 1;
1272
1273 /* Root inode */
1274 data->perms.mode = data->root_mode;
1275 inode = ffs_sb_make_inode(sb, NULL,
1276 &simple_dir_operations,
1277 &simple_dir_inode_operations,
1278 &data->perms);
Al Viro48fde702012-01-08 22:15:13 -05001279 sb->s_root = d_make_root(inode);
1280 if (unlikely(!sb->s_root))
Al Viro2606b282013-09-20 17:14:21 +01001281 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001282
1283 /* EP0 file */
1284 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
Al Viro1bb27ca2014-09-03 13:32:19 -04001285 &ffs_ep0_operations)))
Al Viro2606b282013-09-20 17:14:21 +01001286 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001287
1288 return 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001289}
1290
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001291static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
1292{
1293 ENTER();
1294
1295 if (!opts || !*opts)
1296 return 0;
1297
1298 for (;;) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001299 unsigned long value;
Michal Nazarewiczafd2e182013-01-09 10:17:47 +01001300 char *eq, *comma;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001301
1302 /* Option limit */
1303 comma = strchr(opts, ',');
1304 if (comma)
1305 *comma = 0;
1306
1307 /* Value limit */
1308 eq = strchr(opts, '=');
1309 if (unlikely(!eq)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001310 pr_err("'=' missing in %s\n", opts);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001311 return -EINVAL;
1312 }
1313 *eq = 0;
1314
1315 /* Parse value */
Michal Nazarewiczafd2e182013-01-09 10:17:47 +01001316 if (kstrtoul(eq + 1, 0, &value)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001317 pr_err("%s: invalid value: %s\n", opts, eq + 1);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001318 return -EINVAL;
1319 }
1320
1321 /* Interpret option */
1322 switch (eq - opts) {
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001323 case 13:
1324 if (!memcmp(opts, "no_disconnect", 13))
1325 data->no_disconnect = !!value;
1326 else
1327 goto invalid;
1328 break;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001329 case 5:
1330 if (!memcmp(opts, "rmode", 5))
1331 data->root_mode = (value & 0555) | S_IFDIR;
1332 else if (!memcmp(opts, "fmode", 5))
1333 data->perms.mode = (value & 0666) | S_IFREG;
1334 else
1335 goto invalid;
1336 break;
1337
1338 case 4:
1339 if (!memcmp(opts, "mode", 4)) {
1340 data->root_mode = (value & 0555) | S_IFDIR;
1341 data->perms.mode = (value & 0666) | S_IFREG;
1342 } else {
1343 goto invalid;
1344 }
1345 break;
1346
1347 case 3:
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001348 if (!memcmp(opts, "uid", 3)) {
1349 data->perms.uid = make_kuid(current_user_ns(), value);
1350 if (!uid_valid(data->perms.uid)) {
1351 pr_err("%s: unmapped value: %lu\n", opts, value);
1352 return -EINVAL;
1353 }
Benoit Gobyb8100752013-01-08 19:57:09 -08001354 } else if (!memcmp(opts, "gid", 3)) {
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001355 data->perms.gid = make_kgid(current_user_ns(), value);
1356 if (!gid_valid(data->perms.gid)) {
1357 pr_err("%s: unmapped value: %lu\n", opts, value);
1358 return -EINVAL;
1359 }
Benoit Gobyb8100752013-01-08 19:57:09 -08001360 } else {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001361 goto invalid;
Benoit Gobyb8100752013-01-08 19:57:09 -08001362 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001363 break;
1364
1365 default:
1366invalid:
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001367 pr_err("%s: invalid option\n", opts);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001368 return -EINVAL;
1369 }
1370
1371 /* Next iteration */
1372 if (!comma)
1373 break;
1374 opts = comma + 1;
1375 }
1376
1377 return 0;
1378}
1379
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001380/* "mount -t functionfs dev_name /dev/function" ends up here */
1381
Al Virofc14f2f2010-07-25 01:48:30 +04001382static struct dentry *
1383ffs_fs_mount(struct file_system_type *t, int flags,
1384 const char *dev_name, void *opts)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001385{
1386 struct ffs_sb_fill_data data = {
1387 .perms = {
1388 .mode = S_IFREG | 0600,
Eric W. Biedermanb9b73f72012-06-14 01:19:23 -07001389 .uid = GLOBAL_ROOT_UID,
1390 .gid = GLOBAL_ROOT_GID,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001391 },
1392 .root_mode = S_IFDIR | 0500,
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001393 .no_disconnect = false,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001394 };
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001395 struct dentry *rv;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001396 int ret;
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001397 void *ffs_dev;
Al Viro2606b282013-09-20 17:14:21 +01001398 struct ffs_data *ffs;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001399
1400 ENTER();
1401
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001402 ret = ffs_fs_parse_opts(&data, opts);
1403 if (unlikely(ret < 0))
Al Virofc14f2f2010-07-25 01:48:30 +04001404 return ERR_PTR(ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001405
Al Viro2606b282013-09-20 17:14:21 +01001406 ffs = ffs_data_new();
1407 if (unlikely(!ffs))
1408 return ERR_PTR(-ENOMEM);
1409 ffs->file_perms = data.perms;
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001410 ffs->no_disconnect = data.no_disconnect;
Al Viro2606b282013-09-20 17:14:21 +01001411
1412 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1413 if (unlikely(!ffs->dev_name)) {
1414 ffs_data_put(ffs);
1415 return ERR_PTR(-ENOMEM);
1416 }
1417
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001418 ffs_dev = ffs_acquire_dev(dev_name);
Al Viro2606b282013-09-20 17:14:21 +01001419 if (IS_ERR(ffs_dev)) {
1420 ffs_data_put(ffs);
1421 return ERR_CAST(ffs_dev);
1422 }
1423 ffs->private_data = ffs_dev;
1424 data.ffs_data = ffs;
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001425
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001426 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
Al Viro2606b282013-09-20 17:14:21 +01001427 if (IS_ERR(rv) && data.ffs_data) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001428 ffs_release_dev(data.ffs_data);
Al Viro2606b282013-09-20 17:14:21 +01001429 ffs_data_put(data.ffs_data);
1430 }
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001431 return rv;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001432}
1433
1434static void
1435ffs_fs_kill_sb(struct super_block *sb)
1436{
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001437 ENTER();
1438
1439 kill_litter_super(sb);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001440 if (sb->s_fs_info) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001441 ffs_release_dev(sb->s_fs_info);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001442 ffs_data_closed(sb->s_fs_info);
Al Viro5b5f9562012-01-08 15:38:27 -05001443 ffs_data_put(sb->s_fs_info);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001444 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001445}
1446
1447static struct file_system_type ffs_fs_type = {
1448 .owner = THIS_MODULE,
1449 .name = "functionfs",
Al Virofc14f2f2010-07-25 01:48:30 +04001450 .mount = ffs_fs_mount,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001451 .kill_sb = ffs_fs_kill_sb,
1452};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08001453MODULE_ALIAS_FS("functionfs");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001454
1455
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001456/* Driver's main init/cleanup functions *************************************/
1457
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001458static int functionfs_init(void)
1459{
1460 int ret;
1461
1462 ENTER();
1463
1464 ret = register_filesystem(&ffs_fs_type);
1465 if (likely(!ret))
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001466 pr_info("file system registered\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001467 else
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001468 pr_err("failed registering file system (%d)\n", ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001469
1470 return ret;
1471}
1472
1473static void functionfs_cleanup(void)
1474{
1475 ENTER();
1476
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001477 pr_info("unloading\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001478 unregister_filesystem(&ffs_fs_type);
1479}
1480
1481
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001482/* ffs_data and ffs_function construction and destruction code **************/
1483
1484static void ffs_data_clear(struct ffs_data *ffs);
1485static void ffs_data_reset(struct ffs_data *ffs);
1486
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001487static void ffs_data_get(struct ffs_data *ffs)
1488{
1489 ENTER();
1490
1491 atomic_inc(&ffs->ref);
1492}
1493
1494static void ffs_data_opened(struct ffs_data *ffs)
1495{
1496 ENTER();
1497
1498 atomic_inc(&ffs->ref);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001499 if (atomic_add_return(1, &ffs->opened) == 1 &&
1500 ffs->state == FFS_DEACTIVATED) {
1501 ffs->state = FFS_CLOSING;
1502 ffs_data_reset(ffs);
1503 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001504}
1505
1506static void ffs_data_put(struct ffs_data *ffs)
1507{
1508 ENTER();
1509
1510 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001511 pr_info("%s(): freeing\n", __func__);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001512 ffs_data_clear(ffs);
Andi Kleen647d5582012-03-16 12:01:02 -07001513 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001514 waitqueue_active(&ffs->ep0req_completion.wait));
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001515 kfree(ffs->dev_name);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001516 kfree(ffs);
1517 }
1518}
1519
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001520static void ffs_data_closed(struct ffs_data *ffs)
1521{
1522 ENTER();
1523
1524 if (atomic_dec_and_test(&ffs->opened)) {
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001525 if (ffs->no_disconnect) {
1526 ffs->state = FFS_DEACTIVATED;
1527 if (ffs->epfiles) {
1528 ffs_epfiles_destroy(ffs->epfiles,
1529 ffs->eps_count);
1530 ffs->epfiles = NULL;
1531 }
1532 if (ffs->setup_state == FFS_SETUP_PENDING)
1533 __ffs_ep0_stall(ffs);
1534 } else {
1535 ffs->state = FFS_CLOSING;
1536 ffs_data_reset(ffs);
1537 }
1538 }
1539 if (atomic_read(&ffs->opened) < 0) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001540 ffs->state = FFS_CLOSING;
1541 ffs_data_reset(ffs);
1542 }
1543
1544 ffs_data_put(ffs);
1545}
1546
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001547static struct ffs_data *ffs_data_new(void)
1548{
1549 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1550 if (unlikely(!ffs))
Felipe Balbif8800d42013-12-12 12:15:43 -06001551 return NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001552
1553 ENTER();
1554
1555 atomic_set(&ffs->ref, 1);
1556 atomic_set(&ffs->opened, 0);
1557 ffs->state = FFS_READ_DESCRIPTORS;
1558 mutex_init(&ffs->mutex);
1559 spin_lock_init(&ffs->eps_lock);
1560 init_waitqueue_head(&ffs->ev.waitq);
1561 init_completion(&ffs->ep0req_completion);
1562
1563 /* XXX REVISIT need to update it in some places, or do we? */
1564 ffs->ev.can_stall = 1;
1565
1566 return ffs;
1567}
1568
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001569static void ffs_data_clear(struct ffs_data *ffs)
1570{
1571 ENTER();
1572
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02001573 ffs_closed(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001574
1575 BUG_ON(ffs->gadget);
1576
1577 if (ffs->epfiles)
1578 ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1579
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01001580 if (ffs->ffs_eventfd)
1581 eventfd_ctx_put(ffs->ffs_eventfd);
1582
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05301583 kfree(ffs->raw_descs_data);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001584 kfree(ffs->raw_strings);
1585 kfree(ffs->stringtabs);
1586}
1587
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001588static void ffs_data_reset(struct ffs_data *ffs)
1589{
1590 ENTER();
1591
1592 ffs_data_clear(ffs);
1593
1594 ffs->epfiles = NULL;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05301595 ffs->raw_descs_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001596 ffs->raw_descs = NULL;
1597 ffs->raw_strings = NULL;
1598 ffs->stringtabs = NULL;
1599
1600 ffs->raw_descs_length = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001601 ffs->fs_descs_count = 0;
1602 ffs->hs_descs_count = 0;
Manu Gautam8d4e8972014-02-28 16:50:22 +05301603 ffs->ss_descs_count = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001604
1605 ffs->strings_count = 0;
1606 ffs->interfaces_count = 0;
1607 ffs->eps_count = 0;
1608
1609 ffs->ev.count = 0;
1610
1611 ffs->state = FFS_READ_DESCRIPTORS;
1612 ffs->setup_state = FFS_NO_SETUP;
1613 ffs->flags = 0;
1614}
1615
1616
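/*
 * Called when the function is bound to a composite device: reserves a block
 * of string IDs via usb_string_ids_n(), allocates the ep0 request and then
 * assigns consecutive IDs to every string of every language table.  Takes an
 * extra reference on ffs which functionfs_unbind() drops again.
 */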
1617static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1618{
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001619 struct usb_gadget_strings **lang;
1620 int first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001621
1622 ENTER();
1623
1624 if (WARN_ON(ffs->state != FFS_ACTIVE
1625 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1626 return -EBADFD;
1627
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001628 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1629 if (unlikely(first_id < 0))
1630 return first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001631
1632 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1633 if (unlikely(!ffs->ep0req))
1634 return -ENOMEM;
1635 ffs->ep0req->complete = ffs_ep0_complete;
1636 ffs->ep0req->context = ffs;
1637
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001638 lang = ffs->stringtabs;
Michal Nazarewiczf0688c82014-06-17 17:47:41 +02001639 if (lang) {
1640 for (; *lang; ++lang) {
1641 struct usb_string *str = (*lang)->strings;
1642 int id = first_id;
1643 for (; str->s; ++id, ++str)
1644 str->id = id;
1645 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001646 }
1647
1648 ffs->gadget = cdev->gadget;
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001649 ffs_data_get(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001650 return 0;
1651}
1652
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001653static void functionfs_unbind(struct ffs_data *ffs)
1654{
1655 ENTER();
1656
1657 if (!WARN_ON(!ffs->gadget)) {
1658 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1659 ffs->ep0req = NULL;
1660 ffs->gadget = NULL;
Andrzej Pietrasiewicze2190a92012-03-12 12:55:41 +01001661 clear_bit(FFS_FL_BOUND, &ffs->flags);
Dan Carpenterdf498992013-08-23 11:16:15 +03001662 ffs_data_put(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001663 }
1664}
1665
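/*
 * Creates one epfile per endpoint declared in the descriptors (indexed from
 * 1).  With FUNCTIONFS_VIRTUAL_ADDR the files are named after the endpoint
 * address taken from the descriptors ("ep%02x"); otherwise they get
 * sequential names "ep1", "ep2", and so on.
 */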
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001666static int ffs_epfiles_create(struct ffs_data *ffs)
1667{
1668 struct ffs_epfile *epfile, *epfiles;
1669 unsigned i, count;
1670
1671 ENTER();
1672
1673 count = ffs->eps_count;
Thomas Meyer9823a522011-11-29 22:08:00 +01001674 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001675 if (!epfiles)
1676 return -ENOMEM;
1677
1678 epfile = epfiles;
1679 for (i = 1; i <= count; ++i, ++epfile) {
1680 epfile->ffs = ffs;
1681 mutex_init(&epfile->mutex);
1682 init_waitqueue_head(&epfile->wait);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02001683 if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
Mario Schuknechtacba23f2015-01-26 20:40:21 +01001684 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02001685 else
Mario Schuknechtacba23f2015-01-26 20:40:21 +01001686 sprintf(epfile->name, "ep%u", i);
1687 epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
Al Viro1bb27ca2014-09-03 13:32:19 -04001688 epfile,
1689 &ffs_epfile_operations);
1690 if (unlikely(!epfile->dentry)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001691 ffs_epfiles_destroy(epfiles, i - 1);
1692 return -ENOMEM;
1693 }
1694 }
1695
1696 ffs->epfiles = epfiles;
1697 return 0;
1698}
1699
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001700static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1701{
1702 struct ffs_epfile *epfile = epfiles;
1703
1704 ENTER();
1705
1706 for (; count; --count, ++epfile) {
1707 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1708 waitqueue_active(&epfile->wait));
1709 if (epfile->dentry) {
1710 d_delete(epfile->dentry);
1711 dput(epfile->dentry);
1712 epfile->dentry = NULL;
1713 }
1714 }
1715
1716 kfree(epfiles);
1717}
1718
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001719static void ffs_func_eps_disable(struct ffs_function *func)
1720{
1721 struct ffs_ep *ep = func->eps;
1722 struct ffs_epfile *epfile = func->ffs->epfiles;
1723 unsigned count = func->ffs->eps_count;
1724 unsigned long flags;
1725
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001726 do {
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001727 if (epfile)
1728 mutex_lock(&epfile->mutex);
1729 spin_lock_irqsave(&func->ffs->eps_lock, flags);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001730 /* pending requests get nuked */
1731 if (likely(ep->ep))
1732 usb_ep_disable(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001733 ++ep;
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001734 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001735
1736 if (epfile) {
1737 epfile->ep = NULL;
Michal Nazarewicz9353afb2016-05-21 20:47:35 +02001738 kfree(epfile->read_buffer);
1739 epfile->read_buffer = NULL;
1740 mutex_unlock(&epfile->mutex);
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01001741 ++epfile;
1742 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001743 } while (--count);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001744}
1745
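/*
 * Enables all endpoints of the function under eps_lock.  Descriptor slot
 * selection below: index 2 for super speed, 1 for high speed, 0 for full
 * speed, falling back to the nearest lower-speed descriptor that was
 * actually provided; waiters on the epfile are woken once the endpoint is
 * enabled.
 */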
1746static int ffs_func_eps_enable(struct ffs_function *func)
1747{
1748 struct ffs_data *ffs = func->ffs;
1749 struct ffs_ep *ep = func->eps;
1750 struct ffs_epfile *epfile = ffs->epfiles;
1751 unsigned count = ffs->eps_count;
1752 unsigned long flags;
1753 int ret = 0;
1754
1755 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1756 do {
1757 struct usb_endpoint_descriptor *ds;
Manu Gautam8d4e8972014-02-28 16:50:22 +05301758 int desc_idx;
1759
1760 if (ffs->gadget->speed == USB_SPEED_SUPER)
1761 desc_idx = 2;
1762 else if (ffs->gadget->speed == USB_SPEED_HIGH)
1763 desc_idx = 1;
1764 else
1765 desc_idx = 0;
1766
1767 /* fall-back to lower speed if desc missing for current speed */
1768 do {
1769 ds = ep->descs[desc_idx];
1770 } while (!ds && --desc_idx >= 0);
1771
1772 if (!ds) {
1773 ret = -EINVAL;
1774 break;
1775 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001776
1777 ep->ep->driver_data = ep;
Tatyana Brokhman72c973d2011-06-28 16:33:48 +03001778 ep->ep->desc = ds;
1779 ret = usb_ep_enable(ep->ep);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001780 if (likely(!ret)) {
1781 epfile->ep = ep;
1782 epfile->in = usb_endpoint_dir_in(ds);
1783 epfile->isoc = usb_endpoint_xfer_isoc(ds);
1784 } else {
1785 break;
1786 }
1787
1788 wake_up(&epfile->wait);
1789
1790 ++ep;
1791 ++epfile;
1792 } while (--count);
1793 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1794
1795 return ret;
1796}
1797
1798
1799/* Parsing and building descriptors and strings *****************************/
1800
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001801/*
 * This function checks whether the memory pointed to by data holds a valid
 * USB descriptor and records how many interfaces, endpoints and strings are
 * required by the given configuration.  Returns the length of the descriptor
 * on success or a negative error code if the data is invalid.
1806 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001807
1808enum ffs_entity_type {
1809 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1810};
1811
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02001812enum ffs_os_desc_type {
1813 FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
1814};
1815
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001816typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1817 u8 *valuep,
1818 struct usb_descriptor_header *desc,
1819 void *priv);
1820
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02001821typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
1822 struct usb_os_desc_header *h, void *data,
1823 unsigned len, void *priv);
1824
Andrzej Pietrasiewiczf96cbd12014-07-09 12:20:06 +02001825static int __must_check ffs_do_single_desc(char *data, unsigned len,
1826 ffs_entity_callback entity,
1827 void *priv)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001828{
1829 struct usb_descriptor_header *_ds = (void *)data;
1830 u8 length;
1831 int ret;
1832
1833 ENTER();
1834
1835 /* At least two bytes are required: length and type */
1836 if (len < 2) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001837 pr_vdebug("descriptor too short\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001838 return -EINVAL;
1839 }
1840
	/* Do we have at least as many bytes as the descriptor takes? */
1842 length = _ds->bLength;
1843 if (len < length) {
		pr_vdebug("descriptor longer than available data\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001845 return -EINVAL;
1846 }
1847
1848#define __entity_check_INTERFACE(val) 1
1849#define __entity_check_STRING(val) (val)
1850#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1851#define __entity(type, val) do { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001852 pr_vdebug("entity " #type "(%02x)\n", (val)); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001853 if (unlikely(!__entity_check_ ##type(val))) { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001854 pr_vdebug("invalid entity's value\n"); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001855 return -EINVAL; \
1856 } \
1857 ret = entity(FFS_ ##type, &val, _ds, priv); \
1858 if (unlikely(ret < 0)) { \
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001859 pr_debug("entity " #type "(%02x); ret = %d\n", \
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01001860 (val), ret); \
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001861 return ret; \
1862 } \
1863 } while (0)
1864
1865 /* Parse descriptor depending on type. */
1866 switch (_ds->bDescriptorType) {
1867 case USB_DT_DEVICE:
1868 case USB_DT_CONFIG:
1869 case USB_DT_STRING:
1870 case USB_DT_DEVICE_QUALIFIER:
1871 /* function can't have any of those */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001872 pr_vdebug("descriptor reserved for gadget: %d\n",
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001873 _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001874 return -EINVAL;
1875
1876 case USB_DT_INTERFACE: {
1877 struct usb_interface_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001878 pr_vdebug("interface descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001879 if (length != sizeof *ds)
1880 goto inv_length;
1881
1882 __entity(INTERFACE, ds->bInterfaceNumber);
1883 if (ds->iInterface)
1884 __entity(STRING, ds->iInterface);
1885 }
1886 break;
1887
1888 case USB_DT_ENDPOINT: {
1889 struct usb_endpoint_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001890 pr_vdebug("endpoint descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001891 if (length != USB_DT_ENDPOINT_SIZE &&
1892 length != USB_DT_ENDPOINT_AUDIO_SIZE)
1893 goto inv_length;
1894 __entity(ENDPOINT, ds->bEndpointAddress);
1895 }
1896 break;
1897
Koen Beel560f1182012-05-30 20:43:37 +02001898 case HID_DT_HID:
1899 pr_vdebug("hid descriptor\n");
1900 if (length != sizeof(struct hid_descriptor))
1901 goto inv_length;
1902 break;
1903
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001904 case USB_DT_OTG:
1905 if (length != sizeof(struct usb_otg_descriptor))
1906 goto inv_length;
1907 break;
1908
1909 case USB_DT_INTERFACE_ASSOCIATION: {
1910 struct usb_interface_assoc_descriptor *ds = (void *)_ds;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001911 pr_vdebug("interface association descriptor\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001912 if (length != sizeof *ds)
1913 goto inv_length;
1914 if (ds->iFunction)
1915 __entity(STRING, ds->iFunction);
1916 }
1917 break;
1918
Manu Gautam8d4e8972014-02-28 16:50:22 +05301919 case USB_DT_SS_ENDPOINT_COMP:
1920 pr_vdebug("EP SS companion descriptor\n");
1921 if (length != sizeof(struct usb_ss_ep_comp_descriptor))
1922 goto inv_length;
1923 break;
1924
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001925 case USB_DT_OTHER_SPEED_CONFIG:
1926 case USB_DT_INTERFACE_POWER:
1927 case USB_DT_DEBUG:
1928 case USB_DT_SECURITY:
1929 case USB_DT_CS_RADIO_CONTROL:
1930 /* TODO */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001931 pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001932 return -EINVAL;
1933
1934 default:
1935 /* We should never be here */
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001936 pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001937 return -EINVAL;
1938
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001939inv_length:
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001940 pr_vdebug("invalid length: %d (descriptor %d)\n",
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01001941 _ds->bLength, _ds->bDescriptorType);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001942 return -EINVAL;
1943 }
1944
1945#undef __entity
1946#undef __entity_check_DESCRIPTOR
1947#undef __entity_check_INTERFACE
1948#undef __entity_check_STRING
1949#undef __entity_check_ENDPOINT
1950
1951 return length;
1952}
1953
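/*
 * Walks `count' descriptors stored in `data'.  For every descriptor the
 * entity callback is first invoked with FFS_DESCRIPTOR (the whole-descriptor
 * entity) and ffs_do_single_desc() then reports the contained interface,
 * string and endpoint entities.  Returns the number of bytes consumed or a
 * negative error code.
 */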
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001954static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
1955 ffs_entity_callback entity, void *priv)
1956{
1957 const unsigned _len = len;
1958 unsigned long num = 0;
1959
1960 ENTER();
1961
1962 for (;;) {
1963 int ret;
1964
1965 if (num == count)
1966 data = NULL;
1967
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001968 /* Record "descriptor" entity */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001969 ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
1970 if (unlikely(ret < 0)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001971 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01001972 num, ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001973 return ret;
1974 }
1975
1976 if (!data)
1977 return _len - len;
1978
Andrzej Pietrasiewiczf96cbd12014-07-09 12:20:06 +02001979 ret = ffs_do_single_desc(data, len, entity, priv);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001980 if (unlikely(ret < 0)) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001981 pr_debug("%s returns %d\n", __func__, ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001982 return ret;
1983 }
1984
1985 len -= ret;
1986 data += ret;
1987 ++num;
1988 }
1989}
1990
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001991static int __ffs_data_do_entity(enum ffs_entity_type type,
1992 u8 *valuep, struct usb_descriptor_header *desc,
1993 void *priv)
1994{
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02001995 struct ffs_desc_helper *helper = priv;
1996 struct usb_endpoint_descriptor *d;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001997
1998 ENTER();
1999
2000 switch (type) {
2001 case FFS_DESCRIPTOR:
2002 break;
2003
2004 case FFS_INTERFACE:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002005 /*
2006 * Interfaces are indexed from zero so if we
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002007 * encountered interface "n" then there are at least
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002008 * "n+1" interfaces.
2009 */
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002010 if (*valuep >= helper->interfaces_count)
2011 helper->interfaces_count = *valuep + 1;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002012 break;
2013
2014 case FFS_STRING:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002015 /*
2016 * Strings are indexed from 1 (0 is magic ;) reserved
2017 * for languages list or some such)
2018 */
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002019 if (*valuep > helper->ffs->strings_count)
2020 helper->ffs->strings_count = *valuep;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002021 break;
2022
2023 case FFS_ENDPOINT:
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002024 d = (void *)desc;
2025 helper->eps_count++;
2026 if (helper->eps_count >= 15)
2027 return -EINVAL;
2028 /* Check if descriptors for any speed were already parsed */
2029 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
2030 helper->ffs->eps_addrmap[helper->eps_count] =
2031 d->bEndpointAddress;
2032 else if (helper->ffs->eps_addrmap[helper->eps_count] !=
2033 d->bEndpointAddress)
2034 return -EINVAL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002035 break;
2036 }
2037
2038 return 0;
2039}
2040
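/*
 * Parses the fixed OS descriptor header.  bcdVersion must be 1 and wIndex
 * selects the feature descriptor type: 0x4 for "Extended Compatibility ID"
 * and 0x5 for "Extended Properties", mirroring the Microsoft OS descriptors
 * specification.
 */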
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002041static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
2042 struct usb_os_desc_header *desc)
2043{
2044 u16 bcd_version = le16_to_cpu(desc->bcdVersion);
2045 u16 w_index = le16_to_cpu(desc->wIndex);
2046
2047 if (bcd_version != 1) {
2048 pr_vdebug("unsupported os descriptors version: %d",
2049 bcd_version);
2050 return -EINVAL;
2051 }
2052 switch (w_index) {
2053 case 0x4:
2054 *next_type = FFS_OS_DESC_EXT_COMPAT;
2055 break;
2056 case 0x5:
2057 *next_type = FFS_OS_DESC_EXT_PROP;
2058 break;
2059 default:
2060 pr_vdebug("unsupported os descriptor type: %d", w_index);
2061 return -EINVAL;
2062 }
2063
2064 return sizeof(*desc);
2065}
2066
2067/*
2068 * Process all extended compatibility/extended property descriptors
2069 * of a feature descriptor
2070 */
2071static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
2072 enum ffs_os_desc_type type,
2073 u16 feature_count,
2074 ffs_os_desc_callback entity,
2075 void *priv,
2076 struct usb_os_desc_header *h)
2077{
2078 int ret;
2079 const unsigned _len = len;
2080
2081 ENTER();
2082
2083 /* loop over all ext compat/ext prop descriptors */
2084 while (feature_count--) {
2085 ret = entity(type, h, data, len, priv);
2086 if (unlikely(ret < 0)) {
2087 pr_debug("bad OS descriptor, type: %d\n", type);
2088 return ret;
2089 }
2090 data += ret;
2091 len -= ret;
2092 }
2093 return _len - len;
2094}
2095
2096/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2097static int __must_check ffs_do_os_descs(unsigned count,
2098 char *data, unsigned len,
2099 ffs_os_desc_callback entity, void *priv)
2100{
2101 const unsigned _len = len;
2102 unsigned long num = 0;
2103
2104 ENTER();
2105
2106 for (num = 0; num < count; ++num) {
2107 int ret;
2108 enum ffs_os_desc_type type;
2109 u16 feature_count;
2110 struct usb_os_desc_header *desc = (void *)data;
2111
2112 if (len < sizeof(*desc))
2113 return -EINVAL;
2114
2115 /*
2116 * Record "descriptor" entity.
2117 * Process dwLength, bcdVersion, wIndex, get b/wCount.
2118 * Move the data pointer to the beginning of extended
2119 * compatibilities proper or extended properties proper
2120 * portions of the data
2121 */
2122 if (le32_to_cpu(desc->dwLength) > len)
2123 return -EINVAL;
2124
2125 ret = __ffs_do_os_desc_header(&type, desc);
2126 if (unlikely(ret < 0)) {
2127 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2128 num, ret);
2129 return ret;
2130 }
2131 /*
2132 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2133 */
2134 feature_count = le16_to_cpu(desc->wCount);
2135 if (type == FFS_OS_DESC_EXT_COMPAT &&
2136 (feature_count > 255 || desc->Reserved))
2137 return -EINVAL;
2138 len -= ret;
2139 data += ret;
2140
2141 /*
2142 * Process all function/property descriptors
2143 * of this Feature Descriptor
2144 */
2145 ret = ffs_do_single_os_desc(data, len, type,
2146 feature_count, entity, priv, desc);
2147 if (unlikely(ret < 0)) {
2148 pr_debug("%s returns %d\n", __func__, ret);
2149 return ret;
2150 }
2151
2152 len -= ret;
2153 data += ret;
2154 }
2155 return _len - len;
2156}
2157
2158/**
2159 * Validate contents of the buffer from userspace related to OS descriptors.
2160 */
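/*
 * Extended property wire format assumed by the length check below (sketch):
 * dwSize (4) | dwPropertyDataType (4) | wPropertyNameLength (2) |
 * name (pnl bytes) | dwPropertyDataLength (4) | data (pdl bytes),
 * which is where the "14 + pnl + pdl" figure comes from.
 */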
2161static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2162 struct usb_os_desc_header *h, void *data,
2163 unsigned len, void *priv)
2164{
2165 struct ffs_data *ffs = priv;
2166 u8 length;
2167
2168 ENTER();
2169
2170 switch (type) {
2171 case FFS_OS_DESC_EXT_COMPAT: {
2172 struct usb_ext_compat_desc *d = data;
2173 int i;
2174
2175 if (len < sizeof(*d) ||
2176 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
Jim Lin53642392016-05-20 18:13:19 +08002177 !d->Reserved1)
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002178 return -EINVAL;
2179 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2180 if (d->Reserved2[i])
2181 return -EINVAL;
2182
2183 length = sizeof(struct usb_ext_compat_desc);
2184 }
2185 break;
2186 case FFS_OS_DESC_EXT_PROP: {
2187 struct usb_ext_prop_desc *d = data;
2188 u32 type, pdl;
2189 u16 pnl;
2190
2191 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2192 return -EINVAL;
2193 length = le32_to_cpu(d->dwSize);
2194 type = le32_to_cpu(d->dwPropertyDataType);
2195 if (type < USB_EXT_PROP_UNICODE ||
2196 type > USB_EXT_PROP_UNICODE_MULTI) {
2197 pr_vdebug("unsupported os descriptor property type: %d",
2198 type);
2199 return -EINVAL;
2200 }
2201 pnl = le16_to_cpu(d->wPropertyNameLength);
2202 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2203 if (length != 14 + pnl + pdl) {
2204 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2205 length, pnl, pdl, type);
2206 return -EINVAL;
2207 }
2208 ++ffs->ms_os_descs_ext_prop_count;
2209 /* property name reported to the host as "WCHAR"s */
2210 ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
2211 ffs->ms_os_descs_ext_prop_data_len += pdl;
2212 }
2213 break;
2214 default:
2215 pr_vdebug("unknown descriptor: %d\n", type);
2216 return -EINVAL;
2217 }
2218 return length;
2219}
2220
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002221static int __ffs_data_got_descs(struct ffs_data *ffs,
2222 char *const _data, size_t len)
2223{
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302224 char *data = _data, *raw_descs;
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002225 unsigned os_descs_count = 0, counts[3], flags;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302226 int ret = -EINVAL, i;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002227 struct ffs_desc_helper helper;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002228
2229 ENTER();
2230
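	/*
	 * Illustrative layout of the blob parsed below (sketch only, see the
	 * code and the functionfs uapi header for the authoritative format);
	 * all fields are LE32 unless noted.  V2 flavour:
	 *
	 *   magic | length | flags
	 *   [ eventfd ]                               if FUNCTIONFS_EVENTFD
	 *   [ fs_count ] [ hs_count ] [ ss_count ] [ os_count ]
	 *                                             one count per flag set
	 *   full-, high-, super-speed and OS descriptors, in that order
	 *
	 * The legacy magic carries just magic | length | fs_count | hs_count.
	 */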
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302231 if (get_unaligned_le32(data + 4) != len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002232 goto error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002233
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302234 switch (get_unaligned_le32(data)) {
2235 case FUNCTIONFS_DESCRIPTORS_MAGIC:
2236 flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302237 data += 8;
2238 len -= 8;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302239 break;
2240 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
2241 flags = get_unaligned_le32(data + 8);
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002242 ffs->user_flags = flags;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302243 if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
2244 FUNCTIONFS_HAS_HS_DESC |
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002245 FUNCTIONFS_HAS_SS_DESC |
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002246 FUNCTIONFS_HAS_MS_OS_DESC |
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002247 FUNCTIONFS_VIRTUAL_ADDR |
Felix Hädicke54dfce62016-06-22 01:12:07 +02002248 FUNCTIONFS_EVENTFD |
2249 FUNCTIONFS_ALL_CTRL_RECIP)) {
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302250 ret = -ENOSYS;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002251 goto error;
2252 }
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302253 data += 12;
2254 len -= 12;
2255 break;
2256 default:
2257 goto error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002258 }
2259
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002260 if (flags & FUNCTIONFS_EVENTFD) {
2261 if (len < 4)
2262 goto error;
2263 ffs->ffs_eventfd =
2264 eventfd_ctx_fdget((int)get_unaligned_le32(data));
2265 if (IS_ERR(ffs->ffs_eventfd)) {
2266 ret = PTR_ERR(ffs->ffs_eventfd);
2267 ffs->ffs_eventfd = NULL;
2268 goto error;
2269 }
2270 data += 4;
2271 len -= 4;
2272 }
2273
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302274 /* Read fs_count, hs_count and ss_count (if present) */
2275 for (i = 0; i < 3; ++i) {
2276 if (!(flags & (1 << i))) {
2277 counts[i] = 0;
2278 } else if (len < 4) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002279 goto error;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302280 } else {
2281 counts[i] = get_unaligned_le32(data);
2282 data += 4;
2283 len -= 4;
2284 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002285 }
	if (flags & (1 << i)) {
		if (len < 4)
			goto error;
		os_descs_count = get_unaligned_le32(data);
		data += 4;
		len -= 4;
	}
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002291
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302292 /* Read descriptors */
2293 raw_descs = data;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002294 helper.ffs = ffs;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302295 for (i = 0; i < 3; ++i) {
2296 if (!counts[i])
2297 continue;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002298 helper.interfaces_count = 0;
2299 helper.eps_count = 0;
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302300 ret = ffs_do_descs(counts[i], data, len,
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002301 __ffs_data_do_entity, &helper);
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302302 if (ret < 0)
2303 goto error;
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002304 if (!ffs->eps_count && !ffs->interfaces_count) {
2305 ffs->eps_count = helper.eps_count;
2306 ffs->interfaces_count = helper.interfaces_count;
2307 } else {
2308 if (ffs->eps_count != helper.eps_count) {
2309 ret = -EINVAL;
2310 goto error;
2311 }
2312 if (ffs->interfaces_count != helper.interfaces_count) {
2313 ret = -EINVAL;
2314 goto error;
2315 }
2316 }
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302317 data += ret;
2318 len -= ret;
2319 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002320 if (os_descs_count) {
2321 ret = ffs_do_os_descs(os_descs_count, data, len,
2322 __ffs_data_do_os_desc, ffs);
2323 if (ret < 0)
2324 goto error;
2325 data += ret;
2326 len -= ret;
2327 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002328
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302329 if (raw_descs == data || len) {
2330 ret = -EINVAL;
2331 goto error;
2332 }
2333
2334 ffs->raw_descs_data = _data;
2335 ffs->raw_descs = raw_descs;
2336 ffs->raw_descs_length = data - raw_descs;
2337 ffs->fs_descs_count = counts[0];
2338 ffs->hs_descs_count = counts[1];
2339 ffs->ss_descs_count = counts[2];
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002340 ffs->ms_os_descs_count = os_descs_count;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002341
2342 return 0;
2343
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002344error:
2345 kfree(_data);
2346 return ret;
2347}
2348
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002349static int __ffs_data_got_strings(struct ffs_data *ffs,
2350 char *const _data, size_t len)
2351{
2352 u32 str_count, needed_count, lang_count;
2353 struct usb_gadget_strings **stringtabs, *t;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002354 const char *data = _data;
Michal Nazarewicz872ce512016-05-31 14:17:21 +02002355 struct usb_string *s;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002356
2357 ENTER();
2358
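	/*
	 * Rough layout of the strings blob being parsed (sketch only, the
	 * checks below are authoritative):
	 *
	 *   magic (LE32) | length (LE32) | str_count (LE32) | lang_count (LE32)
	 *   then for each language:
	 *     language id (LE16) followed by str_count NUL-terminated strings
	 */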
2359 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2360 get_unaligned_le32(data + 4) != len))
2361 goto error;
2362 str_count = get_unaligned_le32(data + 8);
2363 lang_count = get_unaligned_le32(data + 12);
2364
2365 /* if one is zero the other must be zero */
2366 if (unlikely(!str_count != !lang_count))
2367 goto error;
2368
2369 /* Do we have at least as many strings as descriptors need? */
2370 needed_count = ffs->strings_count;
2371 if (unlikely(str_count < needed_count))
2372 goto error;
2373
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002374 /*
2375 * If we don't need any strings just return and free all
2376 * memory.
2377 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002378 if (!needed_count) {
2379 kfree(_data);
2380 return 0;
2381 }
2382
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002383 /* Allocate everything in one chunk so there's less maintenance. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002384 {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002385 unsigned i = 0;
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002386 vla_group(d);
2387 vla_item(d, struct usb_gadget_strings *, stringtabs,
2388 lang_count + 1);
2389 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
2390 vla_item(d, struct usb_string, strings,
2391 lang_count*(needed_count+1));
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002392
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002393 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2394
2395 if (unlikely(!vlabuf)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002396 kfree(_data);
2397 return -ENOMEM;
2398 }
2399
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002400 /* Initialize the VLA pointers */
2401 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2402 t = vla_ptr(vlabuf, d, stringtab);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002403 i = lang_count;
2404 do {
2405 *stringtabs++ = t++;
2406 } while (--i);
2407 *stringtabs = NULL;
2408
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002409 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2410 stringtabs = vla_ptr(vlabuf, d, stringtabs);
2411 t = vla_ptr(vlabuf, d, stringtab);
2412 s = vla_ptr(vlabuf, d, strings);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002413 }
2414
2415 /* For each language */
2416 data += 16;
2417 len -= 16;
2418
2419 do { /* lang_count > 0 so we can use do-while */
2420 unsigned needed = needed_count;
2421
2422 if (unlikely(len < 3))
2423 goto error_free;
2424 t->language = get_unaligned_le16(data);
2425 t->strings = s;
2426 ++t;
2427
2428 data += 2;
2429 len -= 2;
2430
2431 /* For each string */
2432 do { /* str_count > 0 so we can use do-while */
2433 size_t length = strnlen(data, len);
2434
2435 if (unlikely(length == len))
2436 goto error_free;
2437
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002438 /*
			 * User may provide more strings than we need,
2440 * if that's the case we simply ignore the
2441 * rest
2442 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002443 if (likely(needed)) {
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002444 /*
2445 * s->id will be set while adding
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002446 * function to configuration so for
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002447 * now just leave garbage here.
2448 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002449 s->s = data;
2450 --needed;
2451 ++s;
2452 }
2453
2454 data += length + 1;
2455 len -= length + 1;
2456 } while (--str_count);
2457
2458 s->id = 0; /* terminator */
2459 s->s = NULL;
2460 ++s;
2461
2462 } while (--lang_count);
2463
2464 /* Some garbage left? */
2465 if (unlikely(len))
2466 goto error_free;
2467
2468 /* Done! */
2469 ffs->stringtabs = stringtabs;
2470 ffs->raw_strings = _data;
2471
2472 return 0;
2473
2474error_free:
2475 kfree(stringtabs);
2476error:
2477 kfree(_data);
2478 return -EINVAL;
2479}
2480
2481
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002482/* Events handling and management *******************************************/
2483
2484static void __ffs_event_add(struct ffs_data *ffs,
2485 enum usb_functionfs_event_type type)
2486{
2487 enum usb_functionfs_event_type rem_type1, rem_type2 = type;
2488 int neg = 0;
2489
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002490 /*
2491 * Abort any unhandled setup
2492 *
2493 * We do not need to worry about some cmpxchg() changing value
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002494 * of ffs->setup_state without holding the lock because when
2495 * state is FFS_SETUP_PENDING cmpxchg() in several places in
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002496 * the source does nothing.
2497 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002498 if (ffs->setup_state == FFS_SETUP_PENDING)
Michal Nazarewicze46318a2014-02-10 10:42:40 +01002499 ffs->setup_state = FFS_SETUP_CANCELLED;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002500
Michal Nazarewicz67913bb2014-09-10 17:50:24 +02002501 /*
2502 * Logic of this function guarantees that there are at most four pending
	 * events on ffs->ev.types queue. This is important because the queue
2504 * has space for four elements only and __ffs_ep0_read_events function
2505 * depends on that limit as well. If more event types are added, those
2506 * limits have to be revisited or guaranteed to still hold.
2507 */
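	/*
	 * For example (illustrative only): a second SUSPEND replaces a queued
	 * SUSPEND, a RESUME drops a queued SUSPEND or RESUME before queueing
	 * itself, and BIND/UNBIND/ENABLE/DISABLE purge everything except
	 * pending suspend/resume events.
	 */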
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002508 switch (type) {
2509 case FUNCTIONFS_RESUME:
2510 rem_type2 = FUNCTIONFS_SUSPEND;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002511 /* FALL THROUGH */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002512 case FUNCTIONFS_SUSPEND:
2513 case FUNCTIONFS_SETUP:
2514 rem_type1 = type;
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002515 /* Discard all similar events */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002516 break;
2517
2518 case FUNCTIONFS_BIND:
2519 case FUNCTIONFS_UNBIND:
2520 case FUNCTIONFS_DISABLE:
2521 case FUNCTIONFS_ENABLE:
		/* Discard everything other than power management. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002523 rem_type1 = FUNCTIONFS_SUSPEND;
2524 rem_type2 = FUNCTIONFS_RESUME;
2525 neg = 1;
2526 break;
2527
2528 default:
Michal Nazarewiczfe00bcb2014-09-11 18:52:49 +02002529 WARN(1, "%d: unknown event, this should not happen\n", type);
2530 return;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002531 }
2532
2533 {
2534 u8 *ev = ffs->ev.types, *out = ev;
2535 unsigned n = ffs->ev.count;
2536 for (; n; --n, ++ev)
2537 if ((*ev == rem_type1 || *ev == rem_type2) == neg)
2538 *out++ = *ev;
2539 else
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002540 pr_vdebug("purging event %d\n", *ev);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002541 ffs->ev.count = out - ffs->ev.types;
2542 }
2543
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002544 pr_vdebug("adding event %d\n", type);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002545 ffs->ev.types[ffs->ev.count++] = type;
2546 wake_up_locked(&ffs->ev.waitq);
Robert Baldyga5e33f6f2015-01-23 13:41:01 +01002547 if (ffs->ffs_eventfd)
2548 eventfd_signal(ffs->ffs_eventfd, 1);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002549}
2550
2551static void ffs_event_add(struct ffs_data *ffs,
2552 enum usb_functionfs_event_type type)
2553{
2554 unsigned long flags;
2555 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2556 __ffs_event_add(ffs, type);
2557 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2558}
2559
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002560/* Bind/unbind USB function hooks *******************************************/
2561
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002562static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
2563{
2564 int i;
2565
2566 for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
2567 if (ffs->eps_addrmap[i] == endpoint_address)
2568 return i;
2569 return -ENOENT;
2570}
2571
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002572static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
2573 struct usb_descriptor_header *desc,
2574 void *priv)
2575{
2576 struct usb_endpoint_descriptor *ds = (void *)desc;
2577 struct ffs_function *func = priv;
2578 struct ffs_ep *ffs_ep;
Dan Carpenter85b06f52014-09-09 15:06:09 +03002579 unsigned ep_desc_id;
2580 int idx;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302581 static const char *speed_names[] = { "full", "high", "super" };
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002582
2583 if (type != FFS_DESCRIPTOR)
2584 return 0;
2585
Manu Gautam8d4e8972014-02-28 16:50:22 +05302586 /*
2587 * If ss_descriptors is not NULL, we are reading super speed
2588 * descriptors; if hs_descriptors is not NULL, we are reading high
2589 * speed descriptors; otherwise, we are reading full speed
2590 * descriptors.
2591 */
2592 if (func->function.ss_descriptors) {
2593 ep_desc_id = 2;
2594 func->function.ss_descriptors[(long)valuep] = desc;
2595 } else if (func->function.hs_descriptors) {
2596 ep_desc_id = 1;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002597 func->function.hs_descriptors[(long)valuep] = desc;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302598 } else {
2599 ep_desc_id = 0;
Sebastian Andrzej Siewior10287ba2012-10-22 22:15:06 +02002600 func->function.fs_descriptors[(long)valuep] = desc;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302601 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002602
2603 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
2604 return 0;
2605
Robert Baldyga6d5c1c72014-08-25 11:16:27 +02002606 idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
2607 if (idx < 0)
2608 return idx;
2609
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002610 ffs_ep = func->eps + idx;
2611
Manu Gautam8d4e8972014-02-28 16:50:22 +05302612 if (unlikely(ffs_ep->descs[ep_desc_id])) {
2613 pr_err("two %sspeed descriptors for EP %d\n",
2614 speed_names[ep_desc_id],
Michal Nazarewiczd8df0b62010-11-12 14:29:29 +01002615 ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002616 return -EINVAL;
2617 }
Manu Gautam8d4e8972014-02-28 16:50:22 +05302618 ffs_ep->descs[ep_desc_id] = ds;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002619
2620 ffs_dump_mem(": Original ep desc", ds, ds->bLength);
2621 if (ffs_ep->ep) {
2622 ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
2623 if (!ds->wMaxPacketSize)
2624 ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
2625 } else {
2626 struct usb_request *req;
2627 struct usb_ep *ep;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002628 u8 bEndpointAddress;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002629
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002630 /*
2631 * We back up bEndpointAddress because autoconfig overwrites
2632 * it with physical endpoint address.
2633 */
2634 bEndpointAddress = ds->bEndpointAddress;
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002635 pr_vdebug("autoconfig\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002636 ep = usb_ep_autoconfig(func->gadget, ds);
2637 if (unlikely(!ep))
2638 return -ENOTSUPP;
Joe Perchescc7e6052010-11-14 19:04:49 -08002639 ep->driver_data = func->eps + idx;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002640
2641 req = usb_ep_alloc_request(ep, GFP_KERNEL);
2642 if (unlikely(!req))
2643 return -ENOMEM;
2644
2645 ffs_ep->ep = ep;
2646 ffs_ep->req = req;
2647 func->eps_revmap[ds->bEndpointAddress &
2648 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02002649 /*
2650 * If we use virtual address mapping, we restore
2651 * original bEndpointAddress value.
2652 */
2653 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
2654 ds->bEndpointAddress = bEndpointAddress;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002655 }
2656 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
2657
2658 return 0;
2659}
2660
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002661static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
2662 struct usb_descriptor_header *desc,
2663 void *priv)
2664{
2665 struct ffs_function *func = priv;
2666 unsigned idx;
2667 u8 newValue;
2668
2669 switch (type) {
2670 default:
2671 case FFS_DESCRIPTOR:
2672 /* Handled in previous pass by __ffs_func_bind_do_descs() */
2673 return 0;
2674
2675 case FFS_INTERFACE:
2676 idx = *valuep;
2677 if (func->interfaces_nums[idx] < 0) {
2678 int id = usb_interface_id(func->conf, &func->function);
2679 if (unlikely(id < 0))
2680 return id;
2681 func->interfaces_nums[idx] = id;
2682 }
2683 newValue = func->interfaces_nums[idx];
2684 break;
2685
2686 case FFS_STRING:
		/* Strings' IDs are allocated when ffs_data is bound to cdev */
2688 newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
2689 break;
2690
2691 case FFS_ENDPOINT:
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002692 /*
2693 * USB_DT_ENDPOINT are handled in
2694 * __ffs_func_bind_do_descs().
2695 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002696 if (desc->bDescriptorType == USB_DT_ENDPOINT)
2697 return 0;
2698
2699 idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
2700 if (unlikely(!func->eps[idx].ep))
2701 return -EINVAL;
2702
2703 {
2704 struct usb_endpoint_descriptor **descs;
2705 descs = func->eps[idx].descs;
2706 newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
2707 }
2708 break;
2709 }
2710
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002711 pr_vdebug("%02x -> %02x\n", *valuep, newValue);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002712 *valuep = newValue;
2713 return 0;
2714}
2715
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002716static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
2717 struct usb_os_desc_header *h, void *data,
2718 unsigned len, void *priv)
2719{
2720 struct ffs_function *func = priv;
2721 u8 length = 0;
2722
2723 switch (type) {
2724 case FFS_OS_DESC_EXT_COMPAT: {
2725 struct usb_ext_compat_desc *desc = data;
2726 struct usb_os_desc_table *t;
2727
2728 t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
2729 t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
2730 memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
2731 ARRAY_SIZE(desc->CompatibleID) +
2732 ARRAY_SIZE(desc->SubCompatibleID));
2733 length = sizeof(*desc);
2734 }
2735 break;
2736 case FFS_OS_DESC_EXT_PROP: {
2737 struct usb_ext_prop_desc *desc = data;
2738 struct usb_os_desc_table *t;
2739 struct usb_os_desc_ext_prop *ext_prop;
2740 char *ext_prop_name;
2741 char *ext_prop_data;
2742
2743 t = &func->function.os_desc_table[h->interface];
2744 t->if_id = func->interfaces_nums[h->interface];
2745
2746 ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
2747 func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
2748
2749 ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
2750 ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
2751 ext_prop->data_len = le32_to_cpu(*(u32 *)
2752 usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
2753 length = ext_prop->name_len + ext_prop->data_len + 14;
2754
2755 ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
2756 func->ffs->ms_os_descs_ext_prop_name_avail +=
2757 ext_prop->name_len;
2758
2759 ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
2760 func->ffs->ms_os_descs_ext_prop_data_avail +=
2761 ext_prop->data_len;
2762 memcpy(ext_prop_data,
2763 usb_ext_prop_data_ptr(data, ext_prop->name_len),
2764 ext_prop->data_len);
2765 /* unicode data reported to the host as "WCHAR"s */
2766 switch (ext_prop->type) {
2767 case USB_EXT_PROP_UNICODE:
2768 case USB_EXT_PROP_UNICODE_ENV:
2769 case USB_EXT_PROP_UNICODE_LINK:
2770 case USB_EXT_PROP_UNICODE_MULTI:
2771 ext_prop->data_len *= 2;
2772 break;
2773 }
2774 ext_prop->data = ext_prop_data;
2775
2776 memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
2777 ext_prop->name_len);
2778 /* property name reported to the host as "WCHAR"s */
2779 ext_prop->name_len *= 2;
2780 ext_prop->name = ext_prop_name;
2781
2782 t->os_desc->ext_prop_len +=
2783 ext_prop->name_len + ext_prop->data_len + 14;
2784 ++t->os_desc->ext_prop_count;
2785 list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
2786 }
2787 break;
2788 default:
2789 pr_vdebug("unknown descriptor: %d\n", type);
2790 }
2791
2792 return length;
2793}
2794
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01002795static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2796 struct usb_configuration *c)
2797{
2798 struct ffs_function *func = ffs_func_from_usb(f);
2799 struct f_fs_opts *ffs_opts =
2800 container_of(f->fi, struct f_fs_opts, func_inst);
2801 int ret;
2802
2803 ENTER();
2804
2805 /*
2806 * Legacy gadget triggers binding in functionfs_ready_callback,
2807 * which already uses locking; taking the same lock here would
2808 * cause a deadlock.
2809 *
2810 * Configfs-enabled gadgets however do need ffs_dev_lock.
2811 */
2812 if (!ffs_opts->no_configfs)
2813 ffs_dev_lock();
2814 ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
2815 func->ffs = ffs_opts->dev->ffs_data;
2816 if (!ffs_opts->no_configfs)
2817 ffs_dev_unlock();
2818 if (ret)
2819 return ERR_PTR(ret);
2820
2821 func->conf = c;
2822 func->gadget = c->cdev->gadget;
2823
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01002824 /*
2825 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
2826 * configurations are bound in sequence with list_for_each_entry,
2827 * in each configuration its functions are bound in sequence
2828 * with list_for_each_entry, so we assume no race condition
2829 * with regard to ffs_opts->bound access
2830 */
2831 if (!ffs_opts->refcnt) {
2832 ret = functionfs_bind(func->ffs, c->cdev);
2833 if (ret)
2834 return ERR_PTR(ret);
2835 }
2836 ffs_opts->refcnt++;
2837 func->function.strings = func->ffs->stringtabs;
2838
2839 return ffs_opts;
2840}
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01002841
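/*
 * Does the actual binding work: allocates a single chunk holding the ffs_ep
 * array, the per-speed usb_descriptor_header pointer tables, the interface
 * number map, the OS descriptor tables and a private copy of the raw
 * descriptors, then walks that copy twice: once to claim endpoints via
 * usb_ep_autoconfig() and once to rewrite interface and endpoint numbers.
 */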
2842static int _ffs_func_bind(struct usb_configuration *c,
2843 struct usb_function *f)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002844{
2845 struct ffs_function *func = ffs_func_from_usb(f);
2846 struct ffs_data *ffs = func->ffs;
2847
2848 const int full = !!func->ffs->fs_descs_count;
2849 const int high = gadget_is_dualspeed(func->gadget) &&
2850 func->ffs->hs_descs_count;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302851 const int super = gadget_is_superspeed(func->gadget) &&
2852 func->ffs->ss_descs_count;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002853
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002854 int fs_len, hs_len, ss_len, ret, i;
Dan Carpenter0015f912016-05-28 07:48:10 +03002855 struct ffs_ep *eps_ptr;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002856
2857 /* Make it a single chunk, less management later on */
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002858 vla_group(d);
2859 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2860 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2861 full ? ffs->fs_descs_count + 1 : 0);
2862 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2863 high ? ffs->hs_descs_count + 1 : 0);
Manu Gautam8d4e8972014-02-28 16:50:22 +05302864 vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
2865 super ? ffs->ss_descs_count + 1 : 0);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002866 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002867 vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
2868 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2869 vla_item_with_sz(d, char[16], ext_compat,
2870 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2871 vla_item_with_sz(d, struct usb_os_desc, os_desc,
2872 c->cdev->use_os_string ? ffs->interfaces_count : 0);
2873 vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
2874 ffs->ms_os_descs_ext_prop_count);
2875 vla_item_with_sz(d, char, ext_prop_name,
2876 ffs->ms_os_descs_ext_prop_name_len);
2877 vla_item_with_sz(d, char, ext_prop_data,
2878 ffs->ms_os_descs_ext_prop_data_len);
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302879 vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002880 char *vlabuf;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002881
2882 ENTER();
2883
	/* Has descriptors only for speeds the gadget does not support */
2885 if (unlikely(!(full | high | super)))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002886 return -ENOTSUPP;
2887
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002888 /* Allocate a single chunk, less management later on */
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002889 vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002890 if (unlikely(!vlabuf))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002891 return -ENOMEM;
2892
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002893 ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
2894 ffs->ms_os_descs_ext_prop_name_avail =
2895 vla_ptr(vlabuf, d, ext_prop_name);
2896 ffs->ms_os_descs_ext_prop_data_avail =
2897 vla_ptr(vlabuf, d, ext_prop_data);
2898
Michal Nazarewiczac8dde12014-02-28 16:50:23 +05302899 /* Copy descriptors */
2900 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
2901 ffs->raw_descs_length);
Manu Gautam8d4e8972014-02-28 16:50:22 +05302902
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002903 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
Dan Carpenter0015f912016-05-28 07:48:10 +03002904 eps_ptr = vla_ptr(vlabuf, d, eps);
2905 for (i = 0; i < ffs->eps_count; i++)
2906 eps_ptr[i].num = -1;
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002907
	/*
	 * Save pointers
	 * d_eps == vlabuf, func->eps used to kfree vlabuf later
	 */
2911 func->eps = vla_ptr(vlabuf, d, eps);
2912 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002913
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002914 /*
2915 * Go through all the endpoint descriptors and allocate
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002916 * endpoints first, so that later we can rewrite the endpoint
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002917 * numbers without worrying that it may be described later on.
2918 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002919 if (likely(full)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002920 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
Manu Gautam8d4e8972014-02-28 16:50:22 +05302921 fs_len = ffs_do_descs(ffs->fs_descs_count,
2922 vla_ptr(vlabuf, d, raw_descs),
2923 d_raw_descs__sz,
2924 __ffs_func_bind_do_descs, func);
2925 if (unlikely(fs_len < 0)) {
2926 ret = fs_len;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002927 goto error;
Manu Gautam8d4e8972014-02-28 16:50:22 +05302928 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002929 } else {
Manu Gautam8d4e8972014-02-28 16:50:22 +05302930 fs_len = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002931 }
2932
2933 if (likely(high)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002934 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
Manu Gautam8d4e8972014-02-28 16:50:22 +05302935 hs_len = ffs_do_descs(ffs->hs_descs_count,
2936 vla_ptr(vlabuf, d, raw_descs) + fs_len,
2937 d_raw_descs__sz - fs_len,
2938 __ffs_func_bind_do_descs, func);
2939 if (unlikely(hs_len < 0)) {
2940 ret = hs_len;
2941 goto error;
2942 }
2943 } else {
2944 hs_len = 0;
2945 }
2946
2947 if (likely(super)) {
2948 func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002949 ss_len = ffs_do_descs(ffs->ss_descs_count,
Manu Gautam8d4e8972014-02-28 16:50:22 +05302950 vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
2951 d_raw_descs__sz - fs_len - hs_len,
2952 __ffs_func_bind_do_descs, func);
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002953 if (unlikely(ss_len < 0)) {
2954 ret = ss_len;
Robert Baldyga88548942013-09-27 12:28:54 +02002955 goto error;
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002956 }
2957 } else {
2958 ss_len = 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002959 }
2960
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002961 /*
2962 * Now handle interface numbers allocation and interface and
2963 * endpoint numbers rewriting. We can do that in one go
2964 * now.
2965 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002966 ret = ffs_do_descs(ffs->fs_descs_count +
Manu Gautam8d4e8972014-02-28 16:50:22 +05302967 (high ? ffs->hs_descs_count : 0) +
2968 (super ? ffs->ss_descs_count : 0),
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002969 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002970 __ffs_func_bind_do_nums, func);
2971 if (unlikely(ret < 0))
2972 goto error;
2973
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002974 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
Jim Linc6010c82016-05-13 20:32:16 +08002975 if (c->cdev->use_os_string) {
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002976 for (i = 0; i < ffs->interfaces_count; ++i) {
2977 struct usb_os_desc *desc;
2978
2979 desc = func->function.os_desc_table[i].os_desc =
2980 vla_ptr(vlabuf, d, os_desc) +
2981 i * sizeof(struct usb_os_desc);
2982 desc->ext_compat_id =
2983 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2984 INIT_LIST_HEAD(&desc->ext_prop);
2985 }
Jim Linc6010c82016-05-13 20:32:16 +08002986 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2987 vla_ptr(vlabuf, d, raw_descs) +
2988 fs_len + hs_len + ss_len,
2989 d_raw_descs__sz - fs_len - hs_len -
2990 ss_len,
2991 __ffs_func_bind_do_os_desc, func);
2992 if (unlikely(ret < 0))
2993 goto error;
2994 }
Andrzej Pietrasiewiczf0175ab2014-07-09 12:20:08 +02002995 func->function.os_desc_n =
2996 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2997
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002998 /* And we're done */
2999 ffs_event_add(ffs, FUNCTIONFS_BIND);
3000 return 0;
3001
3002error:
3003 /* XXX Do we need to release all claimed endpoints here? */
3004 return ret;
3005}
3006
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003007static int ffs_func_bind(struct usb_configuration *c,
3008 struct usb_function *f)
3009{
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003010 struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
Robert Baldyga55d81122015-07-13 11:03:50 +02003011 struct ffs_function *func = ffs_func_from_usb(f);
3012 int ret;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003013
3014 if (IS_ERR(ffs_opts))
3015 return PTR_ERR(ffs_opts);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003016
Robert Baldyga55d81122015-07-13 11:03:50 +02003017 ret = _ffs_func_bind(c, f);
3018 if (ret && !--ffs_opts->refcnt)
3019 functionfs_unbind(func->ffs);
3020
3021 return ret;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003022}
3023
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003024
3025/* Other USB function hooks *************************************************/
3026
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003027static void ffs_reset_work(struct work_struct *work)
3028{
3029 struct ffs_data *ffs = container_of(work,
3030 struct ffs_data, reset_work);
3031 ffs_data_reset(ffs);
3032}
3033
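/*
 * Called for SET_INTERFACE and, with alt == (unsigned)-1, from
 * ffs_func_disable().  Disables the currently active endpoints and then
 * either reports FUNCTIONFS_DISABLE to user space or re-enables the
 * endpoints and reports FUNCTIONFS_ENABLE.
 */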
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003034static int ffs_func_set_alt(struct usb_function *f,
3035 unsigned interface, unsigned alt)
3036{
3037 struct ffs_function *func = ffs_func_from_usb(f);
3038 struct ffs_data *ffs = func->ffs;
3039 int ret = 0, intf;
3040
3041 if (alt != (unsigned)-1) {
3042 intf = ffs_func_revmap_intf(func, interface);
3043 if (unlikely(intf < 0))
3044 return intf;
3045 }
3046
3047 if (ffs->func)
3048 ffs_func_eps_disable(ffs->func);
3049
Robert Baldyga18d6b32f2014-12-18 09:55:10 +01003050 if (ffs->state == FFS_DEACTIVATED) {
3051 ffs->state = FFS_CLOSING;
3052 INIT_WORK(&ffs->reset_work, ffs_reset_work);
3053 schedule_work(&ffs->reset_work);
3054 return -ENODEV;
3055 }
3056
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003057 if (ffs->state != FFS_ACTIVE)
3058 return -ENODEV;
3059
3060 if (alt == (unsigned)-1) {
3061 ffs->func = NULL;
3062 ffs_event_add(ffs, FUNCTIONFS_DISABLE);
3063 return 0;
3064 }
3065
3066 ffs->func = func;
3067 ret = ffs_func_eps_enable(func);
3068 if (likely(ret >= 0))
3069 ffs_event_add(ffs, FUNCTIONFS_ENABLE);
3070 return ret;
3071}
3072
3073static void ffs_func_disable(struct usb_function *f)
3074{
3075 ffs_func_set_alt(f, 0, (unsigned)-1);
3076}
3077
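/*
 * Forward a control request to user space as a FUNCTIONFS_SETUP event,
 * with wIndex rewritten to the function-local interface or endpoint
 * number where applicable.
 */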
3078static int ffs_func_setup(struct usb_function *f,
3079 const struct usb_ctrlrequest *creq)
3080{
3081 struct ffs_function *func = ffs_func_from_usb(f);
3082 struct ffs_data *ffs = func->ffs;
3083 unsigned long flags;
3084 int ret;
3085
3086 ENTER();
3087
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003088 pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
3089 pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
3090 pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
3091 pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
3092 pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003093
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003094 /*
3095 * Most requests directed to an interface go through here
3096 * (notable exceptions being set/get interface), so we need
3097 * to handle them.  All others are either handled by the
3098 * composite layer or passed to usb_configuration->setup()
3099 * (if one is set).  Requests directed to an endpoint are
3100 * handled here as well, since that is straightforward.
3101 * Other recipient types are handled only when the user flag
3102 * FUNCTIONFS_ALL_CTRL_RECIP is set.
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003103 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003104 if (ffs->state != FFS_ACTIVE)
3105 return -ENODEV;
3106
3107 switch (creq->bRequestType & USB_RECIP_MASK) {
3108 case USB_RECIP_INTERFACE:
3109 ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
3110 if (unlikely(ret < 0))
3111 return ret;
3112 break;
3113
3114 case USB_RECIP_ENDPOINT:
3115 ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
3116 if (unlikely(ret < 0))
3117 return ret;
Robert Baldyga1b0bf882014-09-09 08:23:17 +02003118 if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
3119 ret = func->ffs->eps_addrmap[ret];
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003120 break;
3121
3122 default:
Felix Hädicke54dfce62016-06-22 01:12:07 +02003123 if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
3124 ret = le16_to_cpu(creq->wIndex);
3125 else
3126 return -EOPNOTSUPP;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003127 }
3128
3129 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
3130 ffs->ev.setup = *creq;
3131 ffs->ev.setup.wIndex = cpu_to_le16(ret);
3132 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3133 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3134
3135 return 0;
3136}
3137
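/*
 * Tell the composite layer whether this function is willing to handle a
 * control request: accept requests whose recipient maps onto one of our
 * interfaces or endpoints, or any recipient when the descriptors were
 * loaded with FUNCTIONFS_ALL_CTRL_RECIP.
 */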
Felix Hädicke54dfce62016-06-22 01:12:07 +02003138static bool ffs_func_req_match(struct usb_function *f,
3139 const struct usb_ctrlrequest *creq)
3140{
3141 struct ffs_function *func = ffs_func_from_usb(f);
3142
3143 switch (creq->bRequestType & USB_RECIP_MASK) {
3144 case USB_RECIP_INTERFACE:
3145 return (ffs_func_revmap_intf(func,
3146 le16_to_cpu(creq->wIndex)) >= 0);
3147 case USB_RECIP_ENDPOINT:
3148 return (ffs_func_revmap_ep(func,
3149 le16_to_cpu(creq->wIndex)) >= 0);
3150 default:
3151 return (bool) (func->ffs->user_flags &
3152 FUNCTIONFS_ALL_CTRL_RECIP);
3153 }
3154}
3155
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003156static void ffs_func_suspend(struct usb_function *f)
3157{
3158 ENTER();
3159 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
3160}
3161
3162static void ffs_func_resume(struct usb_function *f)
3163{
3164 ENTER();
3165 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
3166}
3167
3168
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01003169/* Endpoint and interface numbers reverse mapping ***************************/
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003170
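/*
 * Translate host-visible endpoint and interface numbers back to the
 * function-local ones; both helpers return -EDOM when no mapping exists.
 */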
3171static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
3172{
3173 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
3174 return num ? num : -EDOM;
3175}
3176
3177static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
3178{
3179 short *nums = func->interfaces_nums;
3180 unsigned count = func->ffs->interfaces_count;
3181
3182 for (; count; --count, ++nums) {
3183 if (*nums >= 0 && *nums == intf)
3184 return nums - func->interfaces_nums;
3185 }
3186
3187 return -EDOM;
3188}
3189
3190
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003191/* Devices management *******************************************************/
3192
3193static LIST_HEAD(ffs_devices);
3194
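/*
 * ffs_lock must be taken by the caller of this function
 */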
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003195static struct ffs_dev *_ffs_do_find_dev(const char *name)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003196{
3197 struct ffs_dev *dev;
3198
3199 list_for_each_entry(dev, &ffs_devices, entry) {
3200 if (!dev->name || !name)
3201 continue;
3202 if (strcmp(dev->name, name) == 0)
3203 return dev;
3204 }
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003205
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003206 return NULL;
3207}
3208
3209/*
3210 * ffs_lock must be taken by the caller of this function
3211 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003212static struct ffs_dev *_ffs_get_single_dev(void)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003213{
3214 struct ffs_dev *dev;
3215
3216 if (list_is_singular(&ffs_devices)) {
3217 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
3218 if (dev->single)
3219 return dev;
3220 }
3221
3222 return NULL;
3223}
3224
3225/*
3226 * ffs_lock must be taken by the caller of this function
3227 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003228static struct ffs_dev *_ffs_find_dev(const char *name)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003229{
3230 struct ffs_dev *dev;
3231
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003232 dev = _ffs_get_single_dev();
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003233 if (dev)
3234 return dev;
3235
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003236 return _ffs_do_find_dev(name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003237}
3238
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003239/* Configfs support *********************************************************/
3240
3241static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
3242{
3243 return container_of(to_config_group(item), struct f_fs_opts,
3244 func_inst.group);
3245}
3246
3247static void ffs_attr_release(struct config_item *item)
3248{
3249 struct f_fs_opts *opts = to_ffs_opts(item);
3250
3251 usb_put_function_instance(&opts->func_inst);
3252}
3253
3254static struct configfs_item_operations ffs_item_ops = {
3255 .release = ffs_attr_release,
3256};
3257
3258static struct config_item_type ffs_func_type = {
3259 .ct_item_ops = &ffs_item_ops,
3260 .ct_owner = THIS_MODULE,
3261};
3262
3263
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003264/* Function registration interface ******************************************/
3265
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003266static void ffs_free_inst(struct usb_function_instance *f)
3267{
3268 struct f_fs_opts *opts;
3269
3270 opts = to_f_fs_opts(f);
3271 ffs_dev_lock();
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003272 _ffs_free_dev(opts->dev);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003273 ffs_dev_unlock();
3274 kfree(opts);
3275}
3276
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003277#define MAX_INST_NAME_LEN 40
3278
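/*
 * Configfs instance naming: duplicate the requested name, register it as
 * this ffs device's name and free a previously allocated one, if any.
 */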
3279static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
3280{
3281 struct f_fs_opts *opts;
3282 char *ptr;
3283 const char *tmp;
3284 int name_len, ret;
3285
3286 name_len = strlen(name) + 1;
3287 if (name_len > MAX_INST_NAME_LEN)
3288 return -ENAMETOOLONG;
3289
3290 ptr = kstrndup(name, name_len, GFP_KERNEL);
3291 if (!ptr)
3292 return -ENOMEM;
3293
3294 opts = to_f_fs_opts(fi);
3295 tmp = NULL;
3296
3297 ffs_dev_lock();
3298
3299 tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
3300 ret = _ffs_name_dev(opts->dev, ptr);
3301 if (ret) {
3302 kfree(ptr);
3303 ffs_dev_unlock();
3304 return ret;
3305 }
3306 opts->dev->name_allocated = true;
3307
3308 ffs_dev_unlock();
3309
3310 kfree(tmp);
3311
3312 return 0;
3313}
3314
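/*
 * Allocate the function instance together with its backing ffs_dev and
 * initialize its configfs group.
 */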
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003315static struct usb_function_instance *ffs_alloc_inst(void)
3316{
3317 struct f_fs_opts *opts;
3318 struct ffs_dev *dev;
3319
3320 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3321 if (!opts)
3322 return ERR_PTR(-ENOMEM);
3323
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003324 opts->func_inst.set_inst_name = ffs_set_inst_name;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003325 opts->func_inst.free_func_inst = ffs_free_inst;
3326 ffs_dev_lock();
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003327 dev = _ffs_alloc_dev();
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003328 ffs_dev_unlock();
3329 if (IS_ERR(dev)) {
3330 kfree(opts);
3331 return ERR_CAST(dev);
3332 }
3333 opts->dev = dev;
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003334 dev->opts = opts;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003335
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003336 config_group_init_type_name(&opts->func_inst.group, "",
3337 &ffs_func_type);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003338 return &opts->func_inst;
3339}
3340
3341static void ffs_free(struct usb_function *f)
3342{
3343 kfree(ffs_func_from_usb(f));
3344}
3345
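/*
 * Undo ffs_func_bind(): disable endpoints if this function is still the
 * active one, drop the options reference (possibly unbinding the ffs
 * instance), free the per-endpoint requests and the shared eps/descriptor
 * allocation, and report FUNCTIONFS_UNBIND to user space.
 */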
3346static void ffs_func_unbind(struct usb_configuration *c,
3347 struct usb_function *f)
3348{
3349 struct ffs_function *func = ffs_func_from_usb(f);
3350 struct ffs_data *ffs = func->ffs;
3351 struct f_fs_opts *opts =
3352 container_of(f->fi, struct f_fs_opts, func_inst);
3353 struct ffs_ep *ep = func->eps;
3354 unsigned count = ffs->eps_count;
3355 unsigned long flags;
3356
3357 ENTER();
3358 if (ffs->func == func) {
3359 ffs_func_eps_disable(func);
3360 ffs->func = NULL;
3361 }
3362
3363 if (!--opts->refcnt)
3364 functionfs_unbind(ffs);
3365
3366 /* cleanup after autoconfig */
3367 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3368 do {
3369 if (ep->ep && ep->req)
3370 usb_ep_free_request(ep->ep, ep->req);
3371 ep->req = NULL;
3372 ++ep;
3373 } while (--count);
3374 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3375 kfree(func->eps);
3376 func->eps = NULL;
3377 /*
3378 * eps, descriptors and interfaces_nums are allocated in the
3379 * same chunk so only one free is required.
3380 */
3381 func->function.fs_descriptors = NULL;
3382 func->function.hs_descriptors = NULL;
Manu Gautam8d4e8972014-02-28 16:50:22 +05303383 func->function.ss_descriptors = NULL;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003384 func->interfaces_nums = NULL;
3385
3386 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
3387}
3388
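/* Allocate a struct ffs_function and fill in the usb_function callbacks. */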
3389static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
3390{
3391 struct ffs_function *func;
3392
3393 ENTER();
3394
3395 func = kzalloc(sizeof(*func), GFP_KERNEL);
3396 if (unlikely(!func))
3397 return ERR_PTR(-ENOMEM);
3398
3399 func->function.name = "Function FS Gadget";
3400
3401 func->function.bind = ffs_func_bind;
3402 func->function.unbind = ffs_func_unbind;
3403 func->function.set_alt = ffs_func_set_alt;
3404 func->function.disable = ffs_func_disable;
3405 func->function.setup = ffs_func_setup;
Felix Hädicke54dfce62016-06-22 01:12:07 +02003406 func->function.req_match = ffs_func_req_match;
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003407 func->function.suspend = ffs_func_suspend;
3408 func->function.resume = ffs_func_resume;
3409 func->function.free_func = ffs_free;
3410
3411 return &func->function;
3412}
3413
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003414/*
3415 * ffs_lock must be taken by the caller of this function
3416 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003417static struct ffs_dev *_ffs_alloc_dev(void)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003418{
3419 struct ffs_dev *dev;
3420 int ret;
3421
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003422 if (_ffs_get_single_dev())
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003423 return ERR_PTR(-EBUSY);
3424
3425 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3426 if (!dev)
3427 return ERR_PTR(-ENOMEM);
3428
3429 if (list_empty(&ffs_devices)) {
3430 ret = functionfs_init();
3431 if (ret) {
3432 kfree(dev);
3433 return ERR_PTR(ret);
3434 }
3435 }
3436
3437 list_add(&dev->entry, &ffs_devices);
3438
3439 return dev;
3440}
3441
3442/*
3443 * ffs_lock must be taken by the caller of this function
3444 * The caller is responsible for "name" being available whenever f_fs needs it
3445 */
3446static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
3447{
3448 struct ffs_dev *existing;
3449
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003450 existing = _ffs_do_find_dev(name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003451 if (existing)
3452 return -EBUSY;
Andrzej Pietrasiewiczab13cb02014-01-13 16:49:36 +01003453
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003454 dev->name = name;
3455
3456 return 0;
3457}
3458
3459/*
3460 * The caller is responsible for "name" being available whenever f_fs needs it
3461 */
3462int ffs_name_dev(struct ffs_dev *dev, const char *name)
3463{
3464 int ret;
3465
3466 ffs_dev_lock();
3467 ret = _ffs_name_dev(dev, name);
3468 ffs_dev_unlock();
3469
3470 return ret;
3471}
Felipe Balbi0700faa2014-04-01 13:19:32 -05003472EXPORT_SYMBOL_GPL(ffs_name_dev);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003473
3474int ffs_single_dev(struct ffs_dev *dev)
3475{
3476 int ret;
3477
3478 ret = 0;
3479 ffs_dev_lock();
3480
3481 if (!list_is_singular(&ffs_devices))
3482 ret = -EBUSY;
3483 else
3484 dev->single = true;
3485
3486 ffs_dev_unlock();
3487 return ret;
3488}
Felipe Balbi0700faa2014-04-01 13:19:32 -05003489EXPORT_SYMBOL_GPL(ffs_single_dev);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003490
3491/*
3492 * ffs_lock must be taken by the caller of this function
3493 */
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003494static void _ffs_free_dev(struct ffs_dev *dev)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003495{
3496 list_del(&dev->entry);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003497 if (dev->name_allocated)
3498 kfree(dev->name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003499 kfree(dev);
3500 if (list_empty(&ffs_devices))
3501 functionfs_cleanup();
3502}
3503
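/*
 * Called on mount: look the device up by name and mark it mounted, unless
 * it is already mounted or its acquire callback rejects the request.
 */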
3504static void *ffs_acquire_dev(const char *dev_name)
3505{
3506 struct ffs_dev *ffs_dev;
3507
3508 ENTER();
3509 ffs_dev_lock();
3510
Andrzej Pietrasiewiczda13a772014-01-13 16:49:38 +01003511 ffs_dev = _ffs_find_dev(dev_name);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003512 if (!ffs_dev)
Krzysztof Opasiakd668b4f2014-05-21 14:05:35 +02003513 ffs_dev = ERR_PTR(-ENOENT);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003514 else if (ffs_dev->mounted)
3515 ffs_dev = ERR_PTR(-EBUSY);
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003516 else if (ffs_dev->ffs_acquire_dev_callback &&
3517 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
Krzysztof Opasiakd668b4f2014-05-21 14:05:35 +02003518 ffs_dev = ERR_PTR(-ENOENT);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003519 else
3520 ffs_dev->mounted = true;
3521
3522 ffs_dev_unlock();
3523 return ffs_dev;
3524}
3525
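/*
 * Counterpart of ffs_acquire_dev(): clear the mounted flag and run the
 * release callback, if any.
 */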
3526static void ffs_release_dev(struct ffs_data *ffs_data)
3527{
3528 struct ffs_dev *ffs_dev;
3529
3530 ENTER();
3531 ffs_dev_lock();
3532
3533 ffs_dev = ffs_data->private_data;
Andrzej Pietrasiewiczea365922014-01-13 16:49:35 +01003534 if (ffs_dev) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003535 ffs_dev->mounted = false;
Andrzej Pietrasiewiczea365922014-01-13 16:49:35 +01003536
3537 if (ffs_dev->ffs_release_dev_callback)
3538 ffs_dev->ffs_release_dev_callback(ffs_dev);
3539 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003540
3541 ffs_dev_unlock();
3542}
3543
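/*
 * Called once the ffs instance has become ready: record the ffs_data on
 * the device, run the ready callback and arm the closed callback for
 * ffs_closed().
 */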
3544static int ffs_ready(struct ffs_data *ffs)
3545{
3546 struct ffs_dev *ffs_obj;
3547 int ret = 0;
3548
3549 ENTER();
3550 ffs_dev_lock();
3551
3552 ffs_obj = ffs->private_data;
3553 if (!ffs_obj) {
3554 ret = -EINVAL;
3555 goto done;
3556 }
3557 if (WARN_ON(ffs_obj->desc_ready)) {
3558 ret = -EBUSY;
3559 goto done;
3560 }
3561
3562 ffs_obj->desc_ready = true;
3563 ffs_obj->ffs_data = ffs;
3564
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02003565 if (ffs_obj->ffs_ready_callback) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003566 ret = ffs_obj->ffs_ready_callback(ffs);
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02003567 if (ret)
3568 goto done;
3569 }
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003570
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02003571 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003572done:
3573 ffs_dev_unlock();
3574 return ret;
3575}
3576
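/*
 * Called when the ffs instance goes away: run the closed callback if it
 * was armed by ffs_ready() and, unless the instance is not exposed via
 * configfs or is already being torn down, unregister the gadget it was
 * plugged into.
 */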
3577static void ffs_closed(struct ffs_data *ffs)
3578{
3579 struct ffs_dev *ffs_obj;
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01003580 struct f_fs_opts *opts;
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003581
3582 ENTER();
3583 ffs_dev_lock();
3584
3585 ffs_obj = ffs->private_data;
3586 if (!ffs_obj)
3587 goto done;
3588
3589 ffs_obj->desc_ready = false;
3590
Krzysztof Opasiak49a79d82015-05-22 17:25:18 +02003591 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
3592 ffs_obj->ffs_closed_callback)
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003593 ffs_obj->ffs_closed_callback(ffs);
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003594
Rui Miguel Silvaf14e9ad2015-05-20 14:52:40 +01003595 if (ffs_obj->opts)
3596 opts = ffs_obj->opts;
3597 else
3598 goto done;
3599
3600 if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
3601 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
Andrzej Pietrasiewiczb6584992013-12-03 15:15:36 +01003602 goto done;
3603
3604 unregister_gadget_item(ffs_obj->opts->
3605 func_inst.group.cg_item.ci_parent->ci_parent);
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01003606done:
3607 ffs_dev_unlock();
3608}
3609
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003610/* Misc helper functions ****************************************************/
3611
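/*
 * Like mutex_lock_interruptible(), but only try-lock (returning -EAGAIN
 * on contention) when the caller is non-blocking.
 */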
3612static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
3613{
3614 return nonblock
3615 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
3616 : mutex_lock_interruptible(mutex);
3617}
3618
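/*
 * Copy a user-space buffer into a freshly allocated kernel buffer;
 * returns NULL for zero length and an ERR_PTR on allocation or copy
 * failure.
 */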
Al Viro260ef312012-09-26 21:43:45 -04003619static char *ffs_prepare_buffer(const char __user *buf, size_t len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003620{
3621 char *data;
3622
3623 if (unlikely(!len))
3624 return NULL;
3625
3626 data = kmalloc(len, GFP_KERNEL);
3627 if (unlikely(!data))
3628 return ERR_PTR(-ENOMEM);
3629
Daniel Walter7fe9a932015-11-18 17:15:49 +01003630 if (unlikely(copy_from_user(data, buf, len))) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003631 kfree(data);
3632 return ERR_PTR(-EFAULT);
3633 }
3634
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01003635 pr_vdebug("Buffer from user space:\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003636 ffs_dump_mem("", data, len);
3637
3638 return data;
3639}
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003640
Andrzej Pietrasiewicz5920cda2013-12-03 15:15:33 +01003641DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
3642MODULE_LICENSE("GPL");
3643MODULE_AUTHOR("Michal Nazarewicz");