blob: d17930d9a843fa01d3e19078d6e32c7f12652ad0 [file] [log] [blame]
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002 * f_fs.c -- user mode file system API for USB composite function controllers
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02003 *
4 * Copyright (C) 2010 Samsung Electronics
Michal Nazarewicz54b83602012-01-13 15:05:16 +01005 * Author: Michal Nazarewicz <mina86@mina86.com>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02006 *
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01007 * Based on inode.c (GadgetFS) which was:
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02008 * Copyright (C) 2003-2004 David Brownell
9 * Copyright (C) 2003 Agilent Technologies
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020015 */
16
17
18/* #define DEBUG */
19/* #define VERBOSE_DEBUG */
20
21#include <linux/blkdev.h>
Randy Dunlapb0608692010-05-10 10:51:36 -070022#include <linux/pagemap.h>
Paul Gortmakerf940fcd2011-05-27 09:56:31 -040023#include <linux/export.h>
Koen Beel560f1182012-05-30 20:43:37 +020024#include <linux/hid.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020025#include <asm/unaligned.h>
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020026
27#include <linux/usb/composite.h>
28#include <linux/usb/functionfs.h>
29
Andrzej Pietrasiewicze72c39c2013-12-03 15:15:31 +010030#include "u_fs.h"
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020031
32#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
33
/* Variable Length Array Macros **********************************************/

/*
 * Helpers for laying out several variable-length arrays inside a single
 * allocation.  vla_group() starts a layout (the running byte total lives
 * in <groupname>__next); each vla_item() computes a properly aligned
 * offset for its array and advances the total; vla_group_size() yields
 * the number of bytes to allocate; vla_ptr() addresses one array inside
 * the allocated block.
 */
#define vla_group(groupname) size_t groupname##__next = 0
#define vla_group_size(groupname) groupname##__next

/* Declares <groupname>_<name>__offset for an array of n objects of type. */
#define vla_item(groupname, type, name, n) \
	size_t groupname##_##name##__offset = ({ \
		size_t align_mask = __alignof__(type) - 1; \
		size_t offset = (groupname##__next + align_mask) & ~align_mask;\
		size_t size = (n) * sizeof(type); \
		groupname##__next = offset + size; \
		offset; \
	})

/* Like vla_item() but also declares <groupname>_<name>__sz (size in bytes). */
#define vla_item_with_sz(groupname, type, name, n) \
	size_t groupname##_##name##__sz = (n) * sizeof(type); \
	size_t groupname##_##name##__offset = ({ \
		size_t align_mask = __alignof__(type) - 1; \
		size_t offset = (groupname##__next + align_mask) & ~align_mask;\
		size_t size = groupname##_##name##__sz; \
		groupname##__next = offset + size; \
		offset; \
	})

/* Pointer to the 'name' array, given ptr = start of the allocation. */
#define vla_ptr(ptr, groupname, name) \
	((void *) ((char *)ptr + groupname##_##name##__offset))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020059
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +010060/* Debugging ****************************************************************/
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020061
/*
 * pr_vdebug()/ffs_dump_mem() compile to real logging only when
 * VERBOSE_DEBUG is defined; otherwise they are no-ops.  The #ifndef
 * guards let a wrapper (e.g. a board file including this one) provide
 * its own pr_vdebug definition.
 */
#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#  define pr_vdebug pr_debug
#endif /* pr_vdebug */
#  define ffs_dump_mem(prefix, ptr, len) \
	print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
#else
#ifndef pr_vdebug
#  define pr_vdebug(...)                 do { } while (0)
#endif /* pr_vdebug */
#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
#endif /* VERBOSE_DEBUG */

/* Trace function entry at verbose-debug level. */
#define ENTER()    pr_vdebug("%s()\n", __func__)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +020076
77
78/* The data structure and setup file ****************************************/
79
/* Lifetime states of one ffs_data instance (i.e. one functionfs mount). */
enum ffs_state {
	/*
	 * Waiting for descriptors and strings.
	 *
	 * In this state no open(2), read(2) or write(2) on epfiles
	 * may succeed (which should not be the problem as there
	 * should be no such files opened in the first place).
	 */
	FFS_READ_DESCRIPTORS,
	FFS_READ_STRINGS,

	/*
	 * We've got descriptors and strings.  We are or have called
	 * ffs_ready().  functionfs_bind() may have
	 * been called but we don't know.
	 *
	 * This is the only state in which operations on epfiles may
	 * succeed.
	 */
	FFS_ACTIVE,

	/*
	 * All endpoints have been closed.  This state is also set if
	 * we encounter an unrecoverable error.  The only
	 * unrecoverable error is situation when after reading strings
	 * from user space we fail to initialise epfiles or
	 * ffs_ready() returns with error (<0).
	 *
	 * In this state no open(2), read(2) or write(2) (both on ep0
	 * as well as epfile) may succeed (at this point epfiles are
	 * unlinked and all closed so this is not a problem; ep0 is
	 * also closed but ep0 file exists and so open(2) on ep0 must
	 * fail).
	 */
	FFS_CLOSING
};
116
117
/* State of the (at most one) outstanding setup request on ep0. */
enum ffs_setup_state {
	/* There is no setup request pending. */
	FFS_NO_SETUP,
	/*
	 * User has read events and there was a setup request event
	 * there.  The next read/write on ep0 will handle the
	 * request.
	 */
	FFS_SETUP_PENDING,
	/*
	 * There was event pending but before user space handled it
	 * some other event was introduced which canceled existing
	 * setup.  If this state is set read/write on ep0 return
	 * -EIDRM.  This state is only set when adding event.
	 */
	FFS_SETUP_CANCELED
};
135
136
137
138struct ffs_epfile;
139struct ffs_function;
140
/*
 * Per-instance state shared between the ep0 file, the endpoint files
 * and the USB function.  One object exists per mounted functionfs
 * instance.  "P: <lock>" annotations name the lock protecting a field.
 */
struct ffs_data {
	struct usb_gadget *gadget;

	/*
	 * Protect access read/write operations, only one read/write
	 * at a time.  As a consequence protects ep0req and company.
	 * While setup request is being processed (queued) this is
	 * held.
	 */
	struct mutex mutex;

	/*
	 * Protect access to endpoint related structures (basically
	 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
	 * endpoint zero.
	 */
	spinlock_t eps_lock;

	/*
	 * XXX REVISIT do we need our own request? Since we are not
	 * handling setup requests immediately user space may be so
	 * slow that another setup will be sent to the gadget but this
	 * time not to us but another function and then there could be
	 * a race.  Is that the case? Or maybe we can use cdev->req
	 * after all, maybe we just need some spinlock for that?
	 */
	struct usb_request *ep0req;		/* P: mutex */
	struct completion ep0req_completion;	/* P: mutex */
	int ep0req_status;			/* P: mutex */

	/* reference counter */
	atomic_t ref;
	/* how many files are opened (EP0 and others) */
	atomic_t opened;

	/* EP0 state */
	enum ffs_state state;

	/*
	 * Possible transitions:
	 * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
	 *	happens only in ep0 read which is P: mutex
	 * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
	 *	happens only in ep0 i/o which is P: mutex
	 * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
	 * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
	 */
	enum ffs_setup_state setup_state;

	/*
	 * Read setup_state while atomically consuming a cancellation:
	 * FFS_SETUP_CANCELED is replaced by FFS_NO_SETUP on first
	 * observation; any other state is returned unchanged.
	 */
#define FFS_SETUP_STATE(ffs) \
	((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
				       FFS_SETUP_CANCELED, FFS_NO_SETUP))

	/* Events & such. */
	struct {
		u8 types[4];		/* FIFO of pending event types */
		unsigned short count;	/* number of queued events (<= 4) */
		/* XXX REVISIT need to update it in some places, or do we? */
		unsigned short can_stall;
		struct usb_ctrlrequest setup;

		wait_queue_head_t waitq;
	} ev; /* the whole structure, P: ev.waitq.lock */

	/* Flags */
	unsigned long flags;
#define FFS_FL_CALL_CLOSED_CALLBACK 0
#define FFS_FL_BOUND                1

	/* Active function */
	struct ffs_function *func;

	/*
	 * Device name, write once when file system is mounted.
	 * Intended for user to read if she wants.
	 */
	const char *dev_name;
	/* Private data for our user (ie. gadget).  Managed by user. */
	void *private_data;

	/* filled by __ffs_data_got_descs() */
	/*
	 * Real descriptors are 16 bytes after raw_descs (so you need
	 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
	 * first full speed descriptor).  raw_descs_length and
	 * raw_fs_descs_length do not have those 16 bytes added.
	 */
	const void *raw_descs;
	unsigned raw_descs_length;
	unsigned raw_fs_descs_length;
	unsigned fs_descs_count;
	unsigned hs_descs_count;

	unsigned short strings_count;
	unsigned short interfaces_count;
	unsigned short eps_count;
	unsigned short _pad1;	/* explicit padding, keeps layout stable */

	/* filled by __ffs_data_got_strings() */
	/* ids in stringtabs are set in functionfs_bind() */
	const void *raw_strings;
	struct usb_gadget_strings **stringtabs;

	/*
	 * File system's super block, write once when file system is
	 * mounted.
	 */
	struct super_block *sb;

	/* File permissions, written once when fs is mounted */
	struct ffs_file_perms {
		umode_t mode;
		kuid_t uid;
		kgid_t gid;
	} file_perms;

	/*
	 * The endpoint files, filled by ffs_epfiles_create(),
	 * destroyed by ffs_epfiles_destroy().
	 */
	struct ffs_epfile *epfiles;
};
263
264/* Reference counter handling */
265static void ffs_data_get(struct ffs_data *ffs);
266static void ffs_data_put(struct ffs_data *ffs);
267/* Creates new ffs_data object. */
268static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
269
270/* Opened counter handling. */
271static void ffs_data_opened(struct ffs_data *ffs);
272static void ffs_data_closed(struct ffs_data *ffs);
273
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100274/* Called with ffs->mutex held; take over ownership of data. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200275static int __must_check
276__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
277static int __must_check
278__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
279
280
281/* The function structure ***************************************************/
282
283struct ffs_ep;
284
/*
 * Glue between a struct usb_function registered with the composite
 * framework and the ffs_data instance backing it.
 */
struct ffs_function {
	struct usb_configuration *conf;
	struct usb_gadget *gadget;
	struct ffs_data *ffs;

	struct ffs_ep *eps;
	/*
	 * Reverse endpoint map used by ffs_func_revmap_ep();
	 * presumably indexed by endpoint address — TODO confirm.
	 */
	u8 eps_revmap[16];
	/* Interface number map used by ffs_func_revmap_intf(). */
	short *interfaces_nums;

	struct usb_function function;
};
296
297
298static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
299{
300 return container_of(f, struct ffs_function, function);
301}
302
303static void ffs_func_free(struct ffs_function *func);
304
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200305static void ffs_func_eps_disable(struct ffs_function *func);
306static int __must_check ffs_func_eps_enable(struct ffs_function *func);
307
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200308static int ffs_func_bind(struct usb_configuration *,
309 struct usb_function *);
310static void ffs_func_unbind(struct usb_configuration *,
311 struct usb_function *);
312static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
313static void ffs_func_disable(struct usb_function *);
314static int ffs_func_setup(struct usb_function *,
315 const struct usb_ctrlrequest *);
316static void ffs_func_suspend(struct usb_function *);
317static void ffs_func_resume(struct usb_function *);
318
319
320static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
321static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
322
323
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200324/* The endpoints structures *************************************************/
325
/* State of one hardware endpoint in use by the function. */
struct ffs_ep {
	struct usb_ep *ep;	/* P: ffs->eps_lock */
	struct usb_request *req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed */
	struct usb_endpoint_descriptor *descs[2];

	/* Endpoint number; presumably the user-visible index — see
	 * ffs_func_revmap_ep() / eps_revmap. */
	u8 num;

	int status;	/* P: epfile->mutex; set by ffs_epfile_io_complete() */
};
337
/* One endpoint file node in the mounted file system. */
struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex mutex;
	/* Woken when the endpoint becomes enabled (this->ep set). */
	wait_queue_head_t wait;

	struct ffs_data *ffs;
	struct ffs_ep *ep;	/* P: ffs->eps_lock; NULL while disabled */

	struct dentry *dentry;

	/* File name; 5 bytes fit "ep" + up to two digits + NUL. */
	char name[5];

	/* Direction flag — presumably non-zero for IN endpoints
	 * (see the halt logic in ffs_epfile_io()) — TODO confirm. */
	unsigned char in; /* P: ffs->eps_lock */
	unsigned char isoc; /* P: ffs->eps_lock */

	unsigned char _pad;
};
355
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200356static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
357static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
358
359static struct inode *__must_check
360ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
361 const struct file_operations *fops,
362 struct dentry **dentry_p);
363
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +0100364/* Devices management *******************************************************/
365
366DEFINE_MUTEX(ffs_lock);
367
368static struct ffs_dev *ffs_find_dev(const char *name);
369static void *ffs_acquire_dev(const char *dev_name);
370static void ffs_release_dev(struct ffs_data *ffs_data);
371static int ffs_ready(struct ffs_data *ffs);
372static void ffs_closed(struct ffs_data *ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200373
374/* Misc helper functions ****************************************************/
375
376static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
377 __attribute__((warn_unused_result, nonnull));
Al Viro260ef312012-09-26 21:43:45 -0400378static char *ffs_prepare_buffer(const char __user *buf, size_t len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200379 __attribute__((warn_unused_result, nonnull));
380
381
382/* Control file aka ep0 *****************************************************/
383
384static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
385{
386 struct ffs_data *ffs = req->context;
387
388 complete_all(&ffs->ep0req_completion);
389}
390
/*
 * Queue a request on ep0 and sleep until it completes.
 *
 * Called with ffs->ev.waitq.lock held (ffs->mutex is held by the
 * caller); the spinlock is released here before queuing.  @data/@len
 * describe the data-stage buffer (len may be 0 for a ZLP).  Returns the
 * request status from the completion (ep0req_status), or a negative
 * error; -EINTR if interrupted, in which case the request is dequeued.
 */
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	/* Short answer: terminate with a ZLP if host asked for more. */
	req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf = data;
	req->length = len;

	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	reinit_completion(&ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	/* ffs_ep0_complete() signals this completion. */
	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	/* Setup request fully handled. */
	ffs->setup_state = FFS_NO_SETUP;
	return ffs->ep0req_status;
}
426
427static int __ffs_ep0_stall(struct ffs_data *ffs)
428{
429 if (ffs->ev.can_stall) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100430 pr_vdebug("ep0 stall\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200431 usb_ep_set_halt(ffs->gadget->ep0);
432 ffs->setup_state = FFS_NO_SETUP;
433 return -EL2HLT;
434 } else {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +0100435 pr_debug("bogus ep0 stall!\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200436 return -ESRCH;
437 }
438}
439
/*
 * write(2) on the ep0 file.
 *
 * Behaviour depends on ffs->state:
 *  - FFS_READ_DESCRIPTORS / FFS_READ_STRINGS: the payload is the
 *    descriptor / string blob; on success the state machine advances
 *    (descriptors -> strings -> active, calling ffs_ready()).
 *  - FFS_ACTIVE: the payload answers the data stage of a pending IN
 *    setup request.
 * Returns bytes consumed or a negative error (-EIDRM if the pending
 * setup request was canceled, -EBADFD in FFS_CLOSING).
 */
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data;

	ENTER();

	/* Fast check if setup was canceled */
	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		/* Copy data; blobs start with a 16-byte header. */
		if (unlikely(len < 16)) {
			ret = -EINVAL;
			break;
		}

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		/* Handle data; the __ffs_data_got_*() take ownership of it. */
		if (ffs->state == FFS_READ_DESCRIPTORS) {
			pr_info("read descriptors\n");
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ffs->state = FFS_READ_STRINGS;
			ret = len;
		} else {
			pr_info("read strings\n");
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ret = ffs_epfiles_create(ffs);
			if (unlikely(ret)) {
				ffs->state = FFS_CLOSING;
				break;
			}

			ffs->state = FFS_ACTIVE;
			/* drop the mutex before calling out to ffs_ready() */
			mutex_unlock(&ffs->mutex);

			ret = ffs_ready(ffs);
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;
				return ret;
			}

			set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
			return len;
		}
		break;

	case FFS_ACTIVE:
		data = NULL;
		/*
		 * We're called from user space, we can use _irq
		 * rather then _irqsave
		 */
		spin_lock_irq(&ffs->ev.waitq.lock);
		switch (FFS_SETUP_STATE(ffs)) {
		case FFS_SETUP_CANCELED:
			ret = -EIDRM;
			goto done_spin;

		case FFS_NO_SETUP:
			ret = -ESRCH;
			goto done_spin;

		case FFS_SETUP_PENDING:
			break;
		}

		/* FFS_SETUP_PENDING */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			/* OUT requests are answered by reading ep0 instead */
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			break;
		}

		/* FFS_SETUP_PENDING and not stall */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
		 * but the state of setup could have changed from
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
		 * to check for that.  If that happened we copied data
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
		 * mutex.
		 */
		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
			ret = -EIDRM;
done_spin:
			spin_unlock_irq(&ffs->ev.waitq.lock);
		} else {
			/* unlocks spinlock */
			ret = __ffs_ep0_queue_wait(ffs, data, len);
		}
		kfree(data);
		break;

	default:
		ret = -EBADFD;
		break;
	}

	mutex_unlock(&ffs->mutex);
	return ret;
}
580
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200581static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
582 size_t n)
583{
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100584 /*
585 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
586 * to release them.
587 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200588 struct usb_functionfs_event events[n];
589 unsigned i = 0;
590
591 memset(events, 0, sizeof events);
592
593 do {
594 events[i].type = ffs->ev.types[i];
595 if (events[i].type == FUNCTIONFS_SETUP) {
596 events[i].u.setup = ffs->ev.setup;
597 ffs->setup_state = FFS_SETUP_PENDING;
598 }
599 } while (++i < n);
600
601 if (n < ffs->ev.count) {
602 ffs->ev.count -= n;
603 memmove(ffs->ev.types, ffs->ev.types + n,
604 ffs->ev.count * sizeof *ffs->ev.types);
605 } else {
606 ffs->ev.count = 0;
607 }
608
609 spin_unlock_irq(&ffs->ev.waitq.lock);
610 mutex_unlock(&ffs->mutex);
611
612 return unlikely(__copy_to_user(buf, events, sizeof events))
613 ? -EFAULT : sizeof events;
614}
615
/*
 * read(2) on the ep0 file.
 *
 * Only valid in FFS_ACTIVE state.  With no setup request pending, it
 * dequeues queued events (struct usb_functionfs_event), blocking unless
 * O_NONBLOCK is set.  With an OUT setup request pending, it reads that
 * request's data stage.  Returns bytes read or a negative error
 * (-EIDRM if the pending setup was canceled meanwhile).
 */
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
			    size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	char *data = NULL;
	size_t n;
	int ret;

	ENTER();

	/* Fast check if setup was canceled */
	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	if (ffs->state != FFS_ACTIVE) {
		ret = -EBADFD;
		goto done_mutex;
	}

	/*
	 * We're called from user space, we can use _irq rather then
	 * _irqsave
	 */
	spin_lock_irq(&ffs->ev.waitq.lock);

	switch (FFS_SETUP_STATE(ffs)) {
	case FFS_SETUP_CANCELED:
		ret = -EIDRM;
		break;

	case FFS_NO_SETUP:
		/* buffer must hold at least one whole event */
		n = len / sizeof(struct usb_functionfs_event);
		if (unlikely(!n)) {
			ret = -EINVAL;
			break;
		}

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
			ret = -EAGAIN;
			break;
		}

		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
							ffs->ev.count)) {
			ret = -EINTR;
			break;
		}

		/* releases both the spinlock and the mutex */
		return __ffs_ep0_read_events(ffs, buf,
					     min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			/* IN requests are answered by writing ep0 instead */
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			goto done_mutex;
		}

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		if (likely(len)) {
			data = kmalloc(len, GFP_KERNEL);
			if (unlikely(!data)) {
				ret = -ENOMEM;
				goto done_mutex;
			}
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
			ret = -EIDRM;
			break;
		}

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
		if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
			ret = -EFAULT;
		goto done_mutex;

	default:
		ret = -EBADFD;
		break;
	}

	spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
	mutex_unlock(&ffs->mutex);
	kfree(data);
	return ret;
}
717
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200718static int ffs_ep0_open(struct inode *inode, struct file *file)
719{
720 struct ffs_data *ffs = inode->i_private;
721
722 ENTER();
723
724 if (unlikely(ffs->state == FFS_CLOSING))
725 return -EBUSY;
726
727 file->private_data = ffs;
728 ffs_data_opened(ffs);
729
730 return 0;
731}
732
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200733static int ffs_ep0_release(struct inode *inode, struct file *file)
734{
735 struct ffs_data *ffs = file->private_data;
736
737 ENTER();
738
739 ffs_data_closed(ffs);
740
741 return 0;
742}
743
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200744static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
745{
746 struct ffs_data *ffs = file->private_data;
747 struct usb_gadget *gadget = ffs->gadget;
748 long ret;
749
750 ENTER();
751
752 if (code == FUNCTIONFS_INTERFACE_REVMAP) {
753 struct ffs_function *func = ffs->func;
754 ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
Andrzej Pietrasiewicz92b0abf2012-03-28 09:30:50 +0200755 } else if (gadget && gadget->ops->ioctl) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200756 ret = gadget->ops->ioctl(gadget, code, value);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200757 } else {
758 ret = -ENOTTY;
759 }
760
761 return ret;
762}
763
/* File operations for the ep0 node of a functionfs mount. */
static const struct file_operations ffs_ep0_operations = {
	.llseek =	no_llseek,	/* ep0 is a stream; seeking is meaningless */

	.open =		ffs_ep0_open,
	.write =	ffs_ep0_write,
	.read =		ffs_ep0_read,
	.release =	ffs_ep0_release,
	.unlocked_ioctl =	ffs_ep0_ioctl,
};
773
774
775/* "Normal" endpoints operations ********************************************/
776
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200777static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
778{
779 ENTER();
780 if (likely(req->context)) {
781 struct ffs_ep *ep = _ep->driver_data;
782 ep->status = req->status ? req->status : req->actual;
783 complete(req->context);
784 }
785}
786
/*
 * Common backend for read(2) and write(2) on an endpoint file.
 *
 * @read: non-zero for read(2), zero for write(2).  If the requested
 * direction does not match the endpoint's direction (epfile->in), the
 * endpoint is halted instead and -EBADMSG returned (-EINVAL for
 * isochronous endpoints, which cannot halt).  Blocks until the endpoint
 * is enabled (unless O_NONBLOCK) and until the transfer completes.
 * Returns bytes transferred or a negative error.
 */
static ssize_t ffs_epfile_io(struct file *file,
			     char __user *buf, size_t len, int read)
{
	struct ffs_epfile *epfile = file->private_data;
	struct usb_gadget *gadget = epfile->ffs->gadget;
	struct ffs_ep *ep;
	char *data = NULL;
	ssize_t ret, data_len;
	int halt;

	/* Are we still active? */
	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
		ret = -ENODEV;
		goto error;
	}

	/* Wait for endpoint to be enabled */
	ep = epfile->ep;
	if (!ep) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error;
		}

		/* ffs_func_eps_enable() sets epfile->ep and wakes us */
		ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
		if (ret) {
			ret = -EINTR;
			goto error;
		}
	}

	/* Do we halt? (direction of op does not match the endpoint) */
	halt = !read == !epfile->in;
	if (halt && epfile->isoc) {
		ret = -EINVAL;
		goto error;
	}

	/* Allocate & copy */
	if (!halt) {
		/*
		 * Controller may require buffer size to be aligned to
		 * maxpacketsize of an out endpoint.
		 */
		data_len = read ? usb_ep_align_maybe(gadget, ep->ep, len) : len;

		data = kmalloc(data_len, GFP_KERNEL);
		if (unlikely(!data))
			return -ENOMEM;

		if (!read && unlikely(copy_from_user(data, buf, len))) {
			ret = -EFAULT;
			goto error;
		}
	}

	/* We will be using request */
	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret))
		goto error;

	spin_lock_irq(&epfile->ffs->eps_lock);

	if (epfile->ep != ep) {
		/* In the meantime, endpoint got disabled or changed. */
		ret = -ESHUTDOWN;
		spin_unlock_irq(&epfile->ffs->eps_lock);
	} else if (halt) {
		/* Halt */
		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
			usb_ep_set_halt(ep->ep);
		spin_unlock_irq(&epfile->ffs->eps_lock);
		ret = -EBADMSG;
	} else {
		/* Fire the request */
		DECLARE_COMPLETION_ONSTACK(done);

		struct usb_request *req = ep->req;
		req->context = &done;
		req->complete = ffs_epfile_io_complete;
		req->buf = data;
		req->length = data_len;

		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);

		spin_unlock_irq(&epfile->ffs->eps_lock);

		if (unlikely(ret < 0)) {
			/* nop */
		} else if (unlikely(wait_for_completion_interruptible(&done))) {
			ret = -EINTR;
			usb_ep_dequeue(ep->ep, req);
		} else {
			/*
			 * XXX We may end up silently droping data here.
			 * Since data_len (i.e. req->length) may be bigger
			 * than len (after being rounded up to maxpacketsize),
			 * we may end up with more data then user space has
			 * space for.
			 */
			ret = ep->status;
			if (read && ret > 0 &&
			    unlikely(copy_to_user(buf, data,
						  min_t(size_t, ret, len))))
				ret = -EFAULT;
		}
	}

	mutex_unlock(&epfile->mutex);
error:
	kfree(data);
	return ret;
}
900
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200901static ssize_t
902ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
903 loff_t *ptr)
904{
905 ENTER();
906
907 return ffs_epfile_io(file, (char __user *)buf, len, 0);
908}
909
910static ssize_t
911ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
912{
913 ENTER();
914
915 return ffs_epfile_io(file, buf, len, 1);
916}
917
918static int
919ffs_epfile_open(struct inode *inode, struct file *file)
920{
921 struct ffs_epfile *epfile = inode->i_private;
922
923 ENTER();
924
925 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
926 return -ENODEV;
927
928 file->private_data = epfile;
929 ffs_data_opened(epfile->ffs);
930
931 return 0;
932}
933
934static int
935ffs_epfile_release(struct inode *inode, struct file *file)
936{
937 struct ffs_epfile *epfile = inode->i_private;
938
939 ENTER();
940
941 ffs_data_closed(epfile->ffs);
942
943 return 0;
944}
945
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200946static long ffs_epfile_ioctl(struct file *file, unsigned code,
947 unsigned long value)
948{
949 struct ffs_epfile *epfile = file->private_data;
950 int ret;
951
952 ENTER();
953
954 if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
955 return -ENODEV;
956
957 spin_lock_irq(&epfile->ffs->eps_lock);
958 if (likely(epfile->ep)) {
959 switch (code) {
960 case FUNCTIONFS_FIFO_STATUS:
961 ret = usb_ep_fifo_status(epfile->ep->ep);
962 break;
963 case FUNCTIONFS_FIFO_FLUSH:
964 usb_ep_fifo_flush(epfile->ep->ep);
965 ret = 0;
966 break;
967 case FUNCTIONFS_CLEAR_HALT:
968 ret = usb_ep_clear_halt(epfile->ep->ep);
969 break;
970 case FUNCTIONFS_ENDPOINT_REVMAP:
971 ret = epfile->ep->num;
972 break;
973 default:
974 ret = -ENOTTY;
975 }
976 } else {
977 ret = -ENODEV;
978 }
979 spin_unlock_irq(&epfile->ffs->eps_lock);
980
981 return ret;
982}
983
/* File operations for the per-endpoint files (ep1..epN). */
static const struct file_operations ffs_epfile_operations = {
	.llseek =	no_llseek,	/* endpoint streams are not seekable */

	.open =		ffs_epfile_open,
	.write =	ffs_epfile_write,
	.read =		ffs_epfile_read,
	.release =	ffs_epfile_release,
	.unlocked_ioctl =	ffs_epfile_ioctl,
};
993
994
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200995/* File system and super block operations ***********************************/
996
997/*
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +0100998 * Mounting the file system creates a controller file, used first for
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +0200999 * function configuration then later for event monitoring.
1000 */
1001
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001002static struct inode *__must_check
1003ffs_sb_make_inode(struct super_block *sb, void *data,
1004 const struct file_operations *fops,
1005 const struct inode_operations *iops,
1006 struct ffs_file_perms *perms)
1007{
1008 struct inode *inode;
1009
1010 ENTER();
1011
1012 inode = new_inode(sb);
1013
1014 if (likely(inode)) {
1015 struct timespec current_time = CURRENT_TIME;
1016
Al Viro12ba8d12010-10-27 04:19:36 +01001017 inode->i_ino = get_next_ino();
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001018 inode->i_mode = perms->mode;
1019 inode->i_uid = perms->uid;
1020 inode->i_gid = perms->gid;
1021 inode->i_atime = current_time;
1022 inode->i_mtime = current_time;
1023 inode->i_ctime = current_time;
1024 inode->i_private = data;
1025 if (fops)
1026 inode->i_fop = fops;
1027 if (iops)
1028 inode->i_op = iops;
1029 }
1030
1031 return inode;
1032}
1033
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001034/* Create "regular" file */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001035static struct inode *ffs_sb_create_file(struct super_block *sb,
1036 const char *name, void *data,
1037 const struct file_operations *fops,
1038 struct dentry **dentry_p)
1039{
1040 struct ffs_data *ffs = sb->s_fs_info;
1041 struct dentry *dentry;
1042 struct inode *inode;
1043
1044 ENTER();
1045
1046 dentry = d_alloc_name(sb->s_root, name);
1047 if (unlikely(!dentry))
1048 return NULL;
1049
1050 inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
1051 if (unlikely(!inode)) {
1052 dput(dentry);
1053 return NULL;
1054 }
1055
1056 d_add(dentry, inode);
1057 if (dentry_p)
1058 *dentry_p = dentry;
1059
1060 return inode;
1061}
1062
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001063/* Super block */
/* Super block operations: generic statfs; inodes dropped on last iput. */
static const struct super_operations ffs_sb_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};
1068
/* Parameters passed from ffs_fs_mount() to the ffs_sb_fill() callback. */
struct ffs_sb_fill_data {
	struct ffs_file_perms perms;	/* owner/group/mode for created files */
	umode_t root_mode;		/* mode of the root directory */
	const char *dev_name;		/* mount source name -- NOTE(review):
					 * not read in the visible code */
	struct ffs_data *ffs_data;	/* instance; NULLed by ffs_sb_fill()
					 * once the sb has taken ownership */
};
1075
1076static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1077{
1078 struct ffs_sb_fill_data *data = _data;
1079 struct inode *inode;
Al Viro2606b282013-09-20 17:14:21 +01001080 struct ffs_data *ffs = data->ffs_data;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001081
1082 ENTER();
1083
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001084 ffs->sb = sb;
Al Viro2606b282013-09-20 17:14:21 +01001085 data->ffs_data = NULL;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001086 sb->s_fs_info = ffs;
1087 sb->s_blocksize = PAGE_CACHE_SIZE;
1088 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1089 sb->s_magic = FUNCTIONFS_MAGIC;
1090 sb->s_op = &ffs_sb_operations;
1091 sb->s_time_gran = 1;
1092
1093 /* Root inode */
1094 data->perms.mode = data->root_mode;
1095 inode = ffs_sb_make_inode(sb, NULL,
1096 &simple_dir_operations,
1097 &simple_dir_inode_operations,
1098 &data->perms);
Al Viro48fde702012-01-08 22:15:13 -05001099 sb->s_root = d_make_root(inode);
1100 if (unlikely(!sb->s_root))
Al Viro2606b282013-09-20 17:14:21 +01001101 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001102
1103 /* EP0 file */
1104 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1105 &ffs_ep0_operations, NULL)))
Al Viro2606b282013-09-20 17:14:21 +01001106 return -ENOMEM;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001107
1108 return 0;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001109}
1110
/*
 * Parse comma-separated "key=value" mount options into @data.
 * Recognised keys: rmode, fmode, mode (combined), uid, gid.  The
 * @opts string is modified in place (the ',' and '=' separators are
 * overwritten with NULs).  Returns 0 on success, -EINVAL on malformed
 * input or unmappable uid/gid.
 */
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
	ENTER();

	/* No options is fine: keep the defaults. */
	if (!opts || !*opts)
		return 0;

	for (;;) {
		unsigned long value;
		char *eq, *comma;

		/* Option limit */
		comma = strchr(opts, ',');
		if (comma)
			*comma = 0;

		/* Value limit */
		eq = strchr(opts, '=');
		if (unlikely(!eq)) {
			pr_err("'=' missing in %s\n", opts);
			return -EINVAL;
		}
		*eq = 0;

		/* Parse value */
		if (kstrtoul(eq + 1, 0, &value)) {
			pr_err("%s: invalid value: %s\n", opts, eq + 1);
			return -EINVAL;
		}

		/* Interpret option -- dispatch on the key's length first. */
		switch (eq - opts) {
		case 5:
			if (!memcmp(opts, "rmode", 5))
				data->root_mode = (value & 0555) | S_IFDIR;
			else if (!memcmp(opts, "fmode", 5))
				data->perms.mode = (value & 0666) | S_IFREG;
			else
				goto invalid;
			break;

		case 4:
			/* "mode" sets both the root dir and file modes. */
			if (!memcmp(opts, "mode", 4)) {
				data->root_mode = (value & 0555) | S_IFDIR;
				data->perms.mode = (value & 0666) | S_IFREG;
			} else {
				goto invalid;
			}
			break;

		case 3:
			/* uid/gid are mapped through the current userns. */
			if (!memcmp(opts, "uid", 3)) {
				data->perms.uid = make_kuid(current_user_ns(), value);
				if (!uid_valid(data->perms.uid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else if (!memcmp(opts, "gid", 3)) {
				data->perms.gid = make_kgid(current_user_ns(), value);
				if (!gid_valid(data->perms.gid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else {
				goto invalid;
			}
			break;

		default:
invalid:
			pr_err("%s: invalid option\n", opts);
			return -EINVAL;
		}

		/* Next iteration */
		if (!comma)
			break;
		opts = comma + 1;
	}

	return 0;
}
1193
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001194/* "mount -t functionfs dev_name /dev/function" ends up here */
1195
/*
 * Mount callback: parses mount options, allocates the ffs_data
 * instance, acquires the backing ffs device by @dev_name and then
 * delegates to mount_nodev()/ffs_sb_fill().  ffs_sb_fill() NULLs
 * data.ffs_data once the super block has taken ownership; if it is
 * still set after a failed mount, the device and data are released
 * here.
 */
static struct dentry *
ffs_fs_mount(struct file_system_type *t, int flags,
	      const char *dev_name, void *opts)
{
	struct ffs_sb_fill_data data = {
		.perms = {
			.mode = S_IFREG | 0600,
			.uid = GLOBAL_ROOT_UID,
			.gid = GLOBAL_ROOT_GID,
		},
		.root_mode = S_IFDIR | 0500,
	};
	struct dentry *rv;
	int ret;
	void *ffs_dev;
	struct ffs_data *ffs;

	ENTER();

	ret = ffs_fs_parse_opts(&data, opts);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);

	/* Starts with one reference, dropped on every error path below. */
	ffs = ffs_data_new();
	if (unlikely(!ffs))
		return ERR_PTR(-ENOMEM);
	ffs->file_perms = data.perms;

	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
	if (unlikely(!ffs->dev_name)) {
		ffs_data_put(ffs);
		return ERR_PTR(-ENOMEM);
	}

	ffs_dev = ffs_acquire_dev(dev_name);
	if (IS_ERR(ffs_dev)) {
		ffs_data_put(ffs);
		return ERR_CAST(ffs_dev);
	}
	ffs->private_data = ffs_dev;
	data.ffs_data = ffs;

	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
	if (IS_ERR(rv) && data.ffs_data) {
		/* ffs_sb_fill() never ran (or failed before taking over). */
		ffs_release_dev(data.ffs_data);
		ffs_data_put(data.ffs_data);
	}
	return rv;
}
1245
1246static void
1247ffs_fs_kill_sb(struct super_block *sb)
1248{
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001249 ENTER();
1250
1251 kill_litter_super(sb);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001252 if (sb->s_fs_info) {
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01001253 ffs_release_dev(sb->s_fs_info);
Al Viro5b5f9562012-01-08 15:38:27 -05001254 ffs_data_put(sb->s_fs_info);
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001255 }
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001256}
1257
/* The functionfs file system type ("mount -t functionfs"). */
static struct file_system_type ffs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "functionfs",
	.mount		= ffs_fs_mount,
	.kill_sb	= ffs_fs_kill_sb,
};
MODULE_ALIAS_FS("functionfs");	/* auto-load the module on mount */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001265
1266
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001267/* Driver's main init/cleanup functions *************************************/
1268
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001269static int functionfs_init(void)
1270{
1271 int ret;
1272
1273 ENTER();
1274
1275 ret = register_filesystem(&ffs_fs_type);
1276 if (likely(!ret))
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001277 pr_info("file system registered\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001278 else
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001279 pr_err("failed registering file system (%d)\n", ret);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001280
1281 return ret;
1282}
1283
/* Unregister the file system type (module exit path). */
static void functionfs_cleanup(void)
{
	ENTER();

	pr_info("unloading\n");
	unregister_filesystem(&ffs_fs_type);
}
1291
1292
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001293/* ffs_data and ffs_function construction and destruction code **************/
1294
1295static void ffs_data_clear(struct ffs_data *ffs);
1296static void ffs_data_reset(struct ffs_data *ffs);
1297
/* Take an additional reference on @ffs (paired with ffs_data_put()). */
static void ffs_data_get(struct ffs_data *ffs)
{
	ENTER();

	atomic_inc(&ffs->ref);
}
1304
/*
 * Called when a file backed by @ffs is opened: takes a reference and
 * bumps the open-files counter.  Balanced by ffs_data_closed().
 */
static void ffs_data_opened(struct ffs_data *ffs)
{
	ENTER();

	atomic_inc(&ffs->ref);
	atomic_inc(&ffs->opened);
}
1312
1313static void ffs_data_put(struct ffs_data *ffs)
1314{
1315 ENTER();
1316
1317 if (unlikely(atomic_dec_and_test(&ffs->ref))) {
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01001318 pr_info("%s(): freeing\n", __func__);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001319 ffs_data_clear(ffs);
Andi Kleen647d5582012-03-16 12:01:02 -07001320 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001321 waitqueue_active(&ffs->ep0req_completion.wait));
Andrzej Pietrasiewicz581791f2012-05-14 15:51:52 +02001322 kfree(ffs->dev_name);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001323 kfree(ffs);
1324 }
1325}
1326
/*
 * Called when a file backed by @ffs is closed.  When the last open
 * file goes away the state machine is reset; the reference taken in
 * ffs_data_opened() is dropped in any case.
 */
static void ffs_data_closed(struct ffs_data *ffs)
{
	ENTER();

	if (atomic_dec_and_test(&ffs->opened)) {
		ffs->state = FFS_CLOSING;
		ffs_data_reset(ffs);
	}

	ffs_data_put(ffs);
}
1338
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001339static struct ffs_data *ffs_data_new(void)
1340{
1341 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1342 if (unlikely(!ffs))
1343 return 0;
1344
1345 ENTER();
1346
1347 atomic_set(&ffs->ref, 1);
1348 atomic_set(&ffs->opened, 0);
1349 ffs->state = FFS_READ_DESCRIPTORS;
1350 mutex_init(&ffs->mutex);
1351 spin_lock_init(&ffs->eps_lock);
1352 init_waitqueue_head(&ffs->ev.waitq);
1353 init_completion(&ffs->ep0req_completion);
1354
1355 /* XXX REVISIT need to update it in some places, or do we? */
1356 ffs->ev.can_stall = 1;
1357
1358 return ffs;
1359}
1360
/*
 * Release everything attached to @ffs: run the closed callback if it
 * is still pending, destroy the endpoint files and free the raw
 * descriptor/string buffers.  Does not free @ffs itself and leaves
 * the freed pointers dangling -- callers must follow up with
 * ffs_data_reset() or free @ffs (see ffs_data_put()).
 */
static void ffs_data_clear(struct ffs_data *ffs)
{
	ENTER();

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
		ffs_closed(ffs);

	/* functionfs_unbind() must have detached the gadget by now. */
	BUG_ON(ffs->gadget);

	if (ffs->epfiles)
		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

	kfree(ffs->raw_descs);
	kfree(ffs->raw_strings);
	kfree(ffs->stringtabs);
}
1377
/*
 * Bring @ffs back to its pristine FFS_READ_DESCRIPTORS state, freeing
 * all user-supplied data first via ffs_data_clear() and zeroing every
 * derived counter and pointer.
 */
static void ffs_data_reset(struct ffs_data *ffs)
{
	ENTER();

	ffs_data_clear(ffs);

	/* The buffers freed above must no longer be reachable. */
	ffs->epfiles = NULL;
	ffs->raw_descs = NULL;
	ffs->raw_strings = NULL;
	ffs->stringtabs = NULL;

	ffs->raw_descs_length = 0;
	ffs->raw_fs_descs_length = 0;
	ffs->fs_descs_count = 0;
	ffs->hs_descs_count = 0;

	ffs->strings_count = 0;
	ffs->interfaces_count = 0;
	ffs->eps_count = 0;

	ffs->ev.count = 0;

	ffs->state = FFS_READ_DESCRIPTORS;
	ffs->setup_state = FFS_NO_SETUP;
	ffs->flags = 0;
}
1404
1405
1406static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1407{
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001408 struct usb_gadget_strings **lang;
1409 int first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001410
1411 ENTER();
1412
1413 if (WARN_ON(ffs->state != FFS_ACTIVE
1414 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
1415 return -EBADFD;
1416
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001417 first_id = usb_string_ids_n(cdev, ffs->strings_count);
1418 if (unlikely(first_id < 0))
1419 return first_id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001420
1421 ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
1422 if (unlikely(!ffs->ep0req))
1423 return -ENOMEM;
1424 ffs->ep0req->complete = ffs_ep0_complete;
1425 ffs->ep0req->context = ffs;
1426
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001427 lang = ffs->stringtabs;
1428 for (lang = ffs->stringtabs; *lang; ++lang) {
1429 struct usb_string *str = (*lang)->strings;
1430 int id = first_id;
1431 for (; str->s; ++id, ++str)
1432 str->id = id;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001433 }
1434
1435 ffs->gadget = cdev->gadget;
Michal Nazarewiczfd7c9a02010-06-16 12:08:00 +02001436 ffs_data_get(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001437 return 0;
1438}
1439
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001440static void functionfs_unbind(struct ffs_data *ffs)
1441{
1442 ENTER();
1443
1444 if (!WARN_ON(!ffs->gadget)) {
1445 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
1446 ffs->ep0req = NULL;
1447 ffs->gadget = NULL;
Andrzej Pietrasiewicze2190a92012-03-12 12:55:41 +01001448 clear_bit(FFS_FL_BOUND, &ffs->flags);
Dan Carpenterdf498992013-08-23 11:16:15 +03001449 ffs_data_put(ffs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001450 }
1451}
1452
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001453static int ffs_epfiles_create(struct ffs_data *ffs)
1454{
1455 struct ffs_epfile *epfile, *epfiles;
1456 unsigned i, count;
1457
1458 ENTER();
1459
1460 count = ffs->eps_count;
Thomas Meyer9823a522011-11-29 22:08:00 +01001461 epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001462 if (!epfiles)
1463 return -ENOMEM;
1464
1465 epfile = epfiles;
1466 for (i = 1; i <= count; ++i, ++epfile) {
1467 epfile->ffs = ffs;
1468 mutex_init(&epfile->mutex);
1469 init_waitqueue_head(&epfile->wait);
1470 sprintf(epfiles->name, "ep%u", i);
1471 if (!unlikely(ffs_sb_create_file(ffs->sb, epfiles->name, epfile,
1472 &ffs_epfile_operations,
1473 &epfile->dentry))) {
1474 ffs_epfiles_destroy(epfiles, i - 1);
1475 return -ENOMEM;
1476 }
1477 }
1478
1479 ffs->epfiles = epfiles;
1480 return 0;
1481}
1482
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001483static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
1484{
1485 struct ffs_epfile *epfile = epfiles;
1486
1487 ENTER();
1488
1489 for (; count; --count, ++epfile) {
1490 BUG_ON(mutex_is_locked(&epfile->mutex) ||
1491 waitqueue_active(&epfile->wait));
1492 if (epfile->dentry) {
1493 d_delete(epfile->dentry);
1494 dput(epfile->dentry);
1495 epfile->dentry = NULL;
1496 }
1497 }
1498
1499 kfree(epfiles);
1500}
1501
Michal Nazarewicz7898aee2010-06-16 12:07:58 +02001502static int functionfs_bind_config(struct usb_composite_dev *cdev,
1503 struct usb_configuration *c,
1504 struct ffs_data *ffs)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001505{
1506 struct ffs_function *func;
1507 int ret;
1508
1509 ENTER();
1510
1511 func = kzalloc(sizeof *func, GFP_KERNEL);
1512 if (unlikely(!func))
1513 return -ENOMEM;
1514
1515 func->function.name = "Function FS Gadget";
1516 func->function.strings = ffs->stringtabs;
1517
1518 func->function.bind = ffs_func_bind;
1519 func->function.unbind = ffs_func_unbind;
1520 func->function.set_alt = ffs_func_set_alt;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001521 func->function.disable = ffs_func_disable;
1522 func->function.setup = ffs_func_setup;
1523 func->function.suspend = ffs_func_suspend;
1524 func->function.resume = ffs_func_resume;
1525
1526 func->conf = c;
1527 func->gadget = cdev->gadget;
1528 func->ffs = ffs;
1529 ffs_data_get(ffs);
1530
1531 ret = usb_add_function(c, &func->function);
1532 if (unlikely(ret))
1533 ffs_func_free(func);
1534
1535 return ret;
1536}
1537
/*
 * Free a struct ffs_function: release every per-endpoint USB request
 * allocated by autoconfig, drop the reference on the ffs instance and
 * free the eps chunk and the function itself.
 * NOTE(review): the do/while touches at least one element, so it
 * assumes eps_count >= 1 -- presumably guaranteed by descriptor
 * validation; confirm.
 */
static void ffs_func_free(struct ffs_function *func)
{
	struct ffs_ep *ep = func->eps;
	unsigned count = func->ffs->eps_count;
	unsigned long flags;

	ENTER();

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		ep->req = NULL;
		++ep;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);

	ffs_data_put(func->ffs);

	kfree(func->eps);
	/*
	 * eps and interfaces_nums are allocated in the same chunk so
	 * only one free is required. Descriptors are also allocated
	 * in the same chunk.
	 */

	kfree(func);
}
1567
/*
 * Disable all of the function's endpoints and detach them from their
 * epN files.  Runs under eps_lock so it cannot race with endpoint I/O
 * or ffs_func_eps_enable().
 * NOTE(review): do/while assumes eps_count >= 1 -- confirm.
 */
static void ffs_func_eps_disable(struct ffs_function *func)
{
	struct ffs_ep *ep = func->eps;
	struct ffs_epfile *epfile = func->ffs->epfiles;
	unsigned count = func->ffs->eps_count;
	unsigned long flags;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		/* pending requests get nuked */
		if (likely(ep->ep))
			usb_ep_disable(ep->ep);
		epfile->ep = NULL;

		++ep;
		++epfile;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
1587
/*
 * Enable all of the function's endpoints and attach each to its epN
 * file, waking any readers/writers blocked waiting for the endpoint.
 * Stops at the first usb_ep_enable() failure and returns its error
 * (already-enabled endpoints are left enabled).  Runs under eps_lock.
 */
static int ffs_func_eps_enable(struct ffs_function *func)
{
	struct ffs_data *ffs = func->ffs;
	struct ffs_ep *ep = func->eps;
	struct ffs_epfile *epfile = ffs->epfiles;
	unsigned count = ffs->eps_count;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		struct usb_endpoint_descriptor *ds;
		/*
		 * Prefer descs[1] when present -- presumably the
		 * high-speed variant, falling back to full speed at
		 * index 0; confirm against the descriptor setup code.
		 */
		ds = ep->descs[ep->descs[1] ? 1 : 0];

		ep->ep->driver_data = ep;
		ep->ep->desc = ds;
		ret = usb_ep_enable(ep->ep);
		if (likely(!ret)) {
			epfile->ep = ep;
			epfile->in = usb_endpoint_dir_in(ds);
			epfile->isoc = usb_endpoint_xfer_isoc(ds);
		} else {
			break;
		}

		/* Unblock I/O waiting in ffs_epfile_io(). */
		wake_up(&epfile->wait);

		++ep;
		++epfile;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);

	return ret;
}
1622
1623
1624/* Parsing and building descriptors and strings *****************************/
1625
/*
 * This validates whether the data pointed to by data is a valid USB
 * descriptor, and records how many interfaces, endpoints and strings
 * are required by the given configuration.  Returns the address after
 * the descriptor, or a negative error code if the data is invalid.
 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001632
1633enum ffs_entity_type {
1634 FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
1635};
1636
1637typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
1638 u8 *valuep,
1639 struct usb_descriptor_header *desc,
1640 void *priv);
1641
/*
 * Validate a single USB descriptor at @data (at most @len bytes) and
 * report the entities it contains (interface, string, endpoint) to
 * @entity with @priv.  Returns the descriptor's length on success or
 * a negative error code (-EINVAL, or whatever @entity returned).
 */
static int __must_check ffs_do_desc(char *data, unsigned len,
				    ffs_entity_callback entity, void *priv)
{
	struct usb_descriptor_header *_ds = (void *)data;
	u8 length;
	int ret;

	ENTER();

	/* At least two bytes are required: length and type */
	if (len < 2) {
		pr_vdebug("descriptor too short\n");
		return -EINVAL;
	}

	/* If we have at least as many bytes as the descriptor takes? */
	length = _ds->bLength;
	if (len < length) {
		pr_vdebug("descriptor longer then available data\n");
		return -EINVAL;
	}

/*
 * Helper macro: sanity-check an entity value, report it to the
 * callback and bail out (returning from this function!) on error.
 */
#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {					\
		pr_vdebug("entity " #type "(%02x)\n", (val));		\
		if (unlikely(!__entity_check_ ##type(val))) {		\
			pr_vdebug("invalid entity's value\n");		\
			return -EINVAL;					\
		}							\
		ret = entity(FFS_ ##type, &val, _ds, priv);		\
		if (unlikely(ret < 0)) {				\
			pr_debug("entity " #type "(%02x); ret = %d\n",	\
				 (val), ret);				\
			return ret;					\
		}							\
	} while (0)

	/* Parse descriptor depending on type. */
	switch (_ds->bDescriptorType) {
	case USB_DT_DEVICE:
	case USB_DT_CONFIG:
	case USB_DT_STRING:
	case USB_DT_DEVICE_QUALIFIER:
		/* function can't have any of those */
		pr_vdebug("descriptor reserved for gadget: %d\n",
			  _ds->bDescriptorType);
		return -EINVAL;

	case USB_DT_INTERFACE: {
		struct usb_interface_descriptor *ds = (void *)_ds;
		pr_vdebug("interface descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;

		__entity(INTERFACE, ds->bInterfaceNumber);
		if (ds->iInterface)
			__entity(STRING, ds->iInterface);
	}
		break;

	case USB_DT_ENDPOINT: {
		struct usb_endpoint_descriptor *ds = (void *)_ds;
		pr_vdebug("endpoint descriptor\n");
		if (length != USB_DT_ENDPOINT_SIZE &&
		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
			goto inv_length;
		__entity(ENDPOINT, ds->bEndpointAddress);
	}
		break;

	case HID_DT_HID:
		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTG:
		if (length != sizeof(struct usb_otg_descriptor))
			goto inv_length;
		break;

	case USB_DT_INTERFACE_ASSOCIATION: {
		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
		pr_vdebug("interface association descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;
		if (ds->iFunction)
			__entity(STRING, ds->iFunction);
	}
		break;

	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_INTERFACE_POWER:
	case USB_DT_DEBUG:
	case USB_DT_SECURITY:
	case USB_DT_CS_RADIO_CONTROL:
		/* TODO */
		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

	default:
		/* We should never be here */
		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

inv_length:
		pr_vdebug("invalid length: %d (descriptor %d)\n",
			  _ds->bLength, _ds->bDescriptorType);
		return -EINVAL;
	}

#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT

	return length;
}
1763
/*
 * Parse @count descriptors from @data (at most @len bytes).  For each
 * descriptor a FFS_DESCRIPTOR entity is reported first (with the
 * running index smuggled through the valuep pointer and data==NULL
 * once all @count descriptors were seen), then the descriptor itself
 * is handed to ffs_do_desc().  Returns the number of bytes consumed
 * or a negative error code.
 */
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
				     ffs_entity_callback entity, void *priv)
{
	const unsigned _len = len;
	unsigned long num = 0;

	ENTER();

	for (;;) {
		int ret;

		/* NULL data signals "end of list" to the callback. */
		if (num == count)
			data = NULL;

		/* Record "descriptor" entity */
		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
		if (unlikely(ret < 0)) {
			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}

		if (!data)
			return _len - len;

		ret = ffs_do_desc(data, len, entity, priv);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		/* Advance past the descriptor just consumed. */
		len -= ret;
		data += ret;
		++num;
	}
}
1800
/*
 * Entity callback for the first (counting) parsing pass: records how
 * many interfaces, strings and endpoints the descriptors require so
 * the backing structures can be sized.  @priv is the struct ffs_data.
 * Always returns 0.
 */
static int __ffs_data_do_entity(enum ffs_entity_type type,
				u8 *valuep, struct usb_descriptor_header *desc,
				void *priv)
{
	struct ffs_data *ffs = priv;

	ENTER();

	switch (type) {
	case FFS_DESCRIPTOR:
		break;

	case FFS_INTERFACE:
		/*
		 * Interfaces are indexed from zero so if we
		 * encountered interface "n" then there are at least
		 * "n+1" interfaces.
		 */
		if (*valuep >= ffs->interfaces_count)
			ffs->interfaces_count = *valuep + 1;
		break;

	case FFS_STRING:
		/*
		 * Strings are indexed from 1 (0 is magic ;) reserved
		 * for languages list or some such)
		 */
		if (*valuep > ffs->strings_count)
			ffs->strings_count = *valuep;
		break;

	case FFS_ENDPOINT:
		/* Endpoints are indexed from 1 as well. */
		if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
			ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
		break;
	}

	return 0;
}
1841
/*
 * Parse the descriptors blob user space wrote to ep0.
 *
 * Layout (little-endian u32 each): magic, total length, fs descriptor
 * count, hs descriptor count; then the raw full-speed descriptors
 * followed by the raw high-speed descriptors.  This pass only counts
 * entities via __ffs_data_do_entity().
 *
 * On success ownership of @_data transfers to @ffs (->raw_descs); on
 * any failure @_data is freed here.  NOTE(review): the header reads at
 * data+0..15 assume the caller guaranteed len >= 16 — confirm at the
 * ep0 write path.
 */
static int __ffs_data_got_descs(struct ffs_data *ffs,
				char *const _data, size_t len)
{
	unsigned fs_count, hs_count;
	int fs_len, ret = -EINVAL;
	char *data = _data;

	ENTER();

	/* Magic must match and the embedded length must equal what we got */
	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
		     get_unaligned_le32(data + 4) != len))
		goto error;
	fs_count = get_unaligned_le32(data + 8);
	hs_count = get_unaligned_le32(data + 12);

	/* At least one speed must be described */
	if (!fs_count && !hs_count)
		goto einval;

	/* Skip the 16-byte header */
	data += 16;
	len -= 16;

	if (likely(fs_count)) {
		/* fs_len = bytes consumed by the full-speed descriptors */
		fs_len = ffs_do_descs(fs_count, data, len,
				      __ffs_data_do_entity, ffs);
		if (unlikely(fs_len < 0)) {
			ret = fs_len;
			goto error;
		}

		data += fs_len;
		len -= fs_len;
	} else {
		fs_len = 0;
	}

	if (likely(hs_count)) {
		/* ret = bytes consumed by the high-speed descriptors */
		ret = ffs_do_descs(hs_count, data, len,
				   __ffs_data_do_entity, ffs);
		if (unlikely(ret < 0))
			goto error;
	} else {
		ret = 0;
	}

	/* Reject trailing garbage after the last descriptor */
	if (unlikely(len != ret))
		goto einval;

	/* Success: stash the raw blob and the counts */
	ffs->raw_fs_descs_length = fs_len;
	ffs->raw_descs_length = fs_len + ret;
	ffs->raw_descs = _data;
	ffs->fs_descs_count = fs_count;
	ffs->hs_descs_count = hs_count;

	return 0;

einval:
	ret = -EINVAL;
error:
	kfree(_data);
	return ret;
}
1903
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001904static int __ffs_data_got_strings(struct ffs_data *ffs,
1905 char *const _data, size_t len)
1906{
1907 u32 str_count, needed_count, lang_count;
1908 struct usb_gadget_strings **stringtabs, *t;
1909 struct usb_string *strings, *s;
1910 const char *data = _data;
1911
1912 ENTER();
1913
1914 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1915 get_unaligned_le32(data + 4) != len))
1916 goto error;
1917 str_count = get_unaligned_le32(data + 8);
1918 lang_count = get_unaligned_le32(data + 12);
1919
1920 /* if one is zero the other must be zero */
1921 if (unlikely(!str_count != !lang_count))
1922 goto error;
1923
1924 /* Do we have at least as many strings as descriptors need? */
1925 needed_count = ffs->strings_count;
1926 if (unlikely(str_count < needed_count))
1927 goto error;
1928
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001929 /*
1930 * If we don't need any strings just return and free all
1931 * memory.
1932 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001933 if (!needed_count) {
1934 kfree(_data);
1935 return 0;
1936 }
1937
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001938 /* Allocate everything in one chunk so there's less maintenance. */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001939 {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001940 unsigned i = 0;
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01001941 vla_group(d);
1942 vla_item(d, struct usb_gadget_strings *, stringtabs,
1943 lang_count + 1);
1944 vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
1945 vla_item(d, struct usb_string, strings,
1946 lang_count*(needed_count+1));
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001947
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01001948 char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
1949
1950 if (unlikely(!vlabuf)) {
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001951 kfree(_data);
1952 return -ENOMEM;
1953 }
1954
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01001955 /* Initialize the VLA pointers */
1956 stringtabs = vla_ptr(vlabuf, d, stringtabs);
1957 t = vla_ptr(vlabuf, d, stringtab);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001958 i = lang_count;
1959 do {
1960 *stringtabs++ = t++;
1961 } while (--i);
1962 *stringtabs = NULL;
1963
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01001964 /* stringtabs = vlabuf = d_stringtabs for later kfree */
1965 stringtabs = vla_ptr(vlabuf, d, stringtabs);
1966 t = vla_ptr(vlabuf, d, stringtab);
1967 s = vla_ptr(vlabuf, d, strings);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001968 strings = s;
1969 }
1970
1971 /* For each language */
1972 data += 16;
1973 len -= 16;
1974
1975 do { /* lang_count > 0 so we can use do-while */
1976 unsigned needed = needed_count;
1977
1978 if (unlikely(len < 3))
1979 goto error_free;
1980 t->language = get_unaligned_le16(data);
1981 t->strings = s;
1982 ++t;
1983
1984 data += 2;
1985 len -= 2;
1986
1987 /* For each string */
1988 do { /* str_count > 0 so we can use do-while */
1989 size_t length = strnlen(data, len);
1990
1991 if (unlikely(length == len))
1992 goto error_free;
1993
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01001994 /*
1995 * User may provide more strings then we need,
1996 * if that's the case we simply ignore the
1997 * rest
1998 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02001999 if (likely(needed)) {
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002000 /*
2001 * s->id will be set while adding
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002002 * function to configuration so for
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002003 * now just leave garbage here.
2004 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002005 s->s = data;
2006 --needed;
2007 ++s;
2008 }
2009
2010 data += length + 1;
2011 len -= length + 1;
2012 } while (--str_count);
2013
2014 s->id = 0; /* terminator */
2015 s->s = NULL;
2016 ++s;
2017
2018 } while (--lang_count);
2019
2020 /* Some garbage left? */
2021 if (unlikely(len))
2022 goto error_free;
2023
2024 /* Done! */
2025 ffs->stringtabs = stringtabs;
2026 ffs->raw_strings = _data;
2027
2028 return 0;
2029
2030error_free:
2031 kfree(stringtabs);
2032error:
2033 kfree(_data);
2034 return -EINVAL;
2035}
2036
2037
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002038/* Events handling and management *******************************************/
2039
/*
 * Append @type to the event queue, first purging events it supersedes.
 * Caller must hold ffs->ev.waitq.lock (see ffs_event_add()); waiters
 * on ev.waitq are woken with the lock held.
 */
static void __ffs_event_add(struct ffs_data *ffs,
			    enum usb_functionfs_event_type type)
{
	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
	int neg = 0;

	/*
	 * Abort any unhandled setup
	 *
	 * We do not need to worry about some cmpxchg() changing value
	 * of ffs->setup_state without holding the lock because when
	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
	 * the source does nothing.
	 */
	if (ffs->setup_state == FFS_SETUP_PENDING)
		ffs->setup_state = FFS_SETUP_CANCELED;

	/*
	 * Decide which queued events become redundant: with neg == 0,
	 * events equal to rem_type1/rem_type2 are dropped; with
	 * neg == 1 only those two types are kept.
	 */
	switch (type) {
	case FUNCTIONFS_RESUME:
		rem_type2 = FUNCTIONFS_SUSPEND;
		/* FALL THROUGH */
	case FUNCTIONFS_SUSPEND:
	case FUNCTIONFS_SETUP:
		rem_type1 = type;
		/* Discard all similar events */
		break;

	case FUNCTIONFS_BIND:
	case FUNCTIONFS_UNBIND:
	case FUNCTIONFS_DISABLE:
	case FUNCTIONFS_ENABLE:
		/* Discard everything other then power management. */
		rem_type1 = FUNCTIONFS_SUSPEND;
		rem_type2 = FUNCTIONFS_RESUME;
		neg = 1;
		break;

	default:
		BUG();
	}

	{
		/* Compact the queue in place, keeping survivors in order */
		u8 *ev = ffs->ev.types, *out = ev;
		unsigned n = ffs->ev.count;
		for (; n; --n, ++ev)
			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
				*out++ = *ev;
			else
				pr_vdebug("purging event %d\n", *ev);
		ffs->ev.count = out - ffs->ev.types;
	}

	pr_vdebug("adding event %d\n", type);
	ffs->ev.types[ffs->ev.count++] = type;
	wake_up_locked(&ffs->ev.waitq);
}
2096
2097static void ffs_event_add(struct ffs_data *ffs,
2098 enum usb_functionfs_event_type type)
2099{
2100 unsigned long flags;
2101 spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
2102 __ffs_event_add(ffs, type);
2103 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2104}
2105
2106
2107/* Bind/unbind USB function hooks *******************************************/
2108
/*
 * ffs_entity_callback for the first bind pass: record every descriptor
 * pointer into the function's fs/hs descriptor table; for endpoint
 * descriptors, claim a hardware endpoint via usb_ep_autoconfig() on
 * the full-speed pass and reuse its (rewritten) address on the
 * high-speed pass.  Returns 0 or a negative errno.
 *
 * NOTE: valuep is not dereferenced here — its pointer value encodes
 * the running descriptor index for the current speed (see the casts).
 */
static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
				    struct usb_descriptor_header *desc,
				    void *priv)
{
	struct usb_endpoint_descriptor *ds = (void *)desc;
	struct ffs_function *func = priv;
	struct ffs_ep *ffs_ep;

	/*
	 * If hs_descriptors is not NULL then we are reading hs
	 * descriptors now
	 */
	const int isHS = func->function.hs_descriptors != NULL;
	unsigned idx;

	if (type != FFS_DESCRIPTOR)
		return 0;

	if (isHS)
		func->function.hs_descriptors[(long)valuep] = desc;
	else
		func->function.fs_descriptors[(long)valuep] = desc;

	/* Only endpoint descriptors need the processing below */
	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return 0;

	/* Endpoint numbers are 1-based; eps[] is 0-based */
	idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
	ffs_ep = func->eps + idx;

	/* Each speed may describe a given endpoint at most once */
	if (unlikely(ffs_ep->descs[isHS])) {
		pr_vdebug("two %sspeed descriptors for EP %d\n",
			  isHS ? "high" : "full",
			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		return -EINVAL;
	}
	ffs_ep->descs[isHS] = ds;

	ffs_dump_mem(": Original ep desc", ds, ds->bLength);
	if (ffs_ep->ep) {
		/*
		 * Endpoint already claimed during the fs pass: copy
		 * its assigned address, and inherit wMaxPacketSize
		 * when the hs descriptor left it zero.
		 */
		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
		if (!ds->wMaxPacketSize)
			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
	} else {
		struct usb_request *req;
		struct usb_ep *ep;

		pr_vdebug("autoconfig\n");
		/* Claim a UDC endpoint; this rewrites ds in place */
		ep = usb_ep_autoconfig(func->gadget, ds);
		if (unlikely(!ep))
			return -ENOTSUPP;
		ep->driver_data = func->eps + idx;

		/* Pre-allocate the request used for I/O on this endpoint */
		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (unlikely(!req))
			return -ENOMEM;

		ffs_ep->ep = ep;
		ffs_ep->req = req;
		/* Map user-visible endpoint address -> eps index + 1 */
		func->eps_revmap[ds->bEndpointAddress &
				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
	}
	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);

	return 0;
}
2174
/*
 * ffs_entity_callback for the second bind pass: allocate interface
 * numbers from the composite core and rewrite interface, string and
 * endpoint numbers inside the raw descriptors to the values actually
 * assigned.  Returns 0 or a negative errno.
 */
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
				   struct usb_descriptor_header *desc,
				   void *priv)
{
	struct ffs_function *func = priv;
	unsigned idx;
	u8 newValue;

	switch (type) {
	default:
	case FFS_DESCRIPTOR:
		/* Handled in previous pass by __ffs_func_bind_do_descs() */
		return 0;

	case FFS_INTERFACE:
		idx = *valuep;
		/* < 0 (the 0xff memset in bind) means "not allocated yet" */
		if (func->interfaces_nums[idx] < 0) {
			int id = usb_interface_id(func->conf, &func->function);
			if (unlikely(id < 0))
				return id;
			func->interfaces_nums[idx] = id;
		}
		newValue = func->interfaces_nums[idx];
		break;

	case FFS_STRING:
		/* String' IDs are allocated when fsf_data is bound to cdev */
		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
		break;

	case FFS_ENDPOINT:
		/*
		 * USB_DT_ENDPOINT are handled in
		 * __ffs_func_bind_do_descs().
		 */
		if (desc->bDescriptorType == USB_DT_ENDPOINT)
			return 0;

		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
		if (unlikely(!func->eps[idx].ep))
			return -EINVAL;

		{
			/* Use whichever speed's descriptor was recorded */
			struct usb_endpoint_descriptor **descs;
			descs = func->eps[idx].descs;
			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
		}
		break;
	}

	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
	*valuep = newValue;
	return 0;
}
2229
2230static int ffs_func_bind(struct usb_configuration *c,
2231 struct usb_function *f)
2232{
2233 struct ffs_function *func = ffs_func_from_usb(f);
2234 struct ffs_data *ffs = func->ffs;
2235
2236 const int full = !!func->ffs->fs_descs_count;
2237 const int high = gadget_is_dualspeed(func->gadget) &&
2238 func->ffs->hs_descs_count;
2239
2240 int ret;
2241
2242 /* Make it a single chunk, less management later on */
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002243 vla_group(d);
2244 vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
2245 vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
2246 full ? ffs->fs_descs_count + 1 : 0);
2247 vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
2248 high ? ffs->hs_descs_count + 1 : 0);
2249 vla_item_with_sz(d, short, inums, ffs->interfaces_count);
2250 vla_item_with_sz(d, char, raw_descs,
2251 high ? ffs->raw_descs_length : ffs->raw_fs_descs_length);
2252 char *vlabuf;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002253
2254 ENTER();
2255
2256 /* Only high speed but not supported by gadget? */
2257 if (unlikely(!(full | high)))
2258 return -ENOTSUPP;
2259
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002260 /* Allocate a single chunk, less management later on */
2261 vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
2262 if (unlikely(!vlabuf))
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002263 return -ENOMEM;
2264
2265 /* Zero */
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002266 memset(vla_ptr(vlabuf, d, eps), 0, d_eps__sz);
2267 memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs + 16,
2268 d_raw_descs__sz);
2269 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2270 for (ret = ffs->eps_count; ret; --ret) {
2271 struct ffs_ep *ptr;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002272
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002273 ptr = vla_ptr(vlabuf, d, eps);
2274 ptr[ret].num = -1;
2275 }
2276
2277 /* Save pointers
2278 * d_eps == vlabuf, func->eps used to kfree vlabuf later
2279 */
2280 func->eps = vla_ptr(vlabuf, d, eps);
2281 func->interfaces_nums = vla_ptr(vlabuf, d, inums);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002282
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002283 /*
2284 * Go through all the endpoint descriptors and allocate
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002285 * endpoints first, so that later we can rewrite the endpoint
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002286 * numbers without worrying that it may be described later on.
2287 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002288 if (likely(full)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002289 func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002290 ret = ffs_do_descs(ffs->fs_descs_count,
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002291 vla_ptr(vlabuf, d, raw_descs),
2292 d_raw_descs__sz,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002293 __ffs_func_bind_do_descs, func);
2294 if (unlikely(ret < 0))
2295 goto error;
2296 } else {
2297 ret = 0;
2298 }
2299
2300 if (likely(high)) {
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002301 func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002302 ret = ffs_do_descs(ffs->hs_descs_count,
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002303 vla_ptr(vlabuf, d, raw_descs) + ret,
2304 d_raw_descs__sz - ret,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002305 __ffs_func_bind_do_descs, func);
Robert Baldyga88548942013-09-27 12:28:54 +02002306 if (unlikely(ret < 0))
2307 goto error;
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002308 }
2309
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002310 /*
2311 * Now handle interface numbers allocation and interface and
2312 * endpoint numbers rewriting. We can do that in one go
2313 * now.
2314 */
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002315 ret = ffs_do_descs(ffs->fs_descs_count +
2316 (high ? ffs->hs_descs_count : 0),
Andrzej Pietrasiewicze6f38622013-12-03 15:15:30 +01002317 vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002318 __ffs_func_bind_do_nums, func);
2319 if (unlikely(ret < 0))
2320 goto error;
2321
2322 /* And we're done */
2323 ffs_event_add(ffs, FUNCTIONFS_BIND);
2324 return 0;
2325
2326error:
2327 /* XXX Do we need to release all claimed endpoints here? */
2328 return ret;
2329}
2330
2331
2332/* Other USB function hooks *************************************************/
2333
2334static void ffs_func_unbind(struct usb_configuration *c,
2335 struct usb_function *f)
2336{
2337 struct ffs_function *func = ffs_func_from_usb(f);
2338 struct ffs_data *ffs = func->ffs;
2339
2340 ENTER();
2341
2342 if (ffs->func == func) {
2343 ffs_func_eps_disable(func);
2344 ffs->func = NULL;
2345 }
2346
2347 ffs_event_add(ffs, FUNCTIONFS_UNBIND);
2348
2349 ffs_func_free(func);
2350}
2351
/*
 * set_alt() hook: make this instance the active one and (re)enable its
 * endpoints, or tear everything down when alt == (unsigned)-1 (the
 * convention used by ffs_func_disable()).  Returns 0 or negative errno.
 */
static int ffs_func_set_alt(struct usb_function *f,
			    unsigned interface, unsigned alt)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	int ret = 0, intf;

	/* Validate the interface number unless this is a disable request */
	if (alt != (unsigned)-1) {
		intf = ffs_func_revmap_intf(func, interface);
		if (unlikely(intf < 0))
			return intf;
	}

	/* Always quiesce the currently active instance first */
	if (ffs->func)
		ffs_func_eps_disable(ffs->func);

	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	if (alt == (unsigned)-1) {
		ffs->func = NULL;
		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
		return 0;
	}

	ffs->func = func;
	ret = ffs_func_eps_enable(func);
	if (likely(ret >= 0))
		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
	return ret;
}
2383
/* disable() hook: implemented as "set alternate setting -1". */
static void ffs_func_disable(struct usb_function *f)
{
	ffs_func_set_alt(f, 0, (unsigned)-1);
}
2388
/*
 * setup() hook: accept control requests directed at one of our
 * interfaces or endpoints, translate wIndex to the user-space
 * numbering and queue a SETUP event; user space responds via ep0.
 * Returns 0, -ENODEV when not active, or -EOPNOTSUPP for recipients
 * we do not handle.
 */
static int ffs_func_setup(struct usb_function *f,
			  const struct usb_ctrlrequest *creq)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	unsigned long flags;
	int ret;

	ENTER();

	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
	pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
	pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
	pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
	pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));

	/*
	 * Most requests directed to interface go through here
	 * (notable exceptions are set/get interface) so we need to
	 * handle them. All other either handled by composite or
	 * passed to usb_configuration->setup() (if one is set). No
	 * matter, we will handle requests directed to endpoint here
	 * as well (as it's straightforward) but what to do with any
	 * other request?
	 */
	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		/* ret becomes the user-space interface index */
		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	case USB_RECIP_ENDPOINT:
		/* ret becomes the user-space endpoint number */
		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	default:
		return -EOPNOTSUPP;
	}

	/* Queue the event with wIndex rewritten to user-space numbering */
	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	ffs->ev.setup = *creq;
	ffs->ev.setup.wIndex = cpu_to_le16(ret);
	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

	return 0;
}
2442
2443static void ffs_func_suspend(struct usb_function *f)
2444{
2445 ENTER();
2446 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
2447}
2448
2449static void ffs_func_resume(struct usb_function *f)
2450{
2451 ENTER();
2452 ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
2453}
2454
2455
Michal Nazarewicz5ab54cf2010-11-12 14:29:28 +01002456/* Endpoint and interface numbers reverse mapping ***************************/
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002457
2458static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
2459{
2460 num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
2461 return num ? num : -EDOM;
2462}
2463
2464static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
2465{
2466 short *nums = func->interfaces_nums;
2467 unsigned count = func->ffs->interfaces_count;
2468
2469 for (; count; --count, ++nums) {
2470 if (*nums >= 0 && *nums == intf)
2471 return nums - func->interfaces_nums;
2472 }
2473
2474 return -EDOM;
2475}
2476
2477
Andrzej Pietrasiewicz4b187fc2013-12-03 15:15:32 +01002478/* Devices management *******************************************************/
2479
2480static LIST_HEAD(ffs_devices);
2481
2482static struct ffs_dev *_ffs_find_dev(const char *name)
2483{
2484 struct ffs_dev *dev;
2485
2486 list_for_each_entry(dev, &ffs_devices, entry) {
2487 if (!dev->name || !name)
2488 continue;
2489 if (strcmp(dev->name, name) == 0)
2490 return dev;
2491 }
2492
2493 return NULL;
2494}
2495
2496/*
2497 * ffs_lock must be taken by the caller of this function
2498 */
2499static struct ffs_dev *ffs_get_single_dev(void)
2500{
2501 struct ffs_dev *dev;
2502
2503 if (list_is_singular(&ffs_devices)) {
2504 dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
2505 if (dev->single)
2506 return dev;
2507 }
2508
2509 return NULL;
2510}
2511
2512/*
2513 * ffs_lock must be taken by the caller of this function
2514 */
static struct ffs_dev *ffs_find_dev(const char *name)
{
	/* A single-mode device matches regardless of name */
	struct ffs_dev *dev = ffs_get_single_dev();

	return dev ? dev : _ffs_find_dev(name);
}
2525
2526/*
2527 * ffs_lock must be taken by the caller of this function
2528 */
2529struct ffs_dev *ffs_alloc_dev(void)
2530{
2531 struct ffs_dev *dev;
2532 int ret;
2533
2534 if (ffs_get_single_dev())
2535 return ERR_PTR(-EBUSY);
2536
2537 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2538 if (!dev)
2539 return ERR_PTR(-ENOMEM);
2540
2541 if (list_empty(&ffs_devices)) {
2542 ret = functionfs_init();
2543 if (ret) {
2544 kfree(dev);
2545 return ERR_PTR(ret);
2546 }
2547 }
2548
2549 list_add(&dev->entry, &ffs_devices);
2550
2551 return dev;
2552}
2553
2554/*
2555 * ffs_lock must be taken by the caller of this function
2556 * The caller is responsible for "name" being available whenever f_fs needs it
2557 */
2558static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
2559{
2560 struct ffs_dev *existing;
2561
2562 existing = _ffs_find_dev(name);
2563 if (existing)
2564 return -EBUSY;
2565
2566 dev->name = name;
2567
2568 return 0;
2569}
2570
2571/*
2572 * The caller is responsible for "name" being available whenever f_fs needs it
2573 */
int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	int err;

	/* Locked wrapper around _ffs_name_dev() */
	ffs_dev_lock();
	err = _ffs_name_dev(dev, name);
	ffs_dev_unlock();

	return err;
}
2584
2585int ffs_single_dev(struct ffs_dev *dev)
2586{
2587 int ret;
2588
2589 ret = 0;
2590 ffs_dev_lock();
2591
2592 if (!list_is_singular(&ffs_devices))
2593 ret = -EBUSY;
2594 else
2595 dev->single = true;
2596
2597 ffs_dev_unlock();
2598 return ret;
2599}
2600
2601/*
2602 * ffs_lock must be taken by the caller of this function
2603 */
2604void ffs_free_dev(struct ffs_dev *dev)
2605{
2606 list_del(&dev->entry);
2607 kfree(dev);
2608 if (list_empty(&ffs_devices))
2609 functionfs_cleanup();
2610}
2611
2612static void *ffs_acquire_dev(const char *dev_name)
2613{
2614 struct ffs_dev *ffs_dev;
2615
2616 ENTER();
2617 ffs_dev_lock();
2618
2619 ffs_dev = ffs_find_dev(dev_name);
2620 if (!ffs_dev)
2621 ffs_dev = ERR_PTR(-ENODEV);
2622 else if (ffs_dev->mounted)
2623 ffs_dev = ERR_PTR(-EBUSY);
2624 else
2625 ffs_dev->mounted = true;
2626
2627 ffs_dev_unlock();
2628 return ffs_dev;
2629}
2630
2631static void ffs_release_dev(struct ffs_data *ffs_data)
2632{
2633 struct ffs_dev *ffs_dev;
2634
2635 ENTER();
2636 ffs_dev_lock();
2637
2638 ffs_dev = ffs_data->private_data;
2639 if (ffs_dev)
2640 ffs_dev->mounted = false;
2641
2642 ffs_dev_unlock();
2643}
2644
2645static int ffs_ready(struct ffs_data *ffs)
2646{
2647 struct ffs_dev *ffs_obj;
2648 int ret = 0;
2649
2650 ENTER();
2651 ffs_dev_lock();
2652
2653 ffs_obj = ffs->private_data;
2654 if (!ffs_obj) {
2655 ret = -EINVAL;
2656 goto done;
2657 }
2658 if (WARN_ON(ffs_obj->desc_ready)) {
2659 ret = -EBUSY;
2660 goto done;
2661 }
2662
2663 ffs_obj->desc_ready = true;
2664 ffs_obj->ffs_data = ffs;
2665
2666 if (ffs_obj->ffs_ready_callback)
2667 ret = ffs_obj->ffs_ready_callback(ffs);
2668
2669done:
2670 ffs_dev_unlock();
2671 return ret;
2672}
2673
2674static void ffs_closed(struct ffs_data *ffs)
2675{
2676 struct ffs_dev *ffs_obj;
2677
2678 ENTER();
2679 ffs_dev_lock();
2680
2681 ffs_obj = ffs->private_data;
2682 if (!ffs_obj)
2683 goto done;
2684
2685 ffs_obj->desc_ready = false;
2686
2687 if (ffs_obj->ffs_closed_callback)
2688 ffs_obj->ffs_closed_callback(ffs);
2689done:
2690 ffs_dev_unlock();
2691}
2692
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002693/* Misc helper functions ****************************************************/
2694
2695static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
2696{
2697 return nonblock
2698 ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
2699 : mutex_lock_interruptible(mutex);
2700}
2701
Al Viro260ef312012-09-26 21:43:45 -04002702static char *ffs_prepare_buffer(const char __user *buf, size_t len)
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002703{
2704 char *data;
2705
2706 if (unlikely(!len))
2707 return NULL;
2708
2709 data = kmalloc(len, GFP_KERNEL);
2710 if (unlikely(!data))
2711 return ERR_PTR(-ENOMEM);
2712
2713 if (unlikely(__copy_from_user(data, buf, len))) {
2714 kfree(data);
2715 return ERR_PTR(-EFAULT);
2716 }
2717
Michal Nazarewiczaa02f172010-11-17 17:09:47 +01002718 pr_vdebug("Buffer from user space:\n");
Michal Nazarewiczddf8abd2010-05-05 12:53:14 +02002719 ffs_dump_mem("", data, len);
2720
2721 return data;
2722}