blob: ca5f11b64142707f813b50c638a4965b0db5b285 [file] [log] [blame]
/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c, which is
 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */
17
18/* #define VERBOSE_DEBUG */
19
20#include <linux/kernel.h>
stephane duverger1e413942010-06-29 16:57:25 +020021#include <linux/sched.h>
David Brownellc1dca562008-06-19 17:51:44 -070022#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/tty.h>
26#include <linux/tty_flip.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028#include <linux/debugfs.h>
David Brownellc1dca562008-06-19 17:51:44 -070029
30#include "u_serial.h"
31
32
33/*
34 * This component encapsulates the TTY layer glue needed to provide basic
35 * "serial port" functionality through the USB gadget stack. Each such
36 * port is exposed through a /dev/ttyGS* node.
37 *
38 * After initialization (gserial_setup), these TTY port devices stay
39 * available until they are removed (gserial_cleanup). Each one may be
40 * connected to a USB function (gserial_connect), or disconnected (with
41 * gserial_disconnect) when the USB host issues a config change event.
42 * Data can only flow when the port is connected to the host.
43 *
44 * A given TTY port can be made available in multiple configurations.
45 * For example, each one might expose a ttyGS0 node which provides a
46 * login application. In one case that might use CDC ACM interface 0,
47 * while another configuration might use interface 3 for that. The
48 * work to handle that (including descriptor management) is not part
49 * of this component.
50 *
51 * Configurations may expose more than one TTY port. For example, if
52 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
53 * for a telephone or fax link. And ttyGS2 might be something that just
54 * needs a simple byte stream interface for some messaging protocol that
55 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
56 */
57
David Brownell937ef732008-07-07 12:16:08 -070058#define PREFIX "ttyGS"
59
David Brownellc1dca562008-06-19 17:51:44 -070060/*
61 * gserial is the lifecycle interface, used by USB functions
62 * gs_port is the I/O nexus, used by the tty driver
63 * tty_struct links to the tty/filesystem framework
64 *
65 * gserial <---> gs_port ... links will be null when the USB link is
David Brownell1f1ba112008-08-06 18:49:57 -070066 * inactive; managed by gserial_{connect,disconnect}(). each gserial
67 * instance can wrap its own USB control protocol.
David Brownellc1dca562008-06-19 17:51:44 -070068 * gserial->ioport == usb_ep->driver_data ... gs_port
69 * gs_port->port_usb ... gserial
70 *
71 * gs_port <---> tty_struct ... links will be null when the TTY file
72 * isn't opened; managed by gs_open()/gs_close()
73 * gserial->port_tty ... tty_struct
74 * tty_struct->driver_data ... gserial
75 */
76
77/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
78 * next layer of buffering. For TX that's a circular buffer; for RX
79 * consider it a NOP. A third layer is provided by the TTY code.
80 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081#define TX_QUEUE_SIZE 8
82#define TX_BUF_SIZE 4096
David Brownellc1dca562008-06-19 17:51:44 -070083#define WRITE_BUF_SIZE 8192 /* TX only */
84
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085#define RX_QUEUE_SIZE 8
86#define RX_BUF_SIZE 4096
87
88
/* Circular buffer: one contiguous allocation managed as a ring.
 * buf_get (consumer) and buf_put (producer) chase each other; the
 * buffer is empty when they are equal, so at most buf_size - 1
 * bytes are ever stored.
 */
struct gs_buf {
	unsigned		buf_size;	/* total bytes allocated */
	char			*buf_buf;	/* start of backing store */
	char			*buf_get;	/* next byte to consume */
	char			*buf_put;	/* next byte to fill */
};
96
/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;	/* non-NULL while USB side connected */
	struct tty_struct	*port_tty;	/* non-NULL while /dev/ttyGS* is open */

	unsigned		open_count;
	bool			openclose;	/* open/close in progress */
	u8			port_num;

	wait_queue_head_t	close_wait;	/* wait for last close */

	struct list_head	read_pool;	/* idle OUT requests */
	int			read_started;	/* OUT requests queued to the UDC */
	int			read_allocated;	/* total OUT requests allocated */
	struct list_head	read_queue;	/* completed OUT reqs awaiting the tty */
	unsigned		n_read;		/* bytes of head request already pushed */
	struct work_struct	push;		/* defers RX push into the tty layer */

	struct list_head	write_pool;	/* idle IN requests */
	int			write_started;	/* IN requests queued to the UDC */
	int			write_allocated;	/* total IN requests allocated */
	struct gs_buf		port_write_buf;	/* circular TX staging buffer */
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */

	/* I/O byte counters (presumably reported via debugfs; the
	 * reporting code is outside this chunk — confirm) */
	unsigned long		nbytes_from_host;
	unsigned long		nbytes_to_tty;
	unsigned long		nbytes_from_tty;
	unsigned long		nbytes_to_host;
};
133
134/* increase N_PORTS if you need more */
John Michelau677ba872010-11-08 18:05:37 -0600135#define N_PORTS 8
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;			/* NULL while slot unused/removed */
} ports[N_PORTS];
static unsigned	n_ports;			/* number of configured ports */
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct workqueue_struct *gserial_wq;
143
David Brownellc1dca562008-06-19 17:51:44 -0700144#define GS_CLOSE_TIMEOUT 15 /* seconds */
145
146
147
148#ifdef VERBOSE_DEBUG
149#define pr_vdebug(fmt, arg...) \
150 pr_debug(fmt, ##arg)
151#else
152#define pr_vdebug(fmt, arg...) \
153 ({ if (0) pr_debug(fmt, ##arg); })
154#endif
155
156/*-------------------------------------------------------------------------*/
157
158/* Circular Buffer */
159
160/*
161 * gs_buf_alloc
162 *
163 * Allocate a circular buffer and all associated memory.
164 */
165static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
166{
167 gb->buf_buf = kmalloc(size, GFP_KERNEL);
168 if (gb->buf_buf == NULL)
169 return -ENOMEM;
170
171 gb->buf_size = size;
172 gb->buf_put = gb->buf_buf;
173 gb->buf_get = gb->buf_buf;
174
175 return 0;
176}
177
178/*
179 * gs_buf_free
180 *
181 * Free the buffer and all associated memory.
182 */
183static void gs_buf_free(struct gs_buf *gb)
184{
185 kfree(gb->buf_buf);
186 gb->buf_buf = NULL;
187}
188
189/*
190 * gs_buf_clear
191 *
192 * Clear out all data in the circular buffer.
193 */
194static void gs_buf_clear(struct gs_buf *gb)
195{
196 gb->buf_get = gb->buf_put;
197 /* equivalent to a get of all data available */
198}
199
200/*
201 * gs_buf_data_avail
202 *
David Brownell1f1ba112008-08-06 18:49:57 -0700203 * Return the number of bytes of data written into the circular
David Brownellc1dca562008-06-19 17:51:44 -0700204 * buffer.
205 */
206static unsigned gs_buf_data_avail(struct gs_buf *gb)
207{
208 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
209}
210
211/*
212 * gs_buf_space_avail
213 *
214 * Return the number of bytes of space available in the circular
215 * buffer.
216 */
217static unsigned gs_buf_space_avail(struct gs_buf *gb)
218{
219 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
220}
221
222/*
223 * gs_buf_put
224 *
225 * Copy data data from a user buffer and put it into the circular buffer.
226 * Restrict to the amount of space available.
227 *
228 * Return the number of bytes copied.
229 */
230static unsigned
231gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
232{
233 unsigned len;
234
235 len = gs_buf_space_avail(gb);
236 if (count > len)
237 count = len;
238
239 if (count == 0)
240 return 0;
241
242 len = gb->buf_buf + gb->buf_size - gb->buf_put;
243 if (count > len) {
244 memcpy(gb->buf_put, buf, len);
245 memcpy(gb->buf_buf, buf+len, count - len);
246 gb->buf_put = gb->buf_buf + count - len;
247 } else {
248 memcpy(gb->buf_put, buf, count);
249 if (count < len)
250 gb->buf_put += count;
251 else /* count == len */
252 gb->buf_put = gb->buf_buf;
253 }
254
255 return count;
256}
257
258/*
259 * gs_buf_get
260 *
261 * Get data from the circular buffer and copy to the given buffer.
262 * Restrict to the amount of data available.
263 *
264 * Return the number of bytes copied.
265 */
266static unsigned
267gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
268{
269 unsigned len;
270
271 len = gs_buf_data_avail(gb);
272 if (count > len)
273 count = len;
274
275 if (count == 0)
276 return 0;
277
278 len = gb->buf_buf + gb->buf_size - gb->buf_get;
279 if (count > len) {
280 memcpy(buf, gb->buf_get, len);
281 memcpy(buf+len, gb->buf_buf, count - len);
282 gb->buf_get = gb->buf_buf + count - len;
283 } else {
284 memcpy(buf, gb->buf_get, count);
285 if (count < len)
286 gb->buf_get += count;
287 else /* count == len */
288 gb->buf_get = gb->buf_buf;
289 }
290
291 return count;
292}
293
294/*-------------------------------------------------------------------------*/
295
296/* I/O glue between TTY (upper) and USB function (lower) driver layers */
297
298/*
299 * gs_alloc_req
300 *
301 * Allocate a usb_request and its buffer. Returns a pointer to the
302 * usb_request or NULL if there is an error.
303 */
David Brownell1f1ba112008-08-06 18:49:57 -0700304struct usb_request *
David Brownellc1dca562008-06-19 17:51:44 -0700305gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
306{
307 struct usb_request *req;
308
309 req = usb_ep_alloc_request(ep, kmalloc_flags);
310
311 if (req != NULL) {
312 req->length = len;
313 req->buf = kmalloc(len, kmalloc_flags);
314 if (req->buf == NULL) {
315 usb_ep_free_request(ep, req);
316 return NULL;
317 }
318 }
319
320 return req;
321}
322
323/*
324 * gs_free_req
325 *
326 * Free a usb_request and its buffer.
327 */
David Brownell1f1ba112008-08-06 18:49:57 -0700328void gs_free_req(struct usb_ep *ep, struct usb_request *req)
David Brownellc1dca562008-06-19 17:51:44 -0700329{
330 kfree(req->buf);
331 usb_ep_free_request(ep, req);
332}
333
334/*
335 * gs_send_packet
336 *
337 * If there is data to send, a packet is built in the given
338 * buffer and the size is returned. If there is no data to
339 * send, 0 is returned.
340 *
341 * Called with port_lock held.
342 */
343static unsigned
344gs_send_packet(struct gs_port *port, char *packet, unsigned size)
345{
346 unsigned len;
347
348 len = gs_buf_data_avail(&port->port_write_buf);
349 if (len < size)
350 size = len;
351 if (size != 0)
352 size = gs_buf_get(&port->port_write_buf, packet, size);
353 return size;
354}
355
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Returns the last usb_ep_queue() status (0 on success).
 *
 * Context: caller owns port_lock; port_usb is non-null.  The lock
 * is dropped and re-taken around each usb_ep_queue() call, so the
 * port may disconnect mid-loop (handled below).
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	/* Tracks the length of the previously queued transfer so a
	 * zero-length packet (ZLP) can be sent when the last transfer
	 * exactly filled a USB packet.
	 *
	 * NOTE(review): 'static' makes this single variable shared by
	 * every port (and every caller); with more than one port in use
	 * the ZLP decision can be based on another port's history.  It
	 * looks like this should be a per-port field in struct gs_port
	 * — confirm and fix alongside a struct change.  Note req->zero
	 * below already requests short-packet termination.
	 */
	static long 		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		/* cap the number of in-flight IN transfers */
		if (port->write_started >= TX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
			if (prev_len && (prev_len % in->maxpacket == 0)) {
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				/* disconnected while unlocked: request
				 * pools were freed; free this one too */
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			/* buffer empty: let a blocked gs_close() drain-wait finish */
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		/* ask the UDC to terminate with a short/zero packet when
		 * this transfer emptied the buffer */
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
		port->nbytes_from_tty += req->length;

	}

	/* buffer space was freed up: let writers make progress */
	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
455
/*
 * gs_start_rx - refill the OUT endpoint's queue of read requests
 *
 * Moves idle requests from read_pool to the controller until the pool
 * empties, RX_QUEUE_SIZE transfers are in flight, or the tty closes.
 * Returns port->read_started, the number of requests now in flight.
 *
 * Context: caller owns port_lock, and port_usb is set.  The lock is
 * dropped and re-taken around each usb_ep_queue() call.
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	/* NOTE(review): 'started' is assigned below but never read;
	 * looks like a leftover — the function returns read_started. */
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		/* cap the number of in-flight OUT transfers */
		if (port->read_started >= RX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

	}
	return port->read_started;
}
513
/*
 * RX work item: takes data out of the RX queue and hands it up to the
 * TTY layer until it refuses to take any more data (or is throttled
 * back).  Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			/* endpoint disabled: stop refilling at the bottom */
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			/* whole packet consumed */
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
635
David Brownellc1dca562008-06-19 17:51:44 -0700636static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
637{
David Brownellc1dca562008-06-19 17:51:44 -0700638 struct gs_port *port = ep->driver_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 unsigned long flags;
David Brownellc1dca562008-06-19 17:51:44 -0700640
David Brownell937ef732008-07-07 12:16:08 -0700641 /* Queue all received data until the tty layer is ready for it. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700642 spin_lock_irqsave(&port->port_lock, flags);
643 port->nbytes_from_host += req->actual;
David Brownell937ef732008-07-07 12:16:08 -0700644 list_add_tail(&req->list, &port->read_queue);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700645 queue_work(gserial_wq, &port->push);
646 spin_unlock_irqrestore(&port->port_lock, flags);
David Brownellc1dca562008-06-19 17:51:44 -0700647}
648
/*
 * IN-endpoint completion: return the request to write_pool, account
 * the transmitted bytes, and (unless the link shut down) kick the
 * transmit path to keep draining the circular buffer.
 */
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_to_host += req->actual;
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion: queue more data if still connected */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}
679
Jim Sung28609d42010-11-04 18:47:51 -0700680static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
681 int *allocated)
David Brownellc1dca562008-06-19 17:51:44 -0700682{
683 struct usb_request *req;
684
685 while (!list_empty(head)) {
686 req = list_entry(head->next, struct usb_request, list);
687 list_del(&req->list);
688 gs_free_req(ep, req);
Jim Sung28609d42010-11-04 18:47:51 -0700689 if (allocated)
690 (*allocated)--;
David Brownellc1dca562008-06-19 17:51:44 -0700691 }
692}
693
694static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700695 int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
Jim Sung28609d42010-11-04 18:47:51 -0700696 int *allocated)
David Brownellc1dca562008-06-19 17:51:44 -0700697{
698 int i;
699 struct usb_request *req;
700
701 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
702 * do quite that many this time, don't fail ... we just won't
703 * be as speedy as we might otherwise be.
704 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700705 for (i = 0; i < num; i++) {
706 req = gs_alloc_req(ep, size, GFP_ATOMIC);
David Brownellc1dca562008-06-19 17:51:44 -0700707 if (!req)
708 return list_empty(head) ? -ENOMEM : 0;
709 req->complete = fn;
710 list_add_tail(&req->list, head);
Jim Sung28609d42010-11-04 18:47:51 -0700711 if (allocated)
712 (*allocated)++;
David Brownellc1dca562008-06-19 17:51:44 -0700713 }
714 return 0;
715}
716
/**
 * gs_start_io - start USB I/O streams
 * @dev: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 *
 * Returns 0 once RX is primed and TX requests are pooled, or a
 * negative errno; request pools are freed again on the local
 * failure paths.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			 gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* gs_start_rx() drops port_lock around usb_ep_queue(); the port
	 * may have disconnected meanwhile.  In that case the disconnect
	 * path presumably reclaims the request pools (see the comments
	 * in gs_start_tx/gs_start_rx) — confirm.
	 */
	if (!port->port_usb)
		return -EIO;
	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}
769
770/*-------------------------------------------------------------------------*/
771
772/* TTY Driver */
773
/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 *
 * Returns 0 on success (first or nested open), -ENXIO for a bad
 * minor, -ENODEV if the port was removed, or -ENOMEM if the write
 * buffer can't be allocated.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	if (port_num < 0 || port_num >= n_ports)
		return -ENXIO;

	/* Claim the right to do the "real open": loop because a
	 * concurrent open/close (openclose set) makes us back off
	 * and retry instead of blocking under the mutex.
	 */
	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		/* drop the spinlock: gs_buf_alloc() uses GFP_KERNEL */
		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* low_latency means ldiscs work is carried in the same context
	 * of tty_flip_buffer_push. The same can be called from IRQ with
	 * low_latency = 0. But better to use a dedicated worker thread
	 * to push the data.
	 */
	tty->low_latency = 1;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		/* tell the function driver the host can now be talked to */
		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
887
888static int gs_writes_finished(struct gs_port *p)
889{
890 int cond;
891
892 /* return true on disconnect or empty buffer */
893 spin_lock_irq(&p->port_lock);
894 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
895 spin_unlock_irq(&p->port_lock);
896
897 return cond;
898}
899
/*
 * gs_close - tty close(): tear down the tty <-> gs_port link.
 *
 * On the last close this drains (or, after GS_CLOSE_TIMEOUT, discards)
 * buffered TX data, notifies the function driver via gser->disconnect,
 * and reclaims the request pools so gs_open() can allocate them afresh.
 * Nested closes just decrement open_count.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	/* not the last close: just drop one reference */
	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-sample: the USB side may have gone while we slept */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push work queue fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	/* unblock anyone sleeping in close_wait for the last close */
	wake_up_interruptible(&port->close_wait);

	/*
	 * Freeing the previously queued requests as they are
	 * allocated again as a part of gs_open()
	 */
	if (port->port_usb) {
		spin_unlock_irq(&port->port_lock);
		usb_ep_fifo_flush(gser->out);
		usb_ep_fifo_flush(gser->in);
		spin_lock_irq(&port->port_lock);
		gs_free_requests(gser->out, &port->read_queue, NULL);
		gs_free_requests(gser->out, &port->read_pool, NULL);
		gs_free_requests(gser->in, &port->write_pool, NULL);
	}
	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;
exit:
	spin_unlock_irq(&port->port_lock);
}
976
977static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
978{
979 struct gs_port *port = tty->driver_data;
980 unsigned long flags;
981 int status;
982
983 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
984 port->port_num, tty, count);
985
986 spin_lock_irqsave(&port->port_lock, flags);
987 if (count)
988 count = gs_buf_put(&port->port_write_buf, buf, count);
989 /* treat count == 0 as flush_chars() */
990 if (port->port_usb)
991 status = gs_start_tx(port);
992 spin_unlock_irqrestore(&port->port_lock, flags);
993
994 return count;
995}
996
997static int gs_put_char(struct tty_struct *tty, unsigned char ch)
998{
999 struct gs_port *port = tty->driver_data;
1000 unsigned long flags;
1001 int status;
1002
1003 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
1004 port->port_num, tty, ch, __builtin_return_address(0));
1005
1006 spin_lock_irqsave(&port->port_lock, flags);
1007 status = gs_buf_put(&port->port_write_buf, &ch, 1);
1008 spin_unlock_irqrestore(&port->port_lock, flags);
1009
1010 return status;
1011}
1012
1013static void gs_flush_chars(struct tty_struct *tty)
1014{
1015 struct gs_port *port = tty->driver_data;
1016 unsigned long flags;
1017
1018 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
1019
1020 spin_lock_irqsave(&port->port_lock, flags);
1021 if (port->port_usb)
1022 gs_start_tx(port);
1023 spin_unlock_irqrestore(&port->port_lock, flags);
1024}
1025
1026static int gs_write_room(struct tty_struct *tty)
1027{
1028 struct gs_port *port = tty->driver_data;
1029 unsigned long flags;
1030 int room = 0;
1031
1032 spin_lock_irqsave(&port->port_lock, flags);
1033 if (port->port_usb)
1034 room = gs_buf_space_avail(&port->port_write_buf);
1035 spin_unlock_irqrestore(&port->port_lock, flags);
1036
1037 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
1038 port->port_num, tty, room);
1039
1040 return room;
1041}
1042
1043static int gs_chars_in_buffer(struct tty_struct *tty)
1044{
1045 struct gs_port *port = tty->driver_data;
1046 unsigned long flags;
1047 int chars = 0;
1048
1049 spin_lock_irqsave(&port->port_lock, flags);
1050 chars = gs_buf_data_avail(&port->port_write_buf);
1051 spin_unlock_irqrestore(&port->port_lock, flags);
1052
1053 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
1054 port->port_num, tty, chars);
1055
1056 return chars;
1057}
1058
1059/* undo side effects of setting TTY_THROTTLED */
1060static void gs_unthrottle(struct tty_struct *tty)
1061{
1062 struct gs_port *port = tty->driver_data;
1063 unsigned long flags;
David Brownellc1dca562008-06-19 17:51:44 -07001064
1065 spin_lock_irqsave(&port->port_lock, flags);
David Brownell937ef732008-07-07 12:16:08 -07001066 if (port->port_usb) {
1067 /* Kickstart read queue processing. We don't do xon/xoff,
1068 * rts/cts, or other handshaking with the host, but if the
1069 * read queue backs up enough we'll be NAKing OUT packets.
1070 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001071 queue_work(gserial_wq, &port->push);
David Brownell937ef732008-07-07 12:16:08 -07001072 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
1073 }
David Brownellc1dca562008-06-19 17:51:44 -07001074 spin_unlock_irqrestore(&port->port_lock, flags);
David Brownellc1dca562008-06-19 17:51:44 -07001075}
1076
David Brownell1f1ba112008-08-06 18:49:57 -07001077static int gs_break_ctl(struct tty_struct *tty, int duration)
1078{
1079 struct gs_port *port = tty->driver_data;
1080 int status = 0;
1081 struct gserial *gser;
1082
1083 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1084 port->port_num, duration);
1085
1086 spin_lock_irq(&port->port_lock);
1087 gser = port->port_usb;
1088 if (gser && gser->send_break)
1089 status = gser->send_break(gser, duration);
1090 spin_unlock_irq(&port->port_lock);
1091
1092 return status;
1093}
1094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095static int gs_tiocmget(struct tty_struct *tty)
1096{
1097 struct gs_port *port = tty->driver_data;
1098 struct gserial *gser;
1099 unsigned int result = 0;
1100
1101 spin_lock_irq(&port->port_lock);
1102 gser = port->port_usb;
1103 if (!gser) {
1104 result = -ENODEV;
1105 goto fail;
1106 }
1107
1108 if (gser->get_dtr)
1109 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1110
1111 if (gser->get_rts)
1112 result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
1113
1114 if (gser->serial_state & TIOCM_CD)
1115 result |= TIOCM_CD;
1116
1117 if (gser->serial_state & TIOCM_RI)
1118 result |= TIOCM_RI;
1119fail:
1120 spin_unlock_irq(&port->port_lock);
1121 return result;
1122}
1123
/*
 * TTY tiocmset(): propagate RI and CD modem-signal changes to the USB
 * function through its optional notification callbacks, mirroring the
 * new state in gser->serial_state.  Bits for which the function has no
 * callback are silently ignored; when several bits change, only the
 * status of the last callback issued is returned.  Returns -ENODEV
 * when no USB function is connected.
 */
static int gs_tiocmset(struct tty_struct *tty,
	unsigned int set, unsigned int clear)
{
	struct gs_port	*port = tty->driver_data;
	struct gserial *gser;
	int status = 0;

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (!gser) {
		status = -ENODEV;
		goto fail;
	}

	if (set & TIOCM_RI) {
		if (gser->send_ring_indicator) {
			gser->serial_state |= TIOCM_RI;
			status = gser->send_ring_indicator(gser, 1);
		}
	}
	if (clear & TIOCM_RI) {
		if (gser->send_ring_indicator) {
			gser->serial_state &= ~TIOCM_RI;
			status = gser->send_ring_indicator(gser, 0);
		}
	}
	if (set & TIOCM_CD) {
		if (gser->send_carrier_detect) {
			gser->serial_state |= TIOCM_CD;
			status = gser->send_carrier_detect(gser, 1);
		}
	}
	if (clear & TIOCM_CD) {
		if (gser->send_carrier_detect) {
			gser->serial_state &= ~TIOCM_CD;
			status = gser->send_carrier_detect(gser, 0);
		}
	}
fail:
	spin_unlock_irq(&port->port_lock);
	return status;
}
/* TTY operation dispatch table shared by every ttyGS* port. */
static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
	.tiocmget =		gs_tiocmget,
	.tiocmset =		gs_tiocmset,
};
1179
1180/*-------------------------------------------------------------------------*/
1181
1182static struct tty_driver *gs_tty_driver;
1183
Benoit Gobyaab96812011-04-19 20:37:33 -07001184static int
David Brownellc1dca562008-06-19 17:51:44 -07001185gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1186{
1187 struct gs_port *port;
1188
1189 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1190 if (port == NULL)
1191 return -ENOMEM;
1192
1193 spin_lock_init(&port->port_lock);
1194 init_waitqueue_head(&port->close_wait);
1195 init_waitqueue_head(&port->drain_wait);
1196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001197 INIT_WORK(&port->push, gs_rx_push);
David Brownellc1dca562008-06-19 17:51:44 -07001198
1199 INIT_LIST_HEAD(&port->read_pool);
David Brownell937ef732008-07-07 12:16:08 -07001200 INIT_LIST_HEAD(&port->read_queue);
David Brownellc1dca562008-06-19 17:51:44 -07001201 INIT_LIST_HEAD(&port->write_pool);
1202
1203 port->port_num = port_num;
1204 port->port_line_coding = *coding;
1205
1206 ports[port_num].port = port;
1207
1208 return 0;
1209}
1210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001211
1212#if defined(CONFIG_DEBUG_FS)
1213
1214#define BUF_SIZE 512
1215
1216static ssize_t debug_read_status(struct file *file, char __user *ubuf,
1217 size_t count, loff_t *ppos)
1218{
1219 struct gs_port *ui_dev = file->private_data;
1220 struct tty_struct *tty;
1221 struct gserial *gser;
1222 char *buf;
1223 unsigned long flags;
1224 int i = 0;
1225 int ret;
1226 int result = 0;
1227
1228 tty = ui_dev->port_tty;
1229 gser = ui_dev->port_usb;
1230
1231 buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
1232 if (!buf)
1233 return -ENOMEM;
1234
1235 spin_lock_irqsave(&ui_dev->port_lock, flags);
1236
1237 i += scnprintf(buf + i, BUF_SIZE - i,
1238 "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
1239
1240 i += scnprintf(buf + i, BUF_SIZE - i,
1241 "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
1242
1243 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
1244 (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
1245
1246 i += scnprintf(buf + i, BUF_SIZE - i,
1247 "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
1248
1249 i += scnprintf(buf + i, BUF_SIZE - i,
1250 "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
1251
1252 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
1253 (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
1254
1255 if (tty)
1256 i += scnprintf(buf + i, BUF_SIZE - i,
1257 "tty_flags: %lu\n", tty->flags);
1258
1259 if (gser->get_dtr) {
1260 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1261 i += scnprintf(buf + i, BUF_SIZE - i,
1262 "DTR_status: %d\n", result);
1263 }
1264
1265 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1266
1267 ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
1268
1269 kfree(buf);
1270
1271 return ret;
1272}
1273
1274static ssize_t debug_write_reset(struct file *file, const char __user *buf,
1275 size_t count, loff_t *ppos)
1276{
1277 struct gs_port *ui_dev = file->private_data;
1278 unsigned long flags;
1279
1280 spin_lock_irqsave(&ui_dev->port_lock, flags);
1281 ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
1282 ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
1283 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1284
1285 return count;
1286}
1287
/* debugfs open(): stash the gs_port pointer for the read/write handlers. */
static int serial_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
1293
1294const struct file_operations debug_rst_ops = {
1295 .open = serial_debug_open,
1296 .write = debug_write_reset,
1297};
1298
1299const struct file_operations debug_adb_ops = {
1300 .open = serial_debug_open,
1301 .read = debug_read_status,
1302};
1303
1304static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
1305{
1306 struct dentry *dent;
1307 char buf[48];
1308
1309 snprintf(buf, 48, "usb_serial%d", port_num);
1310 dent = debugfs_create_dir(buf, 0);
1311 if (IS_ERR(dent))
1312 return;
1313
1314 debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
1315 debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
1316}
1317#else
1318static void usb_debugfs_init(struct gs_port *ui_dev) {}
1319#endif
1320
David Brownellc1dca562008-06-19 17:51:44 -07001321/**
1322 * gserial_setup - initialize TTY driver for one or more ports
1323 * @g: gadget to associate with these ports
1324 * @count: how many ports to support
1325 * Context: may sleep
1326 *
1327 * The TTY stack needs to know in advance how many devices it should
1328 * plan to manage. Use this call to set up the ports you will be
1329 * exporting through USB. Later, connect them to functions based
1330 * on what configuration is activated by the USB host; and disconnect
1331 * them as appropriate.
1332 *
1333 * An example would be a two-configuration device in which both
1334 * configurations expose port 0, but through different functions.
1335 * One configuration could even expose port 1 while the other
1336 * one doesn't.
1337 *
1338 * Returns negative errno or zero.
1339 */
Benoit Gobyaab96812011-04-19 20:37:33 -07001340int gserial_setup(struct usb_gadget *g, unsigned count)
David Brownellc1dca562008-06-19 17:51:44 -07001341{
1342 unsigned i;
1343 struct usb_cdc_line_coding coding;
1344 int status;
1345
1346 if (count == 0 || count > N_PORTS)
1347 return -EINVAL;
1348
1349 gs_tty_driver = alloc_tty_driver(count);
1350 if (!gs_tty_driver)
1351 return -ENOMEM;
1352
1353 gs_tty_driver->owner = THIS_MODULE;
1354 gs_tty_driver->driver_name = "g_serial";
David Brownell937ef732008-07-07 12:16:08 -07001355 gs_tty_driver->name = PREFIX;
David Brownellc1dca562008-06-19 17:51:44 -07001356 /* uses dynamically assigned dev_t values */
1357
1358 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1359 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001360 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
1361 | TTY_DRIVER_RESET_TERMIOS;
David Brownellc1dca562008-06-19 17:51:44 -07001362 gs_tty_driver->init_termios = tty_std_termios;
1363
1364 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1365 * MS-Windows. Otherwise, most of these flags shouldn't affect
1366 * anything unless we were to actually hook up to a serial line.
1367 */
1368 gs_tty_driver->init_termios.c_cflag =
1369 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1370 gs_tty_driver->init_termios.c_ispeed = 9600;
1371 gs_tty_driver->init_termios.c_ospeed = 9600;
1372
Harvey Harrison551509d2009-02-11 14:11:36 -08001373 coding.dwDTERate = cpu_to_le32(9600);
David Brownellc1dca562008-06-19 17:51:44 -07001374 coding.bCharFormat = 8;
1375 coding.bParityType = USB_CDC_NO_PARITY;
1376 coding.bDataBits = USB_CDC_1_STOP_BITS;
1377
1378 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1379
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001380 gserial_wq = create_singlethread_workqueue("k_gserial");
1381 if (!gserial_wq) {
1382 status = -ENOMEM;
1383 goto fail;
1384 }
1385
David Brownellc1dca562008-06-19 17:51:44 -07001386 /* make devices be openable */
1387 for (i = 0; i < count; i++) {
1388 mutex_init(&ports[i].lock);
1389 status = gs_port_alloc(i, &coding);
1390 if (status) {
1391 count = i;
1392 goto fail;
1393 }
1394 }
1395 n_ports = count;
1396
1397 /* export the driver ... */
1398 status = tty_register_driver(gs_tty_driver);
1399 if (status) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 put_tty_driver(gs_tty_driver);
David Brownellc1dca562008-06-19 17:51:44 -07001401 pr_err("%s: cannot register, err %d\n",
1402 __func__, status);
1403 goto fail;
1404 }
1405
1406 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1407 for (i = 0; i < count; i++) {
1408 struct device *tty_dev;
1409
1410 tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1411 if (IS_ERR(tty_dev))
1412 pr_warning("%s: no classdev for port %d, err %ld\n",
1413 __func__, i, PTR_ERR(tty_dev));
1414 }
1415
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001416 for (i = 0; i < count; i++)
1417 usb_debugfs_init(ports[i].port, i);
1418
David Brownellc1dca562008-06-19 17:51:44 -07001419 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1420 count, (count == 1) ? "" : "s");
1421
1422 return status;
1423fail:
1424 while (count--)
1425 kfree(ports[count].port);
Pavankumar Kondetif0f95d82011-09-23 11:38:57 +05301426 if (gserial_wq)
1427 destroy_workqueue(gserial_wq);
David Brownellc1dca562008-06-19 17:51:44 -07001428 put_tty_driver(gs_tty_driver);
1429 gs_tty_driver = NULL;
1430 return status;
1431}
1432
1433static int gs_closed(struct gs_port *port)
1434{
1435 int cond;
1436
1437 spin_lock_irq(&port->port_lock);
1438 cond = (port->open_count == 0) && !port->openclose;
1439 spin_unlock_irq(&port->port_lock);
1440 return cond;
1441}
1442
1443/**
1444 * gserial_cleanup - remove TTY-over-USB driver and devices
1445 * Context: may sleep
1446 *
1447 * This is called to free all resources allocated by @gserial_setup().
1448 * Accordingly, it may need to wait until some open /dev/ files have
1449 * closed.
1450 *
1451 * The caller must have issued @gserial_disconnect() for any ports
1452 * that had previously been connected, so that there is never any
1453 * I/O pending when it's called.
1454 */
void gserial_cleanup(void)
{
	unsigned	i;
	struct gs_port	*port;

	/* nothing to do if gserial_setup() never ran (or already undone) */
	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		/* flush pending RX-push work before the port goes away */
		cancel_work_sync(&port->push);

		/* wait for old opens to finish */
		wait_event(port->close_wait, gs_closed(port));

		/* caller contract: gserial_disconnect() was already issued */
		WARN_ON(port->port_usb != NULL);

		kfree(port);
	}
	n_ports = 0;

	destroy_workqueue(gserial_wq);
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}
1492
1493/**
1494 * gserial_connect - notify TTY I/O glue that USB link is active
1495 * @gser: the function, set up with endpoints and descriptors
1496 * @port_num: which port is active
1497 * Context: any (usually from irq)
1498 *
1499 * This is called activate endpoints and let the TTY layer know that
1500 * the connection is active ... not unlike "carrier detect". It won't
1501 * necessarily start I/O queues; unless the TTY is held open by any
1502 * task, there would be no point. However, the endpoints will be
1503 * activated so the USB host can perform I/O, subject to basic USB
1504 * hardware flow control.
1505 *
1506 * Caller needs to have set up the endpoints and USB function in @dev
1507 * before calling this, as well as the appropriate (speed-specific)
1508 * endpoint descriptors, and also have set up the TTY driver by calling
1509 * @gserial_setup().
1510 *
1511 * Returns negative errno or zero.
1512 * On success, ep->driver_data will be overwritten.
1513 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in, gser->in_desc);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out, gser->out_desc);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->open_count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		/* nobody has the TTY open: report "hung up" to the host */
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	/* undo the IN-endpoint enable done above */
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}
1571
1572/**
1573 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1574 * @gser: the function, on which gserial_connect() was called
1575 * Context: any (usually from irq)
1576 *
1577 * This is called to deactivate endpoints and let the TTY layer know
1578 * that the connection went inactive ... not unlike "hangup".
1579 *
1580 * On return, the state is as if gserial_connect() had never been called;
1581 * there is no active USB I/O on these endpoints.
1582 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	/* already disconnected (or never connected) */
	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->open_count > 0 || port->openclose) {
		/* unblock any close-time drain wait and hang up the TTY */
		wake_up_interruptible(&port->drain_wait);
		if (port->port_tty)
			tty_hangup(port->port_tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->open_count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	/* reset the debugfs byte counters for the next session */
	port->nbytes_from_host = port->nbytes_to_tty =
		port->nbytes_from_tty = port->nbytes_to_host = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}