David Brownellc1dca562008-06-19 17:51:44 -07001/*
2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
3 *
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
7 *
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
16 */
17
18/* #define VERBOSE_DEBUG */
19
20#include <linux/kernel.h>
21#include <linux/interrupt.h>
22#include <linux/device.h>
23#include <linux/delay.h>
24#include <linux/tty.h>
25#include <linux/tty_flip.h>
26
27#include "u_serial.h"
28
29
/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack.  Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After initialization (gserial_setup), these TTY port devices stay
 * available until they are removed (gserial_cleanup).  Each one may be
 * connected to a USB function (gserial_connect), or disconnected (with
 * gserial_disconnect) when the USB host issues a config change event.
 * Data can only flow when the port is connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application.  In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that.  The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port.  For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link.  And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 */
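
/*
 * Illustrative sketch (not part of the original code): roughly how a
 * gadget/function driver is expected to drive this lifecycle.  The
 * example_*() wrappers and the "gadget"/"gser" parameters below are
 * placeholders for the caller's own bind/set_alt/disable/unbind paths,
 * not symbols defined elsewhere in this file.
 */
#if 0
static int example_bind(struct usb_gadget *gadget)
{
	/* create /dev/ttyGS0 before any configuration can be selected */
	return gserial_setup(gadget, 1);
}

static int example_activate(struct gserial *gser)
{
	/* host activated our configuration: let port 0 carry data */
	return gserial_connect(gser, 0);
}

static void example_deactivate(struct gserial *gser)
{
	/* host reset or reconfigured us: stop I/O on this port */
	gserial_disconnect(gser);
}

static void example_unbind(void)
{
	/* remove the ttyGS* nodes; all ports must be disconnected first */
	gserial_cleanup();
}
#endif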

#define PREFIX	"ttyGS"

/*
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 *	inactive; managed by gserial_{connect,disconnect}().  Each gserial
 *	instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 *	isn't opened; managed by gs_open()/gs_close()
 *	gs_port->port_tty ... tty_struct
 *	tty_struct->driver_data ... gs_port
 */

/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering.  For TX that's a circular buffer; for RX
 * consider it a NOP.  A third layer is provided by the TTY code.
 */
#define QUEUE_SIZE		16
#define WRITE_BUF_SIZE		8192	/* TX only */

/* circular buffer */
struct gs_buf {
	unsigned		buf_size;
	char			*buf_buf;
	char			*buf_get;
	char			*buf_put;
};

/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;
	struct tty_struct	*port_tty;

	unsigned		open_count;
	bool			openclose;	/* open/close in progress */
	u8			port_num;

	wait_queue_head_t	close_wait;	/* wait for last close */

	struct list_head	read_pool;
	struct list_head	read_queue;
	unsigned		n_read;
	struct tasklet_struct	push;

	struct list_head	write_pool;
	struct gs_buf		port_write_buf;
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
};

/* increase N_PORTS if you need more */
#define N_PORTS		4
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[N_PORTS];
static unsigned	n_ports;

#define GS_CLOSE_TIMEOUT		15	/* seconds */



#ifdef VERBOSE_DEBUG
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#else
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif

/*-------------------------------------------------------------------------*/

/* Circular Buffer */

/*
 * gs_buf_alloc
 *
 * Allocate a circular buffer and all associated memory.
 */
static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
	gb->buf_buf = kmalloc(size, GFP_KERNEL);
	if (gb->buf_buf == NULL)
		return -ENOMEM;

	gb->buf_size = size;
	gb->buf_put = gb->buf_buf;
	gb->buf_get = gb->buf_buf;

	return 0;
}

/*
 * gs_buf_free
 *
 * Free the buffer and all associated memory.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}

/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}

/*
 * gs_buf_data_avail
 *
 * Return the number of bytes of data written into the circular
 * buffer.
 */
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}

/*
 * gs_buf_space_avail
 *
 * Return the number of bytes of space available in the circular
 * buffer.
 */
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
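
/*
 * Worked example of the index arithmetic above: with buf_size = 8,
 * buf_put at offset 5 and buf_get at offset 2, data_avail is
 * (8 + 5 - 2) % 8 = 3 and space_avail is (8 + 2 - 5 - 1) % 8 = 4.
 * One byte is always left unused, so a completely full buffer (put just
 * behind get) can be told apart from an empty one (put == get).
 */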

/*
 * gs_buf_put
 *
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_space_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if (count > len) {
		memcpy(gb->buf_put, buf, len);
		memcpy(gb->buf_buf, buf+len, count - len);
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy(gb->buf_put, buf, count);
		if (count < len)
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return count;
}

/*
 * gs_buf_get
 *
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_data_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if (count > len) {
		memcpy(buf, gb->buf_get, len);
		memcpy(buf+len, gb->buf_buf, count - len);
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy(buf, gb->buf_get, count);
		if (count < len)
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return count;
}

/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
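
/*
 * Illustrative sketch (not from this file): gs_alloc_req()/gs_free_req()
 * are non-static so a function driver can also manage requests on an
 * endpoint it owns itself, e.g. an interrupt/notification endpoint.
 * Here "ep" and "notify_complete" are assumed to be the caller's enabled
 * endpoint and completion handler:
 *
 *	struct usb_request *req;
 *
 *	req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
 *	if (req) {
 *		req->complete = notify_complete;
 *		if (usb_ep_queue(ep, req, GFP_ATOMIC) < 0)
 *			gs_free_req(ep, req);
 *	}
 */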

/*
 * gs_send_packet
 *
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned.  If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned len;

	len = gs_buf_data_avail(&port->port_write_buf);
	if (len < size)
		size = len;
	if (size != 0)
		size = gs_buf_get(&port->port_write_buf, packet, size);
	return size;
}

/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (port->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = out->maxpacket;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}
	return started;
}

/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock(&port->port_lock);
	list_add_tail(&req->list, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock(&port->port_lock);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock(&port->port_lock);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		void (*fn)(struct usb_ep *, struct usb_request *))
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < QUEUE_SIZE; i++) {
		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
	}
	return 0;
}

/**
 * gs_start_io - start USB I/O streams
 * @port: port whose endpoints are used
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, gs_read_complete);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			gs_write_complete);
	if (status) {
		gs_free_requests(ep, head);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head);
		gs_free_requests(port->port_usb->in, &port->write_pool);
		status = -EIO;
	}

	return status;
}

/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	if (port_num < 0 || port_num >= n_ports)
		return -ENXIO;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* low_latency means ldiscs work in tasklet context, without
	 * needing a workqueue schedule ... easier to keep up.
	 */
	tty->low_latency = 1;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}

static int gs_writes_finished(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer */
	spin_lock_irq(&p->port_lock);
	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
	spin_unlock_irq(&p->port_lock);

	return cond;
}

static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}

static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = gs_buf_put(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		status = gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
		port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = gs_buf_put(&port->port_write_buf, &ch, 1);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = gs_buf_space_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}

static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = gs_buf_data_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		port->port_num, tty, chars);

	return chars;
}

/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing.  We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		tasklet_schedule(&port->push);
		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port	*port = tty->driver_data;
	int		status = 0;
	struct gserial	*gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
			port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}

static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
};

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;

static int __init
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port	*port;

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->close_wait);
	init_waitqueue_head(&port->drain_wait);

	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;

	return 0;
}

/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage.  Use this call to set up the ports you will be
 * exporting through USB.  Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int __init gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->owner = THIS_MODULE;
	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	coding.dwDTERate = __constant_cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		put_tty_driver(gs_tty_driver);
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--)
		kfree(ports[count].port);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}

static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = (port->open_count == 0) && !port->openclose;
	spin_unlock_irq(&port->port_lock);
	return cond;
}

/**
 * gserial_cleanup - remove TTY-over-USB driver and devices
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gserial_setup().
 * Accordingly, it may need to wait until some open /dev/ files have
 * closed.
 *
 * The caller must have issued @gserial_disconnect() for any ports
 * that had previously been connected, so that there is never any
 * I/O pending when it's called.
 */
void gserial_cleanup(void)
{
	unsigned	i;
	struct gs_port	*port;

	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		tasklet_kill(&port->push);

		/* wait for old opens to finish */
		wait_event(port->close_wait, gs_closed(port));

		WARN_ON(port->port_usb != NULL);

		kfree(port);
	}
	n_ports = 0;

	tty_unregister_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}

/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect".  It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point.  However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @gser
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have set up the TTY driver by calling
 * @gserial_setup().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in, gser->in_desc);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out, gser->out_desc);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->open_count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}

/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->open_count > 0 || port->openclose) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port_tty)
			tty_hangup(port->port_tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->open_count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool);
	gs_free_requests(gser->out, &port->read_queue);
	gs_free_requests(gser->in, &port->write_pool);
	spin_unlock_irqrestore(&port->port_lock, flags);
}