/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c, which is
 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "u_serial.h"


/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack. Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After initialization (gserial_setup), these TTY port devices stay
 * available until they are removed (gserial_cleanup). Each one may be
 * connected to a USB function (gserial_connect), or disconnected (with
 * gserial_disconnect) when the USB host issues a config change event.
 * Data can only flow when the port is connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application. In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that. The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port. For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link. And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 */

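/*
 * Typical lifecycle, as seen from a function driver (an illustrative
 * sketch only; "cdev" and "my_func" are hypothetical names, and the
 * real callers are serial-style functions such as f_serial or f_acm):
 *
 *	// at gadget bind time, create the /dev/ttyGS* nodes
 *	status = gserial_setup(cdev->gadget, 1);
 *	if (status < 0)
 *		return status;
 *
 *	// when the host activates the configuration, wire up port 0
 *	status = gserial_connect(&my_func->port, 0);
 *
 *	// when the host disconnects or reconfigures, unwire it
 *	gserial_disconnect(&my_func->port);
 *
 *	// at gadget unbind time, remove the TTY driver and devices
 *	gserial_cleanup();
 */
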
#define PREFIX	"ttyGS"

/*
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 *	inactive; managed by gserial_{connect,disconnect}(). each gserial
 *	instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 *	isn't opened; managed by gs_open()/gs_close()
 *	gs_port->port.tty ... tty_struct
 *	tty_struct->driver_data ... gs_port
 */

/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering. For TX that's a circular buffer; for RX
 * consider it a NOP. A third layer is provided by the TTY code.
 */
#define QUEUE_SIZE		16
#define WRITE_BUF_SIZE		8192		/* TX only */

/* circular buffer */
struct gs_buf {
	unsigned		buf_size;
	char			*buf_buf;
	char			*buf_get;
	char			*buf_put;
};

/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	struct tty_port		port;
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;

	bool			openclose;	/* open/close in progress */
	u8			port_num;

	struct list_head	read_pool;
	int read_started;
	int read_allocated;
	struct list_head	read_queue;
	unsigned		n_read;
	struct tasklet_struct	push;

	struct list_head	write_pool;
	int write_started;
	int write_allocated;
	struct gs_buf		port_write_buf;
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
};

/* increase N_PORTS if you need more */
#define N_PORTS		4
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[N_PORTS];
static unsigned	n_ports;

#define GS_CLOSE_TIMEOUT		15		/* seconds */



#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
#endif

/*-------------------------------------------------------------------------*/

/* Circular Buffer */

/*
 * gs_buf_alloc
 *
 * Allocate a circular buffer and all associated memory.
 */
static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
	gb->buf_buf = kmalloc(size, GFP_KERNEL);
	if (gb->buf_buf == NULL)
		return -ENOMEM;

	gb->buf_size = size;
	gb->buf_put = gb->buf_buf;
	gb->buf_get = gb->buf_buf;

	return 0;
}

/*
 * gs_buf_free
 *
 * Free the buffer and all associated memory.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}

/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}

/*
 * gs_buf_data_avail
 *
 * Return the number of bytes of data written into the circular
 * buffer.
 */
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}

/*
 * gs_buf_space_avail
 *
 * Return the number of bytes of space available in the circular
 * buffer.
 */
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
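
/*
 * A worked example of the arithmetic above, assuming a hypothetical
 * buffer with buf_size == 8, buf_put 6 bytes past buf_buf, and buf_get
 * 2 bytes past it: data_avail is (8 + 6 - 2) % 8 == 4, and space_avail
 * is (8 + 2 - 6 - 1) % 8 == 3. One byte is always left unused so that
 * buf_put == buf_get unambiguously means "empty" rather than "full";
 * hence data_avail + space_avail == buf_size - 1.
 */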

/*
 * gs_buf_put
 *
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_space_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if (count > len) {
		memcpy(gb->buf_put, buf, len);
		memcpy(gb->buf_buf, buf+len, count - len);
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy(gb->buf_put, buf, count);
		if (count < len)
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return count;
}

/*
 * gs_buf_get
 *
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_data_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if (count > len) {
		memcpy(buf, gb->buf_get, len);
		memcpy(buf+len, gb->buf_buf, count - len);
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy(buf, gb->buf_get, count);
		if (count < len)
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return count;
}

/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
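
/*
 * Illustrative request lifecycle (a sketch only; "my_ep" and
 * "my_complete" are hypothetical, and the real users are the RX/TX
 * paths below): a request from gs_alloc_req() gets a completion
 * handler, is queued on its endpoint, and is released again with
 * gs_free_req() once it is no longer queued:
 *
 *	struct usb_request *req;
 *
 *	req = gs_alloc_req(my_ep, my_ep->maxpacket, GFP_ATOMIC);
 *	if (req) {
 *		req->complete = my_complete;
 *		if (usb_ep_queue(my_ep, req, GFP_ATOMIC) != 0)
 *			gs_free_req(my_ep, req);	// queueing failed
 *	}
 */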

/*
 * gs_send_packet
 *
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned. If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned len;

	len = gs_buf_data_avail(&port->port_write_buf);
	if (len < size)
		size = len;
	if (size != 0)
		size = gs_buf_get(&port->port_write_buf, packet, size);
	return size;
}

/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send. This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so. Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port.tty;
		if (!tty)
			break;

		if (port->read_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = out->maxpacket;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}
	return port->read_started;
}

/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here). If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock(&port->port_lock);
	list_add_tail(&req->list, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock(&port->port_lock);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock(&port->port_lock);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
							 int *allocated)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
		if (allocated)
			(*allocated)--;
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int			i;
	struct usb_request	*req;
	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < n; i++) {
		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}

/**
 * gs_start_io - start USB I/O streams
 * @port: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port. If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers. We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using. Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, gs_read_complete,
		&port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port.tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}

/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open? Great. */
			if (port->port.count) {
				status = 0;
				port->port.count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port.tty = tty;

	port->port.count = 1;
	port->openclose = false;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}

static int gs_writes_finished(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer */
	spin_lock_irq(&p->port_lock);
	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
	spin_unlock_irq(&p->port_lock);

	return cond;
}

static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->port.count != 1) {
		if (port->port.count == 0)
			WARN_ON(1);
		else
			--port->port.count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->port.count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it. And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port.tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->port.close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}

static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = gs_buf_put(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		status = gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
		port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = gs_buf_put(&port->port_write_buf, &ch, 1);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = gs_buf_space_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}

static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = gs_buf_data_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		port->port_num, tty, chars);

	return chars;
}

/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing. We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		tasklet_schedule(&port->push);
		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port	*port = tty->driver_data;
	int		status = 0;
	struct gserial	*gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
			port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}

static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
};
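
/*
 * From userspace each port is just an ordinary TTY. A minimal use
 * (an illustrative sketch, not part of this driver) is simply:
 *
 *	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hello\n", 6);
 *		close(fd);
 *	}
 *
 * Note that baud/parity changes made from userspace are not propagated
 * to the USB link by this glue (see the REVISIT notes around
 * port_line_coding).
 */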

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;

static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port	*port;

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->drain_wait);

	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;

	return 0;
}

/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage. Use this call to set up the ports you will be
 * exporting through USB. Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows. Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_port_register_device(&ports[i].port->port,
				gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--) {
		tty_port_destroy(&ports[count].port->port);
		kfree(ports[count].port);
	}
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}

static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = (port->port.count == 0) && !port->openclose;
	spin_unlock_irq(&port->port_lock);
	return cond;
}

/**
 * gserial_cleanup - remove TTY-over-USB driver and devices
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gserial_setup().
 * Accordingly, it may need to wait until some open /dev/ files have
 * closed.
 *
 * The caller must have issued @gserial_disconnect() for any ports
 * that had previously been connected, so that there is never any
 * I/O pending when it's called.
 */
void gserial_cleanup(void)
{
	unsigned	i;
	struct gs_port	*port;

	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		tasklet_kill(&port->push);

		/* wait for old opens to finish */
		wait_event(port->port.close_wait, gs_closed(port));

		WARN_ON(port->port_usb != NULL);

		tty_port_destroy(&port->port);
		kfree(port);
	}
	n_ports = 0;

	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}

/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect". It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point. However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @dev
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have set up the TTY driver by calling
 * @gserial_setup().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->port.count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}

/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->port.count > 0 || port->openclose) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port.tty)
			tty_hangup(port->port.tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port.count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}
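
/*
 * Typical calling context for the two functions above (an illustrative
 * sketch; "my_func" and func_to_my_func() are hypothetical, and the
 * real examples are the set_alt()/disable() methods of functions like
 * f_serial and f_acm): the USB function's set_alt() calls
 * gserial_connect() once its endpoints are configured, while its
 * disable() callback calls gserial_disconnect():
 *
 *	static int my_set_alt(struct usb_function *f, unsigned intf,
 *			unsigned alt)
 *	{
 *		struct my_func *func = func_to_my_func(f);
 *
 *		return gserial_connect(&func->port, func->port_num);
 *	}
 *
 *	static void my_disable(struct usb_function *f)
 *	{
 *		struct my_func *func = func_to_my_func(f);
 *
 *		gserial_disconnect(&func->port);
 *	}
 */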