/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c, which is
 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/kthread.h>

#include "u_serial.h"


/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack.  Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After this module has been loaded, individual TTY ports can be requested
 * (gserial_alloc_line()) and they will stay available until they are removed
 * (gserial_free_line()).  Each one may be connected to a USB function
 * (gserial_connect), or disconnected (with gserial_disconnect) when the USB
 * host issues a config change event.  Data can only flow when the port is
 * connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application.  In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that.  The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port.  For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link.  And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 *
 *
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 *	inactive; managed by gserial_{connect,disconnect}().  Each gserial
 *	instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 *	isn't opened; managed by gs_open()/gs_close()
 *	gs_port->port.tty ... tty_struct
 *	tty_struct->driver_data ... gs_port
 */
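
/*
 * Illustrative sketch (not part of this driver): roughly how a USB function
 * driver is expected to use this lifecycle, assuming it sets up its own
 * endpoints and descriptors elsewhere.  Error handling and locking are
 * omitted, and the variable names are hypothetical.
 *
 *	static struct gserial my_gser;
 *	static unsigned char my_line;
 *
 *	// at function bind time: claim a ttyGS* line
 *	status = gserial_alloc_line(&my_line);
 *
 *	// when the host selects the config / altsetting: start I/O
 *	status = gserial_connect(&my_gser, my_line);
 *
 *	// when the host disables or resets the function: stop I/O
 *	gserial_disconnect(&my_gser);
 *
 *	// at function unbind time: release the line again
 *	gserial_free_line(my_line);
 */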

/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering.  For TX that's a circular buffer; for RX
 * consider it a NOP.  A third layer is provided by the TTY code.
 */
#define QUEUE_SIZE		16
#define WRITE_BUF_SIZE		8192		/* TX only */
#define GS_CONSOLE_BUF_SIZE	8192

/* circular buffer */
struct gs_buf {
	unsigned		buf_size;
	char			*buf_buf;
	char			*buf_get;
	char			*buf_put;
};

/* console info */
struct gscons_info {
	struct gs_port		*port;
	struct task_struct	*console_thread;
	struct gs_buf		con_buf;
	/* protect the buf and busy flag */
	spinlock_t		con_lock;
	int			req_busy;
	struct usb_request	*console_req;
};

/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	struct tty_port		port;
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;

	bool			openclose;	/* open/close in progress */
	u8			port_num;

	struct list_head	read_pool;
	int			read_started;
	int			read_allocated;
	struct list_head	read_queue;
	unsigned		n_read;
	struct tasklet_struct	push;

	struct list_head	write_pool;
	int			write_started;
	int			write_allocated;
	struct gs_buf		port_write_buf;
	wait_queue_head_t	drain_wait;	/* wait while writes drain */
	bool			write_busy;
	wait_queue_head_t	close_wait;

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
};

static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[MAX_U_SERIAL_PORTS];

#define GS_CLOSE_TIMEOUT		15		/* seconds */



#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
#endif

/*-------------------------------------------------------------------------*/

/* Circular Buffer */

/*
 * gs_buf_alloc
 *
 * Allocate a circular buffer and all associated memory.
 */
static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
	gb->buf_buf = kmalloc(size, GFP_KERNEL);
	if (gb->buf_buf == NULL)
		return -ENOMEM;

	gb->buf_size = size;
	gb->buf_put = gb->buf_buf;
	gb->buf_get = gb->buf_buf;

	return 0;
}

/*
 * gs_buf_free
 *
 * Free the buffer and all associated memory.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}

/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}

/*
 * gs_buf_data_avail
 *
 * Return the number of bytes of data written into the circular
 * buffer.
 */
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}

/*
 * gs_buf_space_avail
 *
 * Return the number of bytes of space available in the circular
 * buffer.
 */
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
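
/*
 * Worked example (illustration only, not kernel code): with buf_size = 8,
 * buf_get at offset 2 and buf_put at offset 5,
 *
 *	data_avail  = (8 + 5 - 2) % 8     = 3 bytes queued
 *	space_avail = (8 + 2 - 5 - 1) % 8 = 4 bytes free
 *
 * One slot is always left unused so that buf_get == buf_put can
 * unambiguously mean "empty" rather than "full".
 */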

/*
 * gs_buf_put
 *
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_space_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if (count > len) {
		memcpy(gb->buf_put, buf, len);
		memcpy(gb->buf_buf, buf+len, count - len);
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy(gb->buf_put, buf, count);
		if (count < len)
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return count;
}

/*
 * gs_buf_get
 *
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_data_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if (count > len) {
		memcpy(buf, gb->buf_get, len);
		memcpy(buf+len, gb->buf_buf, count - len);
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy(buf, gb->buf_get, count);
		if (count < len)
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return count;
}
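
/*
 * Wraparound illustration (not kernel code): continuing the example above
 * (buf_size = 8, buf_put at offset 5, 4 bytes of free space), a
 * gs_buf_put() of 4 bytes first copies 3 bytes into offsets 5..7, then
 * wraps and copies the remaining byte to offset 0, leaving buf_put at
 * offset 1.  gs_buf_get() mirrors the same two-step copy on the read side.
 */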

/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(gs_alloc_req);

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
EXPORT_SYMBOL_GPL(gs_free_req);
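
/*
 * Usage sketch (illustration only): a function driver that needs a single
 * request on one of its own endpoints, e.g. for a notification pipe, might
 * use these exported helpers roughly like this.  The endpoint, completion
 * handler, and request names here are hypothetical.
 *
 *	struct usb_request *notify_req;
 *
 *	notify_req = gs_alloc_req(notify_ep, notify_ep->maxpacket, GFP_ATOMIC);
 *	if (!notify_req)
 *		return -ENOMEM;
 *	notify_req->complete = my_notify_complete;
 *	...
 *	gs_free_req(notify_ep, notify_req);
 */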

/*
 * gs_send_packet
 *
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned.  If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned len;

	len = gs_buf_data_avail(&port->port_write_buf);
	if (len < size)
		size = len;
	if (size != 0)
		size = gs_buf_get(&port->port_write_buf, packet, size);
	return size;
}

/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in;
	int			status = 0;
	bool			do_tty_wake = false;

	if (!port->port_usb)
		return status;

	in = port->port_usb->in;

	while (!port->write_busy && !list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
			  port->port_num, len, *((u8 *)req->buf),
			  *((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		port->write_busy = true;
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		port->write_busy = false;

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port.tty;
		if (!tty)
			break;

		if (port->read_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = out->maxpacket;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}
	return port->read_started;
}

/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug("ttyGS%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warn("ttyGS%d: unexpected RX status %d\n",
				port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug("ttyGS%d: rx block %d/%d\n",
					  port->port_num, count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is handled by a workqueue,
	 * so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyGS%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port *port = ep->driver_data;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock(&port->port_lock);
	list_add_tail(&req->list, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock(&port->port_lock);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port *port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warn("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock(&port->port_lock);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
							 int *allocated)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
		if (allocated)
			(*allocated)--;
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int			i;
	struct usb_request	*req;
	int			n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < n; i++) {
		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}

/**
 * gs_start_io - start USB I/O streams
 * @port: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, gs_read_complete,
		&port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port.tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}

/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->port.count) {
				status = 0;
				port->port.count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port.tty = tty;

	port->port.count = 1;
	port->openclose = false;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}

static int gs_writes_finished(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer */
	spin_lock_irq(&p->port_lock);
	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
	spin_unlock_irq(&p->port_lock);

	return cond;
}

static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->port.count != 1) {
		if (port->port.count == 0)
			WARN_ON(1);
		else
			--port->port.count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->port.count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	port->port.tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}

static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = gs_buf_put(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n",
		port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = gs_buf_put(&port->port_write_buf, &ch, 1);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = gs_buf_space_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}

static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = gs_buf_data_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		port->port_num, tty, chars);

	return chars;
}

/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing.  We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		tasklet_schedule(&port->push);
		pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port	*port = tty->driver_data;
	int		status = 0;
	struct gserial	*gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
		port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}

static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
};

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;

#ifdef CONFIG_U_SERIAL_CONSOLE

static struct gscons_info gscons_info;
static struct console gserial_cons;

static struct usb_request *gs_request_new(struct usb_ep *ep)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return NULL;

	req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}

static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
{
	if (!req)
		return;

	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct gscons_info *info = &gscons_info;

	switch (req->status) {
	default:
		pr_warn("%s: unexpected %s status %d\n",
			__func__, ep->name, req->status);
	case 0:
		/* normal completion */
		spin_lock(&info->con_lock);
		info->req_busy = 0;
		spin_unlock(&info->con_lock);

		wake_up_process(info->console_thread);
		break;
	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}
}

static int gs_console_connect(int port_num)
{
	struct gscons_info *info = &gscons_info;
	struct gs_port *port;
	struct usb_ep *ep;

	if (port_num != gserial_cons.index) {
		pr_err("%s: port num [%d] does not support console\n",
		       __func__, port_num);
		return -ENXIO;
	}

	port = ports[port_num].port;
	ep = port->port_usb->in;
	if (!info->console_req) {
		info->console_req = gs_request_new(ep);
		if (!info->console_req)
			return -ENOMEM;
		info->console_req->complete = gs_complete_out;
	}

	info->port = port;
	spin_lock(&info->con_lock);
	info->req_busy = 0;
	spin_unlock(&info->con_lock);
	pr_vdebug("port[%d] console connect!\n", port_num);
	return 0;
}

static void gs_console_disconnect(struct usb_ep *ep)
{
	struct gscons_info *info = &gscons_info;
	struct usb_request *req = info->console_req;

	gs_request_free(req, ep);
	info->console_req = NULL;
}

static int gs_console_thread(void *data)
{
	struct gscons_info *info = &gscons_info;
	struct gs_port *port;
	struct usb_request *req;
	struct usb_ep *ep;
	int xfer, ret, count, size;

	do {
		port = info->port;
		set_current_state(TASK_INTERRUPTIBLE);
		if (!port || !port->port_usb
		    || !port->port_usb->in || !info->console_req)
			goto sched;

		req = info->console_req;
		ep = port->port_usb->in;

		spin_lock_irq(&info->con_lock);
		count = gs_buf_data_avail(&info->con_buf);
		size = ep->maxpacket;

		if (count > 0 && !info->req_busy) {
			set_current_state(TASK_RUNNING);
			if (count < size)
				size = count;

			xfer = gs_buf_get(&info->con_buf, req->buf, size);
			req->length = xfer;

			spin_unlock(&info->con_lock);
			ret = usb_ep_queue(ep, req, GFP_ATOMIC);
			spin_lock(&info->con_lock);
			if (ret < 0)
				info->req_busy = 0;
			else
				info->req_busy = 1;

			spin_unlock_irq(&info->con_lock);
		} else {
			spin_unlock_irq(&info->con_lock);
sched:
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			schedule();
		}
	} while (1);

	return 0;
}

static int gs_console_setup(struct console *co, char *options)
{
	struct gscons_info *info = &gscons_info;
	int status;

	info->port = NULL;
	info->console_req = NULL;
	info->req_busy = 0;
	spin_lock_init(&info->con_lock);

	status = gs_buf_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE);
	if (status) {
		pr_err("%s: allocate console buffer failed\n", __func__);
		return status;
	}

	info->console_thread = kthread_create(gs_console_thread,
					      co, "gs_console");
	if (IS_ERR(info->console_thread)) {
		pr_err("%s: cannot create console thread\n", __func__);
		gs_buf_free(&info->con_buf);
		return PTR_ERR(info->console_thread);
	}
	wake_up_process(info->console_thread);

	return 0;
}

static void gs_console_write(struct console *co,
			     const char *buf, unsigned count)
{
	struct gscons_info *info = &gscons_info;
	unsigned long flags;

	spin_lock_irqsave(&info->con_lock, flags);
	gs_buf_put(&info->con_buf, buf, count);
	spin_unlock_irqrestore(&info->con_lock, flags);

	wake_up_process(info->console_thread);
}

static struct tty_driver *gs_console_device(struct console *co, int *index)
{
	struct tty_driver **p = (struct tty_driver **)co->data;

	if (!*p)
		return NULL;

	*index = co->index;
	return *p;
}

static struct console gserial_cons = {
	.name =		"ttyGS",
	.write =	gs_console_write,
	.device =	gs_console_device,
	.setup =	gs_console_setup,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
	.data =		&gs_tty_driver,
};

static void gserial_console_init(void)
{
	register_console(&gserial_cons);
}

static void gserial_console_exit(void)
{
	struct gscons_info *info = &gscons_info;

	unregister_console(&gserial_cons);
	kthread_stop(info->console_thread);
	gs_buf_free(&info->con_buf);
}

#else

static int gs_console_connect(int port_num)
{
	return 0;
}

static void gs_console_disconnect(struct usb_ep *ep)
{
}

static void gserial_console_init(void)
{
}

static void gserial_console_exit(void)
{
}

#endif

static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port	*port;
	int		ret = 0;

	mutex_lock(&ports[port_num].lock);
	if (ports[port_num].port) {
		ret = -EBUSY;
		goto out;
	}

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->drain_wait);
	init_waitqueue_head(&port->close_wait);

	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;
out:
	mutex_unlock(&ports[port_num].lock);
	return ret;
}

static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = (port->port.count == 0) && !port->openclose;
	spin_unlock_irq(&port->port_lock);
	return cond;
}

static void gserial_free_port(struct gs_port *port)
{
	tasklet_kill(&port->push);
	/* wait for old opens to finish */
	wait_event(port->close_wait, gs_closed(port));
	WARN_ON(port->port_usb != NULL);
	tty_port_destroy(&port->port);
	kfree(port);
}

void gserial_free_line(unsigned char port_num)
{
	struct gs_port	*port;

	mutex_lock(&ports[port_num].lock);
	if (WARN_ON(!ports[port_num].port)) {
		mutex_unlock(&ports[port_num].lock);
		return;
	}
	port = ports[port_num].port;
	ports[port_num].port = NULL;
	mutex_unlock(&ports[port_num].lock);

	gserial_free_port(port);
	tty_unregister_device(gs_tty_driver, port_num);
	gserial_console_exit();
}
EXPORT_SYMBOL_GPL(gserial_free_line);

int gserial_alloc_line(unsigned char *line_num)
{
	struct usb_cdc_line_coding	coding;
	struct device			*tty_dev;
	int				ret;
	int				port_num;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = 8;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = USB_CDC_1_STOP_BITS;

	for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
		ret = gs_port_alloc(port_num, &coding);
		if (ret == -EBUSY)
			continue;
		if (ret)
			return ret;
		break;
	}
	if (ret)
		return ret;

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */

	tty_dev = tty_port_register_device(&ports[port_num].port->port,
			gs_tty_driver, port_num, NULL);
	if (IS_ERR(tty_dev)) {
		struct gs_port	*port;
		pr_err("%s: failed to register tty for port %d, err %ld\n",
				__func__, port_num, PTR_ERR(tty_dev));

		ret = PTR_ERR(tty_dev);
		port = ports[port_num].port;
		ports[port_num].port = NULL;
		gserial_free_port(port);
		goto err;
	}
	*line_num = port_num;
	gserial_console_init();
err:
	return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line);

/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect".  It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point.  However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @gser
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have allocated @port_num by calling
 * gserial_alloc_line().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (port_num >= MAX_U_SERIAL_PORTS)
		return -ENXIO;

	port = ports[port_num].port;
	if (!port) {
		pr_err("serial line %d not allocated.\n", port_num);
		return -EINVAL;
	}
	if (port->port_usb) {
		pr_err("serial line %d is in use.\n", port_num);
		return -EBUSY;
	}

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->port.count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	status = gs_console_connect(port_num);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	return status;
}
EXPORT_SYMBOL_GPL(gserial_connect);
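
/*
 * Illustrative sketch (not part of this file): a composite function's
 * set_alt()/disable() callbacks typically bracket the USB data link with
 * gserial_connect()/gserial_disconnect(), assuming the function stored its
 * line number at bind time.  Struct and function names here are hypothetical.
 *
 *	static int my_func_set_alt(struct usb_function *f, ...)
 *	{
 *		// host (re)selected our interface: descriptors are chosen,
 *		// so enable the endpoints and tell the TTY glue
 *		return gserial_connect(&my_func->port, my_func->port_num);
 *	}
 *
 *	static void my_func_disable(struct usb_function *f)
 *	{
 *		// host reset/deconfigured us: stop I/O, disable endpoints
 *		gserial_disconnect(&my_func->port);
 *	}
 */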
/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->port.count > 0 || port->openclose) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port.tty)
			tty_hangup(port->port.tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	usb_ep_disable(gser->in);

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port.count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	gs_console_disconnect(gser->in);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);

static int userial_init(void)
{
	unsigned			i;
	int				status;

	gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = "ttyGS";
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);
	for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
		mutex_init(&ports[i].lock);

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			MAX_U_SERIAL_PORTS,
			(MAX_U_SERIAL_PORTS == 1) ? "" : "s");

	return status;
fail:
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
module_init(userial_init);

static void userial_cleanup(void)
{
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
}
module_exit(userial_cleanup);

MODULE_LICENSE("GPL");