blob: 2181504fefe45f658362fa46c5e911153d7bef65 [file] [log] [blame]
David Brownellc1dca562008-06-19 17:51:44 -07001/*
2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
3 *
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
7 *
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
16 */
17
18/* #define VERBOSE_DEBUG */
19
20#include <linux/kernel.h>
stephane duverger1e413942010-06-29 16:57:25 +020021#include <linux/sched.h>
David Brownellc1dca562008-06-19 17:51:44 -070022#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/tty.h>
26#include <linux/tty_flip.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028#include <linux/debugfs.h>
David Brownellc1dca562008-06-19 17:51:44 -070029
30#include "u_serial.h"
31
32
33/*
34 * This component encapsulates the TTY layer glue needed to provide basic
35 * "serial port" functionality through the USB gadget stack. Each such
36 * port is exposed through a /dev/ttyGS* node.
37 *
38 * After initialization (gserial_setup), these TTY port devices stay
39 * available until they are removed (gserial_cleanup). Each one may be
40 * connected to a USB function (gserial_connect), or disconnected (with
41 * gserial_disconnect) when the USB host issues a config change event.
42 * Data can only flow when the port is connected to the host.
43 *
44 * A given TTY port can be made available in multiple configurations.
45 * For example, each one might expose a ttyGS0 node which provides a
46 * login application. In one case that might use CDC ACM interface 0,
47 * while another configuration might use interface 3 for that. The
48 * work to handle that (including descriptor management) is not part
49 * of this component.
50 *
51 * Configurations may expose more than one TTY port. For example, if
52 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
53 * for a telephone or fax link. And ttyGS2 might be something that just
54 * needs a simple byte stream interface for some messaging protocol that
55 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
56 */
57
David Brownell937ef732008-07-07 12:16:08 -070058#define PREFIX "ttyGS"
59
David Brownellc1dca562008-06-19 17:51:44 -070060/*
61 * gserial is the lifecycle interface, used by USB functions
62 * gs_port is the I/O nexus, used by the tty driver
63 * tty_struct links to the tty/filesystem framework
64 *
65 * gserial <---> gs_port ... links will be null when the USB link is
David Brownell1f1ba112008-08-06 18:49:57 -070066 * inactive; managed by gserial_{connect,disconnect}(). each gserial
67 * instance can wrap its own USB control protocol.
David Brownellc1dca562008-06-19 17:51:44 -070068 * gserial->ioport == usb_ep->driver_data ... gs_port
69 * gs_port->port_usb ... gserial
70 *
71 * gs_port <---> tty_struct ... links will be null when the TTY file
72 * isn't opened; managed by gs_open()/gs_close()
73 * gserial->port_tty ... tty_struct
74 * tty_struct->driver_data ... gserial
75 */
76
/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering.  For TX that's a circular buffer; for RX
 * consider it a NOP.  A third layer is provided by the TTY code.
 */
#define TX_QUEUE_SIZE		8	/* max IN requests in flight per port */
#define TX_BUF_SIZE		4096	/* bytes per IN request buffer */
#define WRITE_BUF_SIZE		8192	/* TX only: circular staging buffer */

#define RX_QUEUE_SIZE		8	/* max OUT requests in flight per port */
#define RX_BUF_SIZE		4096	/* bytes per OUT request buffer */
87
88
/* Circular buffer staging TX data between the tty layer and the USB IN
 * endpoint.  Empty when buf_get == buf_put; one byte of storage is always
 * sacrificed so "full" can be told apart from "empty" (see
 * gs_buf_space_avail).
 */
struct gs_buf {
	unsigned		buf_size;	/* total bytes of backing store */
	char			*buf_buf;	/* backing store (heap allocated) */
	char			*buf_get;	/* consumer cursor: next byte to read */
	char			*buf_put;	/* producer cursor: next byte to write */
};
96
/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;	/* non-NULL while USB side connected */
	struct tty_struct	*port_tty;	/* non-NULL while tty is open */

	unsigned		open_count;	/* tty opens minus closes */
	bool			openclose;	/* open/close in progress */
	u8			port_num;

	wait_queue_head_t	close_wait;	/* wait for last close */

	struct list_head	read_pool;	/* idle OUT requests, ready to queue */
	int			read_started;	/* OUT requests currently queued */
	int			read_allocated;	/* OUT requests allocated in total */
	struct list_head	read_queue;	/* completed OUT reqs awaiting the tty */
	unsigned		n_read;		/* bytes of head request already pushed */
	struct work_struct	push;		/* work feeding RX data to the tty */

	struct list_head	write_pool;	/* idle IN requests, ready to fill */
	int			write_started;	/* IN requests currently queued */
	int			write_allocated; /* IN requests allocated in total */
	struct gs_buf		port_write_buf;	/* circular TX staging buffer */
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */

	/* traffic counters; presumably exported via debugfs (the file
	 * includes <linux/debugfs.h>) -- verify against the rest of the file
	 */
	unsigned long           nbytes_from_host;	/* OUT: host -> driver */
	unsigned long           nbytes_to_tty;		/* OUT: driver -> tty */
	unsigned long           nbytes_from_tty;	/* IN:  tty -> driver */
	unsigned long           nbytes_to_host;		/* IN:  driver -> host */
};
133
/* increase N_PORTS if you need more */
#define N_PORTS		8
/* One slot per possible port; the mutex serializes open/close against
 * slot setup/teardown.
 */
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[N_PORTS];
static unsigned	n_ports;			/* slots actually in use */

/* dedicated workqueue running each port's RX "push" work */
static struct workqueue_struct *gserial_wq;

#define GS_CLOSE_TIMEOUT		15		/* seconds */



#ifdef VERBOSE_DEBUG
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#else
/* compiled out, but still type-checks its arguments */
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif
155
156/*-------------------------------------------------------------------------*/
157
158/* Circular Buffer */
159
160/*
161 * gs_buf_alloc
162 *
163 * Allocate a circular buffer and all associated memory.
164 */
165static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
166{
167 gb->buf_buf = kmalloc(size, GFP_KERNEL);
168 if (gb->buf_buf == NULL)
169 return -ENOMEM;
170
171 gb->buf_size = size;
172 gb->buf_put = gb->buf_buf;
173 gb->buf_get = gb->buf_buf;
174
175 return 0;
176}
177
/*
 * gs_buf_free
 *
 * Free the buffer's backing store and mark it unallocated (buf_buf
 * NULL), so a later gs_open() knows to allocate it again.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}
188
/*
 * gs_buf_clear
 *
 * Discard all data in the circular buffer without touching the
 * backing store.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}
199
200/*
201 * gs_buf_data_avail
202 *
David Brownell1f1ba112008-08-06 18:49:57 -0700203 * Return the number of bytes of data written into the circular
David Brownellc1dca562008-06-19 17:51:44 -0700204 * buffer.
205 */
206static unsigned gs_buf_data_avail(struct gs_buf *gb)
207{
208 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
209}
210
211/*
212 * gs_buf_space_avail
213 *
214 * Return the number of bytes of space available in the circular
215 * buffer.
216 */
217static unsigned gs_buf_space_avail(struct gs_buf *gb)
218{
219 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
220}
221
222/*
223 * gs_buf_put
224 *
225 * Copy data data from a user buffer and put it into the circular buffer.
226 * Restrict to the amount of space available.
227 *
228 * Return the number of bytes copied.
229 */
230static unsigned
231gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
232{
233 unsigned len;
234
235 len = gs_buf_space_avail(gb);
236 if (count > len)
237 count = len;
238
239 if (count == 0)
240 return 0;
241
242 len = gb->buf_buf + gb->buf_size - gb->buf_put;
243 if (count > len) {
244 memcpy(gb->buf_put, buf, len);
245 memcpy(gb->buf_buf, buf+len, count - len);
246 gb->buf_put = gb->buf_buf + count - len;
247 } else {
248 memcpy(gb->buf_put, buf, count);
249 if (count < len)
250 gb->buf_put += count;
251 else /* count == len */
252 gb->buf_put = gb->buf_buf;
253 }
254
255 return count;
256}
257
258/*
259 * gs_buf_get
260 *
261 * Get data from the circular buffer and copy to the given buffer.
262 * Restrict to the amount of data available.
263 *
264 * Return the number of bytes copied.
265 */
266static unsigned
267gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
268{
269 unsigned len;
270
271 len = gs_buf_data_avail(gb);
272 if (count > len)
273 count = len;
274
275 if (count == 0)
276 return 0;
277
278 len = gb->buf_buf + gb->buf_size - gb->buf_get;
279 if (count > len) {
280 memcpy(buf, gb->buf_get, len);
281 memcpy(buf+len, gb->buf_buf, count - len);
282 gb->buf_get = gb->buf_buf + count - len;
283 } else {
284 memcpy(buf, gb->buf_get, count);
285 if (count < len)
286 gb->buf_get += count;
287 else /* count == len */
288 gb->buf_get = gb->buf_buf;
289 }
290
291 return count;
292}
293
294/*-------------------------------------------------------------------------*/
295
296/* I/O glue between TTY (upper) and USB function (lower) driver layers */
297
298/*
299 * gs_alloc_req
300 *
301 * Allocate a usb_request and its buffer. Returns a pointer to the
302 * usb_request or NULL if there is an error.
303 */
David Brownell1f1ba112008-08-06 18:49:57 -0700304struct usb_request *
David Brownellc1dca562008-06-19 17:51:44 -0700305gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
306{
307 struct usb_request *req;
308
309 req = usb_ep_alloc_request(ep, kmalloc_flags);
310
311 if (req != NULL) {
312 req->length = len;
313 req->buf = kmalloc(len, kmalloc_flags);
314 if (req->buf == NULL) {
315 usb_ep_free_request(ep, req);
316 return NULL;
317 }
318 }
319
320 return req;
321}
322
/*
 * gs_free_req
 *
 * Free a usb_request and its buffer; inverse of gs_alloc_req().
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
333
334/*
335 * gs_send_packet
336 *
337 * If there is data to send, a packet is built in the given
338 * buffer and the size is returned. If there is no data to
339 * send, 0 is returned.
340 *
341 * Called with port_lock held.
342 */
343static unsigned
344gs_send_packet(struct gs_port *port, char *packet, unsigned size)
345{
346 unsigned len;
347
348 len = gs_buf_data_avail(&port->port_write_buf);
349 if (len < size)
350 size = len;
351 if (size != 0)
352 size = gs_buf_get(&port->port_write_buf, packet, size);
353 return size;
354}
355
356/*
357 * gs_start_tx
358 *
359 * This function finds available write requests, calls
360 * gs_send_packet to fill these packets with data, and
361 * continues until either there are no more write requests
362 * available or no more data to send. This function is
363 * run whenever data arrives or write requests are available.
364 *
365 * Context: caller owns port_lock; port_usb is non-null.
366 */
367static int gs_start_tx(struct gs_port *port)
368/*
369__releases(&port->port_lock)
370__acquires(&port->port_lock)
371*/
372{
373 struct list_head *pool = &port->write_pool;
374 struct usb_ep *in = port->port_usb->in;
375 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700376 static long prev_len;
David Brownellc1dca562008-06-19 17:51:44 -0700377 bool do_tty_wake = false;
378
379 while (!list_empty(pool)) {
380 struct usb_request *req;
381 int len;
382
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700383 if (port->write_started >= TX_QUEUE_SIZE)
Jim Sung28609d42010-11-04 18:47:51 -0700384 break;
385
David Brownellc1dca562008-06-19 17:51:44 -0700386 req = list_entry(pool->next, struct usb_request, list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387 len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
David Brownellc1dca562008-06-19 17:51:44 -0700388 if (len == 0) {
Rajkumar Raghupathy40985292012-04-12 15:19:53 +0530389 /* Queue zero length packet explicitly to make it
390 * work with UDCs which don't support req->zero flag
391 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700392 if (prev_len && (prev_len % in->maxpacket == 0)) {
393 req->length = 0;
394 list_del(&req->list);
395 spin_unlock(&port->port_lock);
396 status = usb_ep_queue(in, req, GFP_ATOMIC);
397 spin_lock(&port->port_lock);
398 if (!port->port_usb) {
399 gs_free_req(in, req);
400 break;
401 }
402 if (status) {
403 printk(KERN_ERR "%s: %s err %d\n",
404 __func__, "queue", status);
405 list_add(&req->list, pool);
406 }
407 prev_len = 0;
408 }
David Brownellc1dca562008-06-19 17:51:44 -0700409 wake_up_interruptible(&port->drain_wait);
410 break;
411 }
412 do_tty_wake = true;
413
414 req->length = len;
415 list_del(&req->list);
416
David Brownell937ef732008-07-07 12:16:08 -0700417 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
418 port->port_num, len, *((u8 *)req->buf),
David Brownellc1dca562008-06-19 17:51:44 -0700419 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
David Brownellc1dca562008-06-19 17:51:44 -0700420
421 /* Drop lock while we call out of driver; completions
422 * could be issued while we do so. Disconnection may
423 * happen too; maybe immediately before we queue this!
424 *
425 * NOTE that we may keep sending data for a while after
426 * the TTY closed (dev->ioport->port_tty is NULL).
427 */
428 spin_unlock(&port->port_lock);
429 status = usb_ep_queue(in, req, GFP_ATOMIC);
430 spin_lock(&port->port_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700431 /*
432 * If port_usb is NULL, gserial disconnect is called
433 * while the spinlock is dropped and all requests are
434 * freed. Free the current request here.
435 */
436 if (!port->port_usb) {
437 do_tty_wake = false;
438 gs_free_req(in, req);
439 break;
440 }
David Brownellc1dca562008-06-19 17:51:44 -0700441 if (status) {
442 pr_debug("%s: %s %s err %d\n",
443 __func__, "queue", in->name, status);
444 list_add(&req->list, pool);
445 break;
446 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700447 prev_len = req->length;
448 port->nbytes_from_tty += req->length;
David Brownellc1dca562008-06-19 17:51:44 -0700449
David Brownellc1dca562008-06-19 17:51:44 -0700450 }
451
452 if (do_tty_wake && port->port_tty)
453 tty_wakeup(port->port_tty);
454 return status;
455}
456
/*
 * gs_start_rx
 *
 * Refill the OUT endpoint with read requests from read_pool, up to the
 * RX_QUEUE_SIZE in-flight budget, stopping early if the tty is closed
 * or the endpoint rejects a request.  Returns the number of requests
 * currently queued (read_started).
 *
 * Context: caller owns port_lock, and port_usb is set.  The lock is
 * dropped around usb_ep_queue() since the controller driver may call
 * back into us (e.g. for disconnect).
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	/* NOTE(review): "started" is only ever written (zeroed on
	 * disconnect below); the return value is read_started -- looks
	 * vestigial, confirm before removing.
	 */
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		/* respect the in-flight queue depth budget */
		if (port->read_started >= RX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed.  Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

	}
	return port->read_started;
}
514
/*
 * RX work: takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 *
 * Runs on gserial_wq (see gs_read_complete / queue_work callers), so
 * it may hold port_lock with plain spin_lock_irq.
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
						port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
636
/*
 * OUT endpoint completion: runs in interrupt context, so take the port
 * lock irqsave.  The data is not pushed here; it is parked on
 * read_queue and the push work does the tty handoff.
 */
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_from_host += req->actual;
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
649
/*
 * IN endpoint completion: recycle the request into write_pool and, on
 * normal completion, immediately try to fill it again from the
 * circular write buffer.  Runs in interrupt context (irqsave lock).
 */
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_to_host += req->actual;
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion: keep the TX pipeline busy, but only
		 * while the USB side is still connected
		 */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}
680
Jim Sung28609d42010-11-04 18:47:51 -0700681static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
682 int *allocated)
David Brownellc1dca562008-06-19 17:51:44 -0700683{
684 struct usb_request *req;
685
686 while (!list_empty(head)) {
687 req = list_entry(head->next, struct usb_request, list);
688 list_del(&req->list);
689 gs_free_req(ep, req);
Jim Sung28609d42010-11-04 18:47:51 -0700690 if (allocated)
691 (*allocated)--;
David Brownellc1dca562008-06-19 17:51:44 -0700692 }
693}
694
/*
 * gs_alloc_requests
 *
 * Allocate up to "num" requests of "size" bytes each for endpoint
 * "ep", set their completion handler to "fn", and chain them onto
 * "head".  When "allocated" is non-NULL it is incremented once per
 * request, pairing with gs_free_requests().
 *
 * Returns 0 if at least one request could be allocated (even if fewer
 * than "num"), else -ENOMEM.
 */
static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}
717
/**
 * gs_start_io - start USB I/O streams
 * @port: port whose endpoints should start streaming
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 *
 * Returns 0 on success, -ENOMEM if request allocation failed, or -EIO
 * if the port disconnected or no read request could be queued.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* gs_start_rx() drops the lock; the port may have disconnected */
	if (!port->port_usb)
		return -EIO;
	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}
770
771/*-------------------------------------------------------------------------*/
772
773/* TTY Driver */
774
/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 *
 * The open is a small state machine: under ports[].lock we either join
 * an already-open port (open_count++), back off while another task is
 * mid open/close (-EBUSY, retry after msleep), or claim the port by
 * setting openclose and fall through to the "real open" below.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	if (port_num < 0 || port_num >= n_ports)
		return -ENXIO;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open; gs_close() only frees
	 * it when the USB side is disconnected
	 */
	if (port->port_write_buf.buf_buf == NULL) {

		/* drop the spinlock: gs_buf_alloc() uses GFP_KERNEL */
		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* low_latency means ldiscs work is carried in the same context
	 * of tty_flip_buffer_push.  The same can be called from IRQ with
	 * low_latency = 0.  But better to use a dedicated worker thread
	 * to push the data.
	 */
	tty->low_latency = 1;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
888
889static int gs_writes_finished(struct gs_port *p)
890{
891 int cond;
892
893 /* return true on disconnect or empty buffer */
894 spin_lock_irq(&p->port_lock);
895 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
896 spin_unlock_irq(&p->port_lock);
897
898 return cond;
899}
900
/*
 * gs_close tears down the tty side of the port.  Only the last close
 * does real work: it notifies the function driver, waits (bounded) for
 * the TX buffer to drain, scrubs or frees the buffer, and releases all
 * queued usb_requests so gs_open() can allocate them afresh.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	/* not the last close: just drop the reference */
	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-read: the USB side may have gone away while we slept */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push work queue fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->close_wait);

	/*
	 * Freeing the previously queued requests as they are
	 * allocated again as a part of gs_open()
	 *
	 * NOTE(review): the lock is dropped around the FIFO flushes but
	 * "gser" is not re-read afterwards; presumably a disconnect in
	 * that window is excluded by other means -- confirm.
	 */
	if (port->port_usb) {
		spin_unlock_irq(&port->port_lock);
		usb_ep_fifo_flush(gser->out);
		usb_ep_fifo_flush(gser->in);
		spin_lock_irq(&port->port_lock);
		gs_free_requests(gser->out, &port->read_queue, NULL);
		gs_free_requests(gser->out, &port->read_pool, NULL);
		gs_free_requests(gser->in, &port->write_pool, NULL);
	}
	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;
exit:
	spin_unlock_irq(&port->port_lock);
}
977
978static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
979{
980 struct gs_port *port = tty->driver_data;
981 unsigned long flags;
982 int status;
983
984 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
985 port->port_num, tty, count);
986
987 spin_lock_irqsave(&port->port_lock, flags);
988 if (count)
989 count = gs_buf_put(&port->port_write_buf, buf, count);
990 /* treat count == 0 as flush_chars() */
991 if (port->port_usb)
992 status = gs_start_tx(port);
993 spin_unlock_irqrestore(&port->port_lock, flags);
994
995 return count;
996}
997
998static int gs_put_char(struct tty_struct *tty, unsigned char ch)
999{
1000 struct gs_port *port = tty->driver_data;
1001 unsigned long flags;
1002 int status;
1003
1004 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
1005 port->port_num, tty, ch, __builtin_return_address(0));
1006
1007 spin_lock_irqsave(&port->port_lock, flags);
1008 status = gs_buf_put(&port->port_write_buf, &ch, 1);
1009 spin_unlock_irqrestore(&port->port_lock, flags);
1010
1011 return status;
1012}
1013
1014static void gs_flush_chars(struct tty_struct *tty)
1015{
1016 struct gs_port *port = tty->driver_data;
1017 unsigned long flags;
1018
1019 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
1020
1021 spin_lock_irqsave(&port->port_lock, flags);
1022 if (port->port_usb)
1023 gs_start_tx(port);
1024 spin_unlock_irqrestore(&port->port_lock, flags);
1025}
1026
1027static int gs_write_room(struct tty_struct *tty)
1028{
1029 struct gs_port *port = tty->driver_data;
1030 unsigned long flags;
1031 int room = 0;
1032
1033 spin_lock_irqsave(&port->port_lock, flags);
1034 if (port->port_usb)
1035 room = gs_buf_space_avail(&port->port_write_buf);
1036 spin_unlock_irqrestore(&port->port_lock, flags);
1037
1038 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
1039 port->port_num, tty, room);
1040
1041 return room;
1042}
1043
1044static int gs_chars_in_buffer(struct tty_struct *tty)
1045{
1046 struct gs_port *port = tty->driver_data;
1047 unsigned long flags;
1048 int chars = 0;
1049
1050 spin_lock_irqsave(&port->port_lock, flags);
1051 chars = gs_buf_data_avail(&port->port_write_buf);
1052 spin_unlock_irqrestore(&port->port_lock, flags);
1053
1054 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
1055 port->port_num, tty, chars);
1056
1057 return chars;
1058}
1059
1060/* undo side effects of setting TTY_THROTTLED */
1061static void gs_unthrottle(struct tty_struct *tty)
1062{
1063 struct gs_port *port = tty->driver_data;
1064 unsigned long flags;
David Brownellc1dca562008-06-19 17:51:44 -07001065
1066 spin_lock_irqsave(&port->port_lock, flags);
David Brownell937ef732008-07-07 12:16:08 -07001067 if (port->port_usb) {
1068 /* Kickstart read queue processing. We don't do xon/xoff,
1069 * rts/cts, or other handshaking with the host, but if the
1070 * read queue backs up enough we'll be NAKing OUT packets.
1071 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072 queue_work(gserial_wq, &port->push);
David Brownell937ef732008-07-07 12:16:08 -07001073 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
1074 }
David Brownellc1dca562008-06-19 17:51:44 -07001075 spin_unlock_irqrestore(&port->port_lock, flags);
David Brownellc1dca562008-06-19 17:51:44 -07001076}
1077
David Brownell1f1ba112008-08-06 18:49:57 -07001078static int gs_break_ctl(struct tty_struct *tty, int duration)
1079{
1080 struct gs_port *port = tty->driver_data;
1081 int status = 0;
1082 struct gserial *gser;
1083
1084 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1085 port->port_num, duration);
1086
1087 spin_lock_irq(&port->port_lock);
1088 gser = port->port_usb;
1089 if (gser && gser->send_break)
1090 status = gser->send_break(gser, duration);
1091 spin_unlock_irq(&port->port_lock);
1092
1093 return status;
1094}
1095
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001096static int gs_tiocmget(struct tty_struct *tty)
1097{
1098 struct gs_port *port = tty->driver_data;
1099 struct gserial *gser;
1100 unsigned int result = 0;
1101
1102 spin_lock_irq(&port->port_lock);
1103 gser = port->port_usb;
1104 if (!gser) {
1105 result = -ENODEV;
1106 goto fail;
1107 }
1108
1109 if (gser->get_dtr)
1110 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1111
1112 if (gser->get_rts)
1113 result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
1114
1115 if (gser->serial_state & TIOCM_CD)
1116 result |= TIOCM_CD;
1117
1118 if (gser->serial_state & TIOCM_RI)
1119 result |= TIOCM_RI;
1120fail:
1121 spin_unlock_irq(&port->port_lock);
1122 return result;
1123}
1124
1125static int gs_tiocmset(struct tty_struct *tty,
1126 unsigned int set, unsigned int clear)
1127{
1128 struct gs_port *port = tty->driver_data;
1129 struct gserial *gser;
1130 int status = 0;
1131
1132 spin_lock_irq(&port->port_lock);
1133 gser = port->port_usb;
1134 if (!gser) {
1135 status = -ENODEV;
1136 goto fail;
1137 }
1138
1139 if (set & TIOCM_RI) {
1140 if (gser->send_ring_indicator) {
1141 gser->serial_state |= TIOCM_RI;
1142 status = gser->send_ring_indicator(gser, 1);
1143 }
1144 }
1145 if (clear & TIOCM_RI) {
1146 if (gser->send_ring_indicator) {
1147 gser->serial_state &= ~TIOCM_RI;
1148 status = gser->send_ring_indicator(gser, 0);
1149 }
1150 }
1151 if (set & TIOCM_CD) {
1152 if (gser->send_carrier_detect) {
1153 gser->serial_state |= TIOCM_CD;
1154 status = gser->send_carrier_detect(gser, 1);
1155 }
1156 }
1157 if (clear & TIOCM_CD) {
1158 if (gser->send_carrier_detect) {
1159 gser->serial_state &= ~TIOCM_CD;
1160 status = gser->send_carrier_detect(gser, 0);
1161 }
1162 }
1163fail:
1164 spin_unlock_irq(&port->port_lock);
1165 return status;
1166}
/* TTY operations installed on every ttyGS* node by gserial_setup() */
static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
	.tiocmget =		gs_tiocmget,
	.tiocmset =		gs_tiocmset,
};
1180
1181/*-------------------------------------------------------------------------*/
1182
/* driver-wide TTY driver handle; allocated in gserial_setup() */
static struct tty_driver *gs_tty_driver;
1184
Benoit Gobyaab96812011-04-19 20:37:33 -07001185static int
David Brownellc1dca562008-06-19 17:51:44 -07001186gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1187{
1188 struct gs_port *port;
1189
1190 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1191 if (port == NULL)
1192 return -ENOMEM;
1193
1194 spin_lock_init(&port->port_lock);
1195 init_waitqueue_head(&port->close_wait);
1196 init_waitqueue_head(&port->drain_wait);
1197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 INIT_WORK(&port->push, gs_rx_push);
David Brownellc1dca562008-06-19 17:51:44 -07001199
1200 INIT_LIST_HEAD(&port->read_pool);
David Brownell937ef732008-07-07 12:16:08 -07001201 INIT_LIST_HEAD(&port->read_queue);
David Brownellc1dca562008-06-19 17:51:44 -07001202 INIT_LIST_HEAD(&port->write_pool);
1203
1204 port->port_num = port_num;
1205 port->port_line_coding = *coding;
1206
1207 ports[port_num].port = port;
1208
1209 return 0;
1210}
1211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001212
1213#if defined(CONFIG_DEBUG_FS)
1214
1215#define BUF_SIZE 512
1216
1217static ssize_t debug_read_status(struct file *file, char __user *ubuf,
1218 size_t count, loff_t *ppos)
1219{
1220 struct gs_port *ui_dev = file->private_data;
1221 struct tty_struct *tty;
1222 struct gserial *gser;
1223 char *buf;
1224 unsigned long flags;
1225 int i = 0;
1226 int ret;
1227 int result = 0;
1228
1229 tty = ui_dev->port_tty;
1230 gser = ui_dev->port_usb;
1231
1232 buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
1233 if (!buf)
1234 return -ENOMEM;
1235
1236 spin_lock_irqsave(&ui_dev->port_lock, flags);
1237
1238 i += scnprintf(buf + i, BUF_SIZE - i,
1239 "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
1240
1241 i += scnprintf(buf + i, BUF_SIZE - i,
1242 "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
1243
1244 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
1245 (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
1246
1247 i += scnprintf(buf + i, BUF_SIZE - i,
1248 "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
1249
1250 i += scnprintf(buf + i, BUF_SIZE - i,
1251 "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
1252
1253 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
1254 (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
1255
1256 if (tty)
1257 i += scnprintf(buf + i, BUF_SIZE - i,
1258 "tty_flags: %lu\n", tty->flags);
1259
1260 if (gser->get_dtr) {
1261 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1262 i += scnprintf(buf + i, BUF_SIZE - i,
1263 "DTR_status: %d\n", result);
1264 }
1265
1266 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1267
1268 ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
1269
1270 kfree(buf);
1271
1272 return ret;
1273}
1274
1275static ssize_t debug_write_reset(struct file *file, const char __user *buf,
1276 size_t count, loff_t *ppos)
1277{
1278 struct gs_port *ui_dev = file->private_data;
1279 unsigned long flags;
1280
1281 spin_lock_irqsave(&ui_dev->port_lock, flags);
1282 ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
1283 ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
1284 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1285
1286 return count;
1287}
1288
/* debugfs open: stash the gs_port (i_private) for the read/write ops. */
static int serial_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
1294
1295const struct file_operations debug_rst_ops = {
1296 .open = serial_debug_open,
1297 .write = debug_write_reset,
1298};
1299
1300const struct file_operations debug_adb_ops = {
1301 .open = serial_debug_open,
1302 .read = debug_read_status,
1303};
1304
1305static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
1306{
1307 struct dentry *dent;
1308 char buf[48];
1309
1310 snprintf(buf, 48, "usb_serial%d", port_num);
1311 dent = debugfs_create_dir(buf, 0);
1312 if (IS_ERR(dent))
1313 return;
1314
1315 debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
1316 debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
1317}
1318#else
1319static void usb_debugfs_init(struct gs_port *ui_dev) {}
1320#endif
1321
David Brownellc1dca562008-06-19 17:51:44 -07001322/**
1323 * gserial_setup - initialize TTY driver for one or more ports
1324 * @g: gadget to associate with these ports
1325 * @count: how many ports to support
1326 * Context: may sleep
1327 *
1328 * The TTY stack needs to know in advance how many devices it should
1329 * plan to manage. Use this call to set up the ports you will be
1330 * exporting through USB. Later, connect them to functions based
1331 * on what configuration is activated by the USB host; and disconnect
1332 * them as appropriate.
1333 *
1334 * An example would be a two-configuration device in which both
1335 * configurations expose port 0, but through different functions.
1336 * One configuration could even expose port 1 while the other
1337 * one doesn't.
1338 *
1339 * Returns negative errno or zero.
1340 */
Benoit Gobyaab96812011-04-19 20:37:33 -07001341int gserial_setup(struct usb_gadget *g, unsigned count)
David Brownellc1dca562008-06-19 17:51:44 -07001342{
1343 unsigned i;
1344 struct usb_cdc_line_coding coding;
1345 int status;
1346
1347 if (count == 0 || count > N_PORTS)
1348 return -EINVAL;
1349
1350 gs_tty_driver = alloc_tty_driver(count);
1351 if (!gs_tty_driver)
1352 return -ENOMEM;
1353
1354 gs_tty_driver->owner = THIS_MODULE;
1355 gs_tty_driver->driver_name = "g_serial";
David Brownell937ef732008-07-07 12:16:08 -07001356 gs_tty_driver->name = PREFIX;
David Brownellc1dca562008-06-19 17:51:44 -07001357 /* uses dynamically assigned dev_t values */
1358
1359 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1360 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001361 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
1362 | TTY_DRIVER_RESET_TERMIOS;
David Brownellc1dca562008-06-19 17:51:44 -07001363 gs_tty_driver->init_termios = tty_std_termios;
1364
1365 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1366 * MS-Windows. Otherwise, most of these flags shouldn't affect
1367 * anything unless we were to actually hook up to a serial line.
1368 */
1369 gs_tty_driver->init_termios.c_cflag =
1370 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1371 gs_tty_driver->init_termios.c_ispeed = 9600;
1372 gs_tty_driver->init_termios.c_ospeed = 9600;
1373
Harvey Harrison551509d2009-02-11 14:11:36 -08001374 coding.dwDTERate = cpu_to_le32(9600);
David Brownellc1dca562008-06-19 17:51:44 -07001375 coding.bCharFormat = 8;
1376 coding.bParityType = USB_CDC_NO_PARITY;
1377 coding.bDataBits = USB_CDC_1_STOP_BITS;
1378
1379 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1380
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001381 gserial_wq = create_singlethread_workqueue("k_gserial");
1382 if (!gserial_wq) {
1383 status = -ENOMEM;
1384 goto fail;
1385 }
1386
David Brownellc1dca562008-06-19 17:51:44 -07001387 /* make devices be openable */
1388 for (i = 0; i < count; i++) {
1389 mutex_init(&ports[i].lock);
1390 status = gs_port_alloc(i, &coding);
1391 if (status) {
1392 count = i;
1393 goto fail;
1394 }
1395 }
1396 n_ports = count;
1397
1398 /* export the driver ... */
1399 status = tty_register_driver(gs_tty_driver);
1400 if (status) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001401 put_tty_driver(gs_tty_driver);
David Brownellc1dca562008-06-19 17:51:44 -07001402 pr_err("%s: cannot register, err %d\n",
1403 __func__, status);
1404 goto fail;
1405 }
1406
1407 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1408 for (i = 0; i < count; i++) {
1409 struct device *tty_dev;
1410
1411 tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1412 if (IS_ERR(tty_dev))
1413 pr_warning("%s: no classdev for port %d, err %ld\n",
1414 __func__, i, PTR_ERR(tty_dev));
1415 }
1416
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001417 for (i = 0; i < count; i++)
1418 usb_debugfs_init(ports[i].port, i);
1419
David Brownellc1dca562008-06-19 17:51:44 -07001420 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1421 count, (count == 1) ? "" : "s");
1422
1423 return status;
1424fail:
1425 while (count--)
1426 kfree(ports[count].port);
Pavankumar Kondetif0f95d82011-09-23 11:38:57 +05301427 if (gserial_wq)
1428 destroy_workqueue(gserial_wq);
David Brownellc1dca562008-06-19 17:51:44 -07001429 put_tty_driver(gs_tty_driver);
1430 gs_tty_driver = NULL;
1431 return status;
1432}
1433
1434static int gs_closed(struct gs_port *port)
1435{
1436 int cond;
1437
1438 spin_lock_irq(&port->port_lock);
1439 cond = (port->open_count == 0) && !port->openclose;
1440 spin_unlock_irq(&port->port_lock);
1441 return cond;
1442}
1443
/**
 * gserial_cleanup - remove TTY-over-USB driver and devices
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gserial_setup().
 * Accordingly, it may need to wait until some open /dev/ files have
 * closed.
 *
 * The caller must have issued @gserial_disconnect() for any ports
 * that had previously been connected, so that there is never any
 * I/O pending when it's called.
 */
void gserial_cleanup(void)
{
	unsigned	i;
	struct gs_port	*port;

	/* nothing to do if gserial_setup() never succeeded */
	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		/* the RX push worker must be idle before the port is freed */
		cancel_work_sync(&port->push);

		/* wait for old opens to finish */
		wait_event(port->close_wait, gs_closed(port));

		WARN_ON(port->port_usb != NULL);

		kfree(port);
	}
	n_ports = 0;

	destroy_workqueue(gserial_wq);
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}
1493
/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect". It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point. However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @dev
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have set up the TTY driver by calling
 * @gserial_setup().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->open_count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	/* undo the IN-endpoint enable so the link stays fully inactive */
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}
1572
/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	/* disconnect without a prior connect is a no-op */
	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->open_count > 0 || port->openclose) {
		/* unblock gs_close()'s drain wait and hang up the TTY */
		wake_up_interruptible(&port->drain_wait);
		if (port->port_tty)
			tty_hangup(port->port_tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->open_count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	/* reset the debugfs byte counters for the next session */
	port->nbytes_from_host = port->nbytes_to_tty =
		port->nbytes_from_tty = port->nbytes_to_host = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}