/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
#define SMUX_WM_LOW 2
#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM 65536
#define TTY_BUFFER_FULL_WAIT_MS 50

/* maximum sleep time between wakeup attempts (1 << 20 us is ~1 s) */
#define SMUX_WAKEUP_DELAY_MAX (1 << 20)

/* minimum delay for scheduling delayed work (1 << 15 us is ~33 ms) */
#define SMUX_WAKEUP_DELAY_MIN (1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do {				\
	if (smux_debug_mask & MSM_SMUX_DEBUG)		\
		pr_info(x);				\
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 1);			\
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 0);			\
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
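 *
 * Illustrative sequences (derived from the handlers below; a sketch, not
 * normative): a locally initiated power-down moves SMUX_PWR_ON ->
 * SMUX_PWR_TURNING_OFF -> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF, while a
 * remote sleep request moves SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH ->
 * SMUX_PWR_OFF.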
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH,
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple locks
 * are required, only increasing lock hierarchy numbers may be locked which
 * ensures avoiding a deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
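 *
 * A minimal ordering sketch (illustrative only, not code from this driver)
 * for when both channel locks are needed:
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);	<- lhb1 first
 *	spin_lock(&ch->tx_lock_lhb2);			<- then lhb2
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);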
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
							int size);

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	spinlock_t lock_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
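 *
 * Hypothetical example (payload bytes invented): a 4-byte DATA packet
 * received on fully-opened, normal-mode channel 8 would log roughly as:
 *   smux: R8 ON:ON DATA flags 0 len 4:0 de ad be ef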
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch;
	unsigned char *data;

	ch = &smux_lch[pkt->hdr.lcid];

	switch (ch->local_state) {
	case SMUX_LCH_LOCAL_CLOSED:
		local_state = 'C';
		break;
	case SMUX_LCH_LOCAL_OPENING:
		local_state = 'o';
		break;
	case SMUX_LCH_LOCAL_OPENED:
		local_state = 'O';
		break;
	case SMUX_LCH_LOCAL_CLOSING:
		local_state = 'c';
		break;
	default:
		local_state = 'U';
		break;
	}

	switch (ch->local_mode) {
	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
		local_mode = 'L';
		break;
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		local_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		local_mode = 'N';
		break;
	default:
		local_mode = 'U';
		break;
	}

	switch (ch->remote_state) {
	case SMUX_LCH_REMOTE_CLOSED:
		remote_state = 'C';
		break;
	case SMUX_LCH_REMOTE_OPENED:
		remote_state = 'O';
		break;
	default:
		remote_state = 'U';
		break;
	}

	switch (ch->remote_mode) {
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		remote_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		remote_mode = 'N';
		break;
	default:
		remote_mode = 'U';
		break;
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
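 *
 * Usage sketch (illustrative only; src and len are hypothetical caller
 * variables, the helpers are the ones defined in this file):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.payload_len = len;
 *		if (!smux_alloc_pkt_payload(pkt))
 *			memcpy(pkt->payload, src, len);
 *		smux_free_pkt(pkt);
 *	}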
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
					GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
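 *
 * Wire layout (illustrative): [struct smux_hdr_t][payload bytes][pad bytes],
 * where the pad bytes are zero-filled.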
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

		/* FUTURE - add SSR logic */
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;

		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);

	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;
	pkt.hdr.lcid = 0;
	SMUX_LOG_PKT_TX(&pkt);
	if (!smux_byte_loopback)
		smux_tx_tty(&pkt);
	else
		smux_tx_loopback(&pkt);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	int i;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;

		if (!remote_loopback) {
			tmp = ch->get_rx_buffer(ch->priv,
					(void **)&metadata.read.pkt_priv,
					(void **)&metadata.read.buffer,
					rx_len);
			if (tmp == 0 && metadata.read.buffer) {
				/* place data into RX buffer */
				memcpy(metadata.read.buffer, pkt->payload,
								rx_len);
				metadata.read.len = rx_len;
				schedule_notify(lcid, SMUX_READ_DONE,
								&metadata);
				ret = 0;
				break;
			} else if (tmp == -EAGAIN) {
				ret = -ENOMEM;
			} else if (tmp < 0) {
				schedule_notify(lcid, SMUX_READ_FAIL, NULL);
				ret = -ENOMEM;
				break;
			} else if (!metadata.read.buffer) {
				pr_err("%s: get_rx_buffer() buffer is NULL\n",
					__func__);
				ret = -ENOMEM;
			}
		} else {
			/* Echo the data back to the remote client. */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_DATA;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
				ack_pkt->payload = pkt->payload;
				ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
				smux_tx_queue(ack_pkt, ch, 0);
				list_channel(ch);
				ret = 0;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
				ret = -ENOMEM;
			}
			/* loopback echo needs no buffer retry */
			break;
		}
	}

out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
		return -ENXIO;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	int tx_ready = 0;
	struct smux_pkt_t *ack_pkt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/* Power-down complete, turn off UART */
			SMUX_DBG("%s: Power %d->%d\n", __func__,
					smux.power_state, SMUX_PWR_OFF_FLUSH);
			smux.power_state = SMUX_PWR_OFF_FLUSH;
			queue_work(smux_tx_wq, &smux_inactivity_work);
		} else {
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
		}
	} else {
		/* remote sleep request */
		if (smux.power_state == SMUX_PWR_ON
			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_DBG("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				/* acknowledge the power-down request */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = pkt->hdr.lcid;
				smux_tx_queue(ack_pkt,
						&smux_lch[ack_pkt->hdr.lcid], 0);
				tx_ready = 1;
				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
				queue_delayed_work(smux_tx_wq,
					&smux_delayed_inactivity_work,
					msecs_to_jiffies(
						SMUX_INACTIVITY_TIMEOUT_MS));
			}
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (tx_ready)
		list_channel(&smux_lch[ack_pkt->hdr.lcid]);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret;

	SMUX_LOG_PKT_RX(pkt);

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len  Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;
	uint8_t lcid;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		pr_err("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	lcid = recv.hdr.lcid;
	if (smux_assert_lch_id(lcid)) {
		pr_err("%s: invalid channel id %d\n", __func__, lcid);
		return -ENXIO;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		smux_send_byte(SMUX_WAKEUP_ACK);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		pr_err("%s: wakeup request ack invalid in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
					(unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
					__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len  Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
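 *
 * Note: the data is handed to smux_rx_wq via an on-stack work item and this
 * function blocks on a completion until the worker has consumed it, so
 * @data may safely live on the caller's stack.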
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}

/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("%s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit packet on correct transport and then perform client
 * notification.
 *
 * @ch  Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("%s: PKT write done\n", __func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				pr_err("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
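 *
 * Illustration (derived from the loop below): each retry doubles
 * pwr_wakeup_delay_us; delays below SMUX_WAKEUP_DELAY_MIN busy-wait in
 * usleep_range(), longer ones are rescheduled as delayed work, and the
 * delay is capped at SMUX_WAKEUP_DELAY_MAX (~1 s).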
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;
	int complete = 0;

	for (;;) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state == SMUX_PWR_ON) {
			/* wakeup complete */
			complete = 1;
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		} else {
			/* retry */
			wakeup_delay = smux.pwr_wakeup_delay_us;
			smux.pwr_wakeup_delay_us <<= 1;
			if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
				smux.pwr_wakeup_delay_us =
					SMUX_WAKEUP_DELAY_MAX;
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("%s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("%s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
		} else {
			/* schedule delayed work */
			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
			break;
		}
	}

	if (complete) {
		SMUX_DBG("%s: wakeup complete\n", __func__);
		/*
		 * Cancel any pending retry.  This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}


/**
 * Inactivity timeout worker.  Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	int tx_ready = 0;
	struct smux_pkt_t *pkt;
	unsigned long flags;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_DBG("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = 0;
					smux_tx_queue(pkt,
						&smux_lch[SMUX_TEST_LCID],
						0);
					tx_ready = 1;
				}
			}
		} else {
			SMUX_DBG("%s: link inactive, but powerdown disabled\n",
					__func__);
		}
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	spin_unlock(&smux.tx_lock_lha2);
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	if (tx_ready)
		list_channel(&smux_lch[SMUX_TEST_LCID]);

	if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
	    (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
		/* ready to power-down the UART */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux_uart_power_off();
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		smux.power_state = SMUX_PWR_OFF;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}

2024/**
2025 * RX worker handles all receive operations.
2026 *
2027 * @work Work structure contained in a struct smux_rx_worker_data
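 *
 * The byte stream is parsed by a small state machine with the states
 * SMUX_RX_IDLE, SMUX_RX_MAGIC, SMUX_RX_HDR and SMUX_RX_PAYLOAD; the loop
 * below re-runs the handlers until all bytes are consumed and the state
 * stops changing.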
2028 */
2029static void smux_rx_worker(struct work_struct *work)
2030{
2031 unsigned long flags;
2032 int used;
2033 int initial_rx_state;
2034 struct smux_rx_worker_data *w;
2035 const unsigned char *data;
2036 int len;
2037 int flag;
2038
2039 w = container_of(work, struct smux_rx_worker_data, work);
2040 data = w->data;
2041 len = w->len;
2042 flag = w->flag;
2043
2044 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2045 smux.rx_activity_flag = 1;
2046 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2047
2048 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2049 used = 0;
2050 do {
2051 SMUX_DBG("%s: state %d; %d of %d\n",
2052 __func__, smux.rx_state, used, len);
2053 initial_rx_state = smux.rx_state;
2054
2055 switch (smux.rx_state) {
2056 case SMUX_RX_IDLE:
2057 smux_rx_handle_idle(data, len, &used, flag);
2058 break;
2059 case SMUX_RX_MAGIC:
2060 smux_rx_handle_magic(data, len, &used, flag);
2061 break;
2062 case SMUX_RX_HDR:
2063 smux_rx_handle_hdr(data, len, &used, flag);
2064 break;
2065 case SMUX_RX_PAYLOAD:
2066 smux_rx_handle_pkt_payload(data, len, &used, flag);
2067 break;
2068 default:
2069 SMUX_DBG("%s: invalid state %d\n",
2070 __func__, smux.rx_state);
2071 smux.rx_state = SMUX_RX_IDLE;
2072 break;
2073 }
2074 } while (used < len || smux.rx_state != initial_rx_state);
2075
2076 complete(&w->work_complete);
2077}
2078
2079/**
2080 * Transmit worker handles serializing and transmitting packets onto the
2081 * underlying transport.
2082 *
2083 * @work Work structure (not used)
2084 */
2085static void smux_tx_worker(struct work_struct *work)
2086{
2087 struct smux_pkt_t *pkt;
2088 struct smux_lch_t *ch;
2089 unsigned low_wm_notif;
2090 unsigned lcid;
2091 unsigned long flags;
2092
2093
2094 /*
2095 * Transmit packets in round-robin fashion based upon ready
2096 * channels.
2097 *
2098 * To eliminate the need to hold a lock for the entire
2099 * iteration through the channel ready list, the head of the
2100 * ready-channel list is always the next channel to be
2101 * processed. To send a packet, the first valid packet in
2102 * the head channel is removed and the head channel is then
2103 * rescheduled at the end of the queue by removing it and
2104 * inserting after the tail. The locks can then be released
2105 * while the packet is processed.
2106 */
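 /*
  * Illustrative example: with ready channels [A, B, C], one packet is
  * taken from A and list_rotate_left() leaves the list as [B, C, A], so
  * B is serviced next; a channel whose tx_queue is empty is removed from
  * the ready list instead.
  */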
2107 for (;;) {
2108 pkt = NULL;
2109 low_wm_notif = 0;
2110
2111 /* get the next ready channel */
2112 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2113 if (list_empty(&smux.lch_tx_ready_list)) {
2114 /* no ready channels */
2115 SMUX_DBG("%s: no more ready channels, exiting\n",
2116 __func__);
2117 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2118 break;
2119 }
2120 smux.tx_activity_flag = 1;
2121
2122 if (smux.power_state != SMUX_PWR_ON
2123 && smux.power_state != SMUX_PWR_TURNING_OFF
2124 && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
2125 /* Link isn't ready to transmit */
2126 if (smux.power_state == SMUX_PWR_OFF) {
2127 /* link is off, trigger wakeup */
2128 smux.pwr_wakeup_delay_us = 1;
2129 SMUX_DBG("%s: Power %d->%d\n", __func__,
2130 smux.power_state,
2131 SMUX_PWR_TURNING_ON);
2132 smux.power_state = SMUX_PWR_TURNING_ON;
2133 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2134 flags);
2135 smux_uart_power_on();
2136 queue_work(smux_tx_wq, &smux_wakeup_work);
2137 } else {
2138 SMUX_DBG("%s: cannot tx with power state %d\n",
2139 __func__,
2140 smux.power_state);
2141 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2142 flags);
2143 }
2144 break;
2145 }
2146
2147 /* get the next packet to send and rotate channel list */
2148 ch = list_first_entry(&smux.lch_tx_ready_list,
2149 struct smux_lch_t,
2150 tx_ready_list);
2151
2152 spin_lock(&ch->state_lock_lhb1);
2153 spin_lock(&ch->tx_lock_lhb2);
2154 if (!list_empty(&ch->tx_queue)) {
2155 /*
2156 * If remote TX flow control is enabled or
2157 * the channel is not fully opened, then only
2158 * send command packets.
2159 */
2160 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2161 struct smux_pkt_t *curr;
2162 list_for_each_entry(curr, &ch->tx_queue, list) {
2163 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2164 pkt = curr;
2165 break;
2166 }
2167 }
2168 } else {
2169 /* get next cmd/data packet to send */
2170 pkt = list_first_entry(&ch->tx_queue,
2171 struct smux_pkt_t, list);
2172 }
2173 }
2174
2175 if (pkt) {
2176 list_del(&pkt->list);
2177
2178 /* update packet stats */
2179 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2180 --ch->tx_pending_data_cnt;
2181 if (ch->notify_lwm &&
2182 ch->tx_pending_data_cnt
2183 <= SMUX_WM_LOW) {
2184 ch->notify_lwm = 0;
2185 low_wm_notif = 1;
2186 }
2187 }
2188
2189 /* advance to the next ready channel */
2190 list_rotate_left(&smux.lch_tx_ready_list);
2191 } else {
2192 /* no data in channel to send, remove from ready list */
2193 list_del(&ch->tx_ready_list);
2194 INIT_LIST_HEAD(&ch->tx_ready_list);
2195 }
2196 lcid = ch->lcid;
2197 spin_unlock(&ch->tx_lock_lhb2);
2198 spin_unlock(&ch->state_lock_lhb1);
2199 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2200
2201 if (low_wm_notif)
2202 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2203
2204 /* send the packet */
2205 smux_tx_pkt(ch, pkt);
2206 smux_free_pkt(pkt);
2207 }
2208}
2209
2210
2211/**********************************************************************/
2212/* Kernel API */
2213/**********************************************************************/
2214
2215/**
2216 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2217 * flags.
2218 *
2219 * @lcid Logical channel ID
2220 * @set Options to set
2221 * @clear Options to clear
2222 *
2223 * @returns 0 for success, < 0 for failure
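 *
 * Example (illustrative): enable local loopback and halt the remote
 * transmitter in one call:
 *
 *	msm_smux_set_ch_option(lcid,
 *		SMUX_CH_OPTION_LOCAL_LOOPBACK | SMUX_CH_OPTION_REMOTE_TX_STOP,
 *		0);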
2224 */
2225int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2226{
2227 unsigned long flags;
2228 struct smux_lch_t *ch;
2229 int tx_ready = 0;
2230 int ret = 0;
2231
2232 if (smux_assert_lch_id(lcid))
2233 return -ENXIO;
2234
2235 ch = &smux_lch[lcid];
2236 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2237
2238 /* Local loopback mode */
2239 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2240 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2241
2242 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2243 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2244
2245 /* Remote loopback mode */
2246 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2247 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2248
2249 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2250 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2251
2252 /* Flow control */
2253 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2254 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2255 ret = smux_send_status_cmd(ch);
2256 tx_ready = 1;
2257 }
2258
2259 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2260 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2261 ret = smux_send_status_cmd(ch);
2262 tx_ready = 1;
2263 }
2264
2265 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2266
2267 if (tx_ready)
2268 list_channel(ch);
2269
2270 return ret;
2271}
2272
2273/**
2274 * Starts the opening sequence for a logical channel.
2275 *
2276 * @lcid Logical channel ID
2277 * @priv Free for client usage
2278 * @notify Event notification function
2279 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2280 *
2281 * @returns 0 for success, <0 otherwise
2282 *
2283 * A channel must be fully closed (either not previously opened, or
2284 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2285 * has been received).
2286 *
2287 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2288 * event.
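 *
 * Example usage (a sketch; my_priv, my_notify() and my_get_rx_buffer()
 * are hypothetical client-side definitions, not part of this driver):
 *
 *	ret = msm_smux_open(lcid, my_priv, my_notify, my_get_rx_buffer);
 *	if (ret < 0)
 *		return ret;
 *
 * Writes may be queued immediately, but data is only transmitted after
 * the SMUX_CONNECTED event is delivered to my_notify().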
2289 */
2290int msm_smux_open(uint8_t lcid, void *priv,
2291 void (*notify)(void *priv, int event_type, const void *metadata),
2292 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2293 int size))
2294{
2295 int ret;
2296 struct smux_lch_t *ch;
2297 struct smux_pkt_t *pkt;
2298 int tx_ready = 0;
2299 unsigned long flags;
2300
2301 if (smux_assert_lch_id(lcid))
2302 return -ENXIO;
2303
2304 ch = &smux_lch[lcid];
2305 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2306
2307 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2308 ret = -EAGAIN;
2309 goto out;
2310 }
2311
2312 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2313 pr_err("%s: open lcid %d local state %x invalid\n",
2314 __func__, lcid, ch->local_state);
2315 ret = -EINVAL;
2316 goto out;
2317 }
2318
2319 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2320 ch->local_state,
2321 SMUX_LCH_LOCAL_OPENING);
2322
2323 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2324
2325 ch->priv = priv;
2326 ch->notify = notify;
2327 ch->get_rx_buffer = get_rx_buffer;
2328 ret = 0;
2329
2330 /* Send Open Command */
2331 pkt = smux_alloc_pkt();
2332 if (!pkt) {
2333 ret = -ENOMEM;
2334 goto out;
2335 }
2336 pkt->hdr.magic = SMUX_MAGIC;
2337 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2338 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2339 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2340 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2341 pkt->hdr.lcid = lcid;
2342 pkt->hdr.payload_len = 0;
2343 pkt->hdr.pad_len = 0;
2344 smux_tx_queue(pkt, ch, 0);
2345 tx_ready = 1;
2346
2347out:
2348 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2349 if (tx_ready)
2350 list_channel(ch);
2351 return ret;
2352}
2353
2354/**
2355 * Starts the closing sequence for a logical channel.
2356 *
2357 * @lcid Logical channel ID
2358 *
2359 * @returns 0 for success, <0 otherwise
2360 *
2361 * Once the close event has been acknowledged by the remote side, the client
2362 * will receive a SMUX_DISCONNECTED notification.
2363 */
2364int msm_smux_close(uint8_t lcid)
2365{
2366 int ret = 0;
2367 struct smux_lch_t *ch;
2368 struct smux_pkt_t *pkt;
2369 int tx_ready = 0;
2370 unsigned long flags;
2371
2372 if (smux_assert_lch_id(lcid))
2373 return -ENXIO;
2374
2375 ch = &smux_lch[lcid];
2376 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2377 ch->local_tiocm = 0x0;
2378 ch->remote_tiocm = 0x0;
2379 ch->tx_pending_data_cnt = 0;
2380 ch->notify_lwm = 0;
2381
2382 /* Purge TX queue */
2383 spin_lock(&ch->tx_lock_lhb2);
2384 while (!list_empty(&ch->tx_queue)) {
2385 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2386 list);
2387 list_del(&pkt->list);
2388
2389 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2390 /* Open was never sent, just force to closed state */
2391 union notifier_metadata meta_disconnected;
2392
2393 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2394 meta_disconnected.disconnected.is_ssr = 0;
2395 schedule_notify(lcid, SMUX_DISCONNECTED,
2396 &meta_disconnected);
2397 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2398 /* Notify client of failed write */
2399 union notifier_metadata meta_write;
2400
2401 meta_write.write.pkt_priv = pkt->priv;
2402 meta_write.write.buffer = pkt->payload;
2403 meta_write.write.len = pkt->hdr.payload_len;
2404 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2405 }
2406 smux_free_pkt(pkt);
2407 }
2408 spin_unlock(&ch->tx_lock_lhb2);
2409
2410 /* Send Close Command */
2411 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2412 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2413 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2414 ch->local_state,
2415 SMUX_LCH_LOCAL_CLOSING);
2416
2417 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2418 pkt = smux_alloc_pkt();
2419 if (pkt) {
2420 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2421 pkt->hdr.flags = 0;
2422 pkt->hdr.lcid = lcid;
2423 pkt->hdr.payload_len = 0;
2424 pkt->hdr.pad_len = 0;
2425 smux_tx_queue(pkt, ch, 0);
2426 tx_ready = 1;
2427 } else {
2428 pr_err("%s: pkt allocation failed\n", __func__);
2429 ret = -ENOMEM;
2430 }
2431 }
2432 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2433
2434 if (tx_ready)
2435 list_channel(ch);
2436
2437 return ret;
2438}
2439
2440/**
2441 * Write data to a logical channel.
2442 *
2443 * @lcid Logical channel ID
2444 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2445 * SMUX_WRITE_FAIL notification.
2446 * @data Data to write
2447 * @len Length of @data
2448 *
2449 * @returns 0 for success, <0 otherwise
2450 *
2451 * Data may be written immediately after msm_smux_open() is called,
2452 * but the data will wait in the transmit queue until the channel has
2453 * been fully opened.
2454 *
2455 * Once the data has been written, the client will receive either a completion
2456 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
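 *
 * Example (illustrative; buf is a hypothetical client buffer that must
 * remain valid until the SMUX_WRITE_DONE or SMUX_WRITE_FAIL event):
 *
 *	ret = msm_smux_write(lcid, my_ctx, buf, len);
 *
 * A return of -EAGAIN means the high watermark was hit; back off and
 * retry after a SMUX_LOW_WM_HIT notification.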
2457 */
2458int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2459{
2460 struct smux_lch_t *ch;
2461 struct smux_pkt_t *pkt = NULL;
2462 int tx_ready = 0;
2463 unsigned long flags;
2464 int ret;
2465
2466 if (smux_assert_lch_id(lcid))
2467 return -ENXIO;
2468
2469 ch = &smux_lch[lcid];
2470 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2471
2472 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2473 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2474 pr_err("%s: invalid local state %d channel %d\n",
2475 __func__, ch->local_state, lcid);
2476 ret = -EINVAL;
2477 goto out;
2478 }
2479
2480 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2481 pr_err("%s: payload %d too large\n",
2482 __func__, len);
2483 ret = -E2BIG;
2484 goto out;
2485 }
2486
2487 pkt = smux_alloc_pkt();
2488 if (!pkt) {
2489 ret = -ENOMEM;
2490 goto out;
2491 }
2492
2493 pkt->hdr.cmd = SMUX_CMD_DATA;
2494 pkt->hdr.lcid = lcid;
2495 pkt->hdr.flags = 0;
2496 pkt->hdr.payload_len = len;
2497 pkt->payload = (void *)data;
2498 pkt->priv = pkt_priv;
2499 pkt->hdr.pad_len = 0;
2500
2501 spin_lock(&ch->tx_lock_lhb2);
2502 /* verify high watermark */
2503 SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2504
2505 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2506 pr_err("%s: ch %d high watermark %d exceeded, pending %d\n",
2507 __func__, lcid, SMUX_WM_HIGH,
2508 ch->tx_pending_data_cnt);
2509 ret = -EAGAIN;
2510 goto out_inner;
2511 }
2512
2513 /* queue packet for transmit */
2514 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2515 ch->notify_lwm = 1;
2516 pr_err("%s: high watermark hit\n", __func__);
2517 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2518 }
2519 list_add_tail(&pkt->list, &ch->tx_queue);
2520
2521 /* add to ready list */
2522 if (IS_FULLY_OPENED(ch))
2523 tx_ready = 1;
2524
2525 ret = 0;
2526
2527out_inner:
2528 spin_unlock(&ch->tx_lock_lhb2);
2529
2530out:
2531 if (ret && pkt)
2532 smux_free_pkt(pkt);
2533 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2534
2535 if (tx_ready)
2536 list_channel(ch);
2537
2538 return ret;
2539}
2540
2541/**
2542 * Returns true if the TX queue is currently full (high water mark).
2543 *
2544 * @lcid Logical channel ID
2545 * @returns 0 if channel is not full
2546 * 1 if it is full
2547 * < 0 for error
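 *
 * Example (illustrative) pacing loop, where have_data(), next_buf() and
 * next_len() are hypothetical client helpers:
 *
 *	while (have_data() && !msm_smux_is_ch_full(lcid))
 *		msm_smux_write(lcid, NULL, next_buf(), next_len());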
2548 */
2549int msm_smux_is_ch_full(uint8_t lcid)
2550{
2551 struct smux_lch_t *ch;
2552 unsigned long flags;
2553 int is_full = 0;
2554
2555 if (smux_assert_lch_id(lcid))
2556 return -ENXIO;
2557
2558 ch = &smux_lch[lcid];
2559
2560 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2561 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2562 is_full = 1;
2563 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2564
2565 return is_full;
2566}
2567
2568/**
2569 * Returns true if the TX queue has space for more packets (it is at or
2570 * below the low water mark).
2571 *
2572 * @lcid Logical channel ID
2573 * @returns 0 if channel is above low watermark
2574 * 1 if it's at or below the low watermark
2575 * < 0 for error
2576 */
2577int msm_smux_is_ch_low(uint8_t lcid)
2578{
2579 struct smux_lch_t *ch;
2580 unsigned long flags;
2581 int is_low = 0;
2582
2583 if (smux_assert_lch_id(lcid))
2584 return -ENXIO;
2585
2586 ch = &smux_lch[lcid];
2587
2588 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2589 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2590 is_low = 1;
2591 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2592
2593 return is_low;
2594}
2595
2596/**
2597 * Send TIOCM status update.
2598 *
2599 * @ch Channel for update
2600 *
2601 * @returns 0 for success, <0 for failure
2602 *
2603 * Channel lock must be held before calling.
2604 */
2605static int smux_send_status_cmd(struct smux_lch_t *ch)
2606{
2607 struct smux_pkt_t *pkt;
2608
2609 if (!ch)
2610 return -EINVAL;
2611
2612 pkt = smux_alloc_pkt();
2613 if (!pkt)
2614 return -ENOMEM;
2615
2616 pkt->hdr.lcid = ch->lcid;
2617 pkt->hdr.cmd = SMUX_CMD_STATUS;
2618 pkt->hdr.flags = ch->local_tiocm;
2619 pkt->hdr.payload_len = 0;
2620 pkt->hdr.pad_len = 0;
2621 smux_tx_queue(pkt, ch, 0);
2622
2623 return 0;
2624}
2625
2626/**
2627 * Internal helper function for getting the TIOCM status with
2628 * state_lock_lhb1 already locked.
2629 *
2630 * @ch Channel pointer
2631 *
2632 * @returns TIOCM status
2633 */
2634static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2635{
2636 long status = 0x0;
2637
2638 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2639 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
2640 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
2641 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
2642
2643 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
2644 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
2645
2646 return status;
2647}
2648
2649/**
2650 * Get the TIOCM status bits.
2651 *
2652 * @lcid Logical channel ID
2653 *
2654 * @returns >= 0 TIOCM status bits
2655 * < 0 Error condition
2656 */
2657long msm_smux_tiocm_get(uint8_t lcid)
2658{
2659 struct smux_lch_t *ch;
2660 unsigned long flags;
2661 long status = 0x0;
2662
2663 if (smux_assert_lch_id(lcid))
2664 return -ENXIO;
2665
2666 ch = &smux_lch[lcid];
2667 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2668 status = msm_smux_tiocm_get_atomic(ch);
2669 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2670
2671 return status;
2672}
2673
2674/**
2675 * Set/clear the TIOCM status bits.
2676 *
2677 * @lcid Logical channel ID
2678 * @set Bits to set
2679 * @clear Bits to clear
2680 *
2681 * @returns 0 for success; < 0 for failure
2682 *
2683 * If a bit is specified in both the @set and @clear masks, then the clear bit
2684 * definition will dominate and the bit will be cleared.
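 *
 * Example (illustrative): assert DTR while dropping RTS in a single call:
 *
 *	msm_smux_tiocm_set(lcid, TIOCM_DTR, TIOCM_RTS);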
2685 */
2686int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
2687{
2688 struct smux_lch_t *ch;
2689 unsigned long flags;
2690 uint8_t old_status;
2691 uint8_t status_set = 0x0;
2692 uint8_t status_clear = 0x0;
2693 int tx_ready = 0;
2694 int ret = 0;
2695
2696 if (smux_assert_lch_id(lcid))
2697 return -ENXIO;
2698
2699 ch = &smux_lch[lcid];
2700 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2701
2702 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2703 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2704 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2705 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2706
2707 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2708 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2709 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2710 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2711
2712 old_status = ch->local_tiocm;
2713 ch->local_tiocm |= status_set;
2714 ch->local_tiocm &= ~status_clear;
2715
2716 if (ch->local_tiocm != old_status) {
2717 ret = smux_send_status_cmd(ch);
2718 tx_ready = 1;
2719 }
2720 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2721
2722 if (tx_ready)
2723 list_channel(ch);
2724
2725 return ret;
2726}
2727
2728/**********************************************************************/
2729/* Line Discipline Interface */
2730/**********************************************************************/
2731static int smuxld_open(struct tty_struct *tty)
2732{
2733 int i;
2734 int tmp;
2735 unsigned long flags;
2736
2737 if (!smux.is_initialized)
2738 return -ENODEV;
2739
2740 spin_lock_irqsave(&smux.lock_lha0, flags);
2741 if (smux.ld_open_count) {
2742 pr_err("%s: %p multiple instances not supported\n",
2743 __func__, tty);
2744 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2745 return -EEXIST;
2746 }
2747
2748 if (tty->ops->write == NULL) {
2749 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2750 return -EINVAL;
2751 }
2752 ++smux.ld_open_count;
2753
2754 /* connect to TTY */
2755 smux.tty = tty;
2756 tty->disc_data = &smux;
2757 tty->receive_room = TTY_RECEIVE_ROOM;
2758 tty_driver_flush_buffer(tty);
2759
2760 /* power-down the UART if we are idle */
2761 spin_lock(&smux.tx_lock_lha2);
2762 if (smux.power_state == SMUX_PWR_OFF) {
2763 SMUX_DBG("%s: powering off uart\n", __func__);
2764 smux.power_state = SMUX_PWR_OFF_FLUSH;
2765 spin_unlock(&smux.tx_lock_lha2);
2766 queue_work(smux_tx_wq, &smux_inactivity_work);
2767 } else {
2768 spin_unlock(&smux.tx_lock_lha2);
2769 }
2770 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2771
2772 /* register platform devices */
2773 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
2774 tmp = platform_device_register(&smux_devs[i]);
2775 if (tmp)
2776 pr_err("%s: error %d registering device %s\n",
2777 __func__, tmp, smux_devs[i].name);
2778 }
2779 return 0;
2780}
2781
2782static void smuxld_close(struct tty_struct *tty)
2783{
2784 unsigned long flags;
2785 int i;
2786
2787 spin_lock_irqsave(&smux.lock_lha0, flags);
2788 if (smux.ld_open_count <= 0) {
2789 pr_err("%s: invalid ld count %d\n", __func__,
2790 smux.ld_open_count);
2791 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2792 return;
2793 }
2794 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2795
2796 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
2797 platform_device_unregister(&smux_devs[i]);
2798
2799 --smux.ld_open_count;
2800}
2801
2802/**
2803 * Receive data from TTY Line Discipline.
2804 *
2805 * @tty TTY structure
2806 * @cp Character data
2807 * @fp Flag data
2808 * @count Size of character and flag data
2809 */
2810void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2811 char *fp, int count)
2812{
2813 int i;
2814 int last_idx = 0;
2815 const char *tty_name = NULL;
2816 char *f;
2817
2818 if (smux_debug_mask & MSM_SMUX_DEBUG)
2819 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
2820 16, 1, cp, count, true);
2821
2822 /* verify error flags */
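 /*
  * Illustrative example: for cp = "ABxCD" where only 'x' carries a
  * non-TTY_NORMAL flag, the parser is fed "AB" as TTY_NORMAL, then 'x'
  * with its error flag, and finally "CD" as TTY_NORMAL.
  */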
2823 for (i = 0, f = fp; i < count; ++i, ++f) {
2824 if (*f != TTY_NORMAL) {
2825 if (tty)
2826 tty_name = tty->name;
2827 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
2828 tty_name, *f, tty_flag_to_str(*f));
2829
2830 /* feed all previous valid data to the parser */
2831 smux_rx_state_machine(cp + last_idx, i - last_idx,
2832 TTY_NORMAL);
2833
2834 /* feed bad data to parser */
2835 smux_rx_state_machine(cp + i, 1, *f);
2836 last_idx = i + 1;
2837 }
2838 }
2839
2840 /* feed data to RX state machine */
2841 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
2842}
2843
2844static void smuxld_flush_buffer(struct tty_struct *tty)
2845{
2846 pr_err("%s: not supported\n", __func__);
2847}
2848
2849static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
2850{
2851 pr_err("%s: not supported\n", __func__);
2852 return -ENODEV;
2853}
2854
2855static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
2856 unsigned char __user *buf, size_t nr)
2857{
2858 pr_err("%s: not supported\n", __func__);
2859 return -ENODEV;
2860}
2861
2862static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
2863 const unsigned char *buf, size_t nr)
2864{
2865 pr_err("%s: not supported\n", __func__);
2866 return -ENODEV;
2867}
2868
2869static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
2870 unsigned int cmd, unsigned long arg)
2871{
2872 pr_err("%s: not supported\n", __func__);
2873 return -ENODEV;
2874}
2875
2876static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
2877 struct poll_table_struct *tbl)
2878{
2879 pr_err("%s: not supported\n", __func__);
2880 return -ENODEV;
2881}
2882
2883static void smuxld_write_wakeup(struct tty_struct *tty)
2884{
2885 pr_err("%s: not supported\n", __func__);
2886}
2887
2888static struct tty_ldisc_ops smux_ldisc_ops = {
2889 .owner = THIS_MODULE,
2890 .magic = TTY_LDISC_MAGIC,
2891 .name = "n_smux",
2892 .open = smuxld_open,
2893 .close = smuxld_close,
2894 .flush_buffer = smuxld_flush_buffer,
2895 .chars_in_buffer = smuxld_chars_in_buffer,
2896 .read = smuxld_read,
2897 .write = smuxld_write,
2898 .ioctl = smuxld_ioctl,
2899 .poll = smuxld_poll,
2900 .receive_buf = smuxld_receive_buf,
2901 .write_wakeup = smuxld_write_wakeup
2902};
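
/*
 * Illustrative only: userspace typically attaches this line discipline
 * to the HS-UART tty with the standard TIOCSETD ioctl:
 *
 *	int ldisc = N_SMUX;
 *	ioctl(uart_fd, TIOCSETD, &ldisc);
 *
 * which invokes smuxld_open() above.
 */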
2903
2904static int __init smux_init(void)
2905{
2906 int ret;
2907
2908 spin_lock_init(&smux.lock_lha0);
2909
2910 spin_lock_init(&smux.rx_lock_lha1);
2911 smux.rx_state = SMUX_RX_IDLE;
2912 smux.power_state = SMUX_PWR_OFF;
2913 smux.pwr_wakeup_delay_us = 1;
2914 smux.powerdown_enabled = 0;
2915 smux.rx_activity_flag = 0;
2916 smux.tx_activity_flag = 0;
2917 smux.recv_len = 0;
2918 smux.tty = NULL;
2919 smux.ld_open_count = 0;
2920 smux.in_reset = 0;
2921 smux.is_initialized = 1;
2922 smux_byte_loopback = 0;
2923
2924 spin_lock_init(&smux.tx_lock_lha2);
2925 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
2926
2927 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
2928 if (ret != 0) {
2929 pr_err("%s: error %d registering line discipline\n",
2930 __func__, ret);
2931 return ret;
2932 }
2933
2934 ret = lch_init();
2935 if (ret != 0) {
2936 pr_err("%s: lch_init failed\n", __func__);
2937 return ret;
2938 }
2939
2940 return 0;
2941}
2942
2943static void __exit smux_exit(void)
2944{
2945 int ret;
2946
2947 ret = tty_unregister_ldisc(N_SMUX);
2948 if (ret != 0) {
2949 pr_err("%s error %d unregistering line discipline\n",
2950 __func__, ret);
2951 return;
2952 }
2953}
2954
2955module_init(smux_init);
2956module_exit(smux_exit);
2957
2958MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
2959MODULE_LICENSE("GPL v2");
2960MODULE_ALIAS_LDISC(N_SMUX);