/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
Eric Holmberg8ed30f22012-05-10 19:16:51 -060036#define SMUX_PKT_LOG_SIZE 80
37
38/* Maximum size we can accept in a single RX buffer */
39#define TTY_RECEIVE_ROOM 65536
40#define TTY_BUFFER_FULL_WAIT_MS 50
41
42/* maximum sleep time between wakeup attempts */
43#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
44
45/* minimum delay for scheduling delayed work */
46#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
47
48/* inactivity timeout for no rx/tx activity */
Eric Holmberg05620172012-07-03 11:13:18 -060049#define SMUX_INACTIVITY_TIMEOUT_MS 1000000
Eric Holmberg8ed30f22012-05-10 19:16:51 -060050
Eric Holmbergb8435c82012-06-05 14:51:29 -060051/* RX get_rx_buffer retry timeout values */
52#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
53#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
54
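/*
 * Editor's sketch (assumption, for illustration only): these bounds suggest
 * an exponential backoff for get_rx_buffer() retries along the lines of
 *
 *	retry->timeout_in_ms <<= 1;
 *	if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS)
 *		retry->timeout_in_ms = SMUX_RX_RETRY_MAX_MS;
 *
 * The actual retry policy lives in smux_rx_retry_worker(), which is outside
 * this excerpt.
 */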
Eric Holmberg8ed30f22012-05-10 19:16:51 -060055enum {
56 MSM_SMUX_DEBUG = 1U << 0,
57 MSM_SMUX_INFO = 1U << 1,
58 MSM_SMUX_POWER_INFO = 1U << 2,
59 MSM_SMUX_PKT = 1U << 3,
60};
61
62static int smux_debug_mask;
63module_param_named(debug_mask, smux_debug_mask,
64 int, S_IRUGO | S_IWUSR | S_IWGRP);
65
66/* Simulated wakeup used for testing */
67int smux_byte_loopback;
68module_param_named(byte_loopback, smux_byte_loopback,
69 int, S_IRUGO | S_IWUSR | S_IWGRP);
70int smux_simulate_wakeup_delay = 1;
71module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73
74#define SMUX_DBG(x...) do { \
75 if (smux_debug_mask & MSM_SMUX_DEBUG) \
76 pr_info(x); \
77} while (0)
78
Eric Holmbergff0b0112012-06-08 15:06:57 -060079#define SMUX_PWR(x...) do { \
80 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
81 pr_info(x); \
82} while (0)
83
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -060084#define SMUX_PWR_PKT_RX(pkt) do { \
85 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
86 smux_log_pkt(pkt, 1); \
87} while (0)
88
89#define SMUX_PWR_PKT_TX(pkt) do { \
90 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
91 if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
92 pkt->hdr.flags == SMUX_WAKEUP_ACK) \
93 pr_info("smux: TX Wakeup ACK\n"); \
94 else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
95 pkt->hdr.flags == SMUX_WAKEUP_REQ) \
96 pr_info("smux: TX Wakeup REQ\n"); \
97 else \
98 smux_log_pkt(pkt, 0); \
99 } \
100} while (0)
101
102#define SMUX_PWR_BYTE_TX(pkt) do { \
103 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
104 smux_log_pkt(pkt, 0); \
105 } \
106} while (0)
107
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600108#define SMUX_LOG_PKT_RX(pkt) do { \
109 if (smux_debug_mask & MSM_SMUX_PKT) \
110 smux_log_pkt(pkt, 1); \
111} while (0)
112
113#define SMUX_LOG_PKT_TX(pkt) do { \
114 if (smux_debug_mask & MSM_SMUX_PKT) \
115 smux_log_pkt(pkt, 0); \
116} while (0)
117
118/**
119 * Return true if channel is fully opened (both
120 * local and remote sides are in the OPENED state).
121 */
122#define IS_FULLY_OPENED(ch) \
123 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
124 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
125
126static struct platform_device smux_devs[] = {
127 {.name = "SMUX_CTL", .id = -1},
128 {.name = "SMUX_RMNET", .id = -1},
129 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
130 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
131 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
132 {.name = "SMUX_DIAG", .id = -1},
133};
134
135enum {
136 SMUX_CMD_STATUS_RTC = 1 << 0,
137 SMUX_CMD_STATUS_RTR = 1 << 1,
138 SMUX_CMD_STATUS_RI = 1 << 2,
139 SMUX_CMD_STATUS_DCD = 1 << 3,
140 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
141};
142
143/* Channel mode */
144enum {
145 SMUX_LCH_MODE_NORMAL,
146 SMUX_LCH_MODE_LOCAL_LOOPBACK,
147 SMUX_LCH_MODE_REMOTE_LOOPBACK,
148};
149
150enum {
151 SMUX_RX_IDLE,
152 SMUX_RX_MAGIC,
153 SMUX_RX_HDR,
154 SMUX_RX_PAYLOAD,
155 SMUX_RX_FAILURE,
156};
157
158/**
159 * Power states.
160 *
161 * The _FLUSH states are internal transitional states and are not part of the
162 * official state machine.
163 */
164enum {
165 SMUX_PWR_OFF,
166 SMUX_PWR_TURNING_ON,
167 SMUX_PWR_ON,
168 SMUX_PWR_TURNING_OFF_FLUSH,
169 SMUX_PWR_TURNING_OFF,
170 SMUX_PWR_OFF_FLUSH,
171};
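
/*
 * Illustrative power-state flow (editor's sketch inferred from the handlers
 * in this file; the TX and inactivity workers that drive the remaining
 * transitions are not part of this excerpt):
 *
 *	SMUX_PWR_OFF --------------(local TX wakeup)-----> SMUX_PWR_TURNING_ON
 *	SMUX_PWR_TURNING_ON --(wakeup ACK / remote req)--> SMUX_PWR_ON
 *	SMUX_PWR_ON ----------(remote sleep request)-----> SMUX_PWR_TURNING_OFF_FLUSH
 *	SMUX_PWR_TURNING_OFF_FLUSH --(ack sent by TX)----> SMUX_PWR_TURNING_OFF
 *	SMUX_PWR_TURNING_OFF --(local sleep request ACK)-> SMUX_PWR_OFF_FLUSH
 *	SMUX_PWR_OFF_FLUSH ----(UART flushed and off)----> SMUX_PWR_OFF
 */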
172
/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple
 * locks are required, only locks with increasing hierarchy numbers may be
 * acquired, which prevents deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since that would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
190struct smux_lch_t {
191 /* channel state */
192 spinlock_t state_lock_lhb1;
193 uint8_t lcid;
194 unsigned local_state;
195 unsigned local_mode;
196 uint8_t local_tiocm;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600197 unsigned options;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600198
199 unsigned remote_state;
200 unsigned remote_mode;
201 uint8_t remote_tiocm;
202
203 int tx_flow_control;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600204 int rx_flow_control_auto;
205 int rx_flow_control_client;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600206
207 /* client callbacks and private data */
208 void *priv;
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
211 int size);
212
Eric Holmbergb8435c82012-06-05 14:51:29 -0600213 /* RX Info */
214 struct list_head rx_retry_queue;
215 unsigned rx_retry_queue_cnt;
216 struct delayed_work rx_retry_work;
217
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600218 /* TX Info */
219 spinlock_t tx_lock_lhb2;
220 struct list_head tx_queue;
221 struct list_head tx_ready_list;
222 unsigned tx_pending_data_cnt;
223 unsigned notify_lwm;
224};
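
/*
 * Example of the locking hierarchy described above (editor's illustrative
 * sketch): state_lock_lhb1 (level b1) may be held while acquiring
 * tx_lock_lhb2 (level b2), but never the reverse.
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);
 *	... manipulate ch->tx_queue ...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 */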
225
226union notifier_metadata {
227 struct smux_meta_disconnected disconnected;
228 struct smux_meta_read read;
229 struct smux_meta_write write;
230 struct smux_meta_tiocm tiocm;
231};
232
233struct smux_notify_handle {
234 void (*notify)(void *priv, int event_type, const void *metadata);
235 void *priv;
236 int event_type;
237 union notifier_metadata *metadata;
238};
239
/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The packet is held here temporarily and the get_rx_buffer()
 * call is retried later.
 */
247struct smux_rx_pkt_retry {
248 struct smux_pkt_t *pkt;
249 struct list_head rx_retry_list;
250 unsigned timeout_in_ms;
251};
252
253/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600254 * Receive worker data structure.
255 *
256 * One instance is created for every call to smux_rx_state_machine.
257 */
258struct smux_rx_worker_data {
259 const unsigned char *data;
260 int len;
261 int flag;
262
263 struct work_struct work;
264 struct completion work_complete;
265};
266
/**
 * Line discipline and module structure.
 *
 * Only one instance exists since multiple instances of the line discipline
 * are not allowed.
 */
273struct smux_ldisc_t {
Eric Holmberged1f00c2012-06-07 09:45:18 -0600274 struct mutex mutex_lha0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600275
276 int is_initialized;
277 int in_reset;
278 int ld_open_count;
279 struct tty_struct *tty;
280
	/* RX State Machine (single-threaded access by smux_rx_wq) */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600282 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
283 unsigned int recv_len;
284 unsigned int pkt_remain;
285 unsigned rx_state;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600286
287 /* RX Activity - accessed by multiple threads */
288 spinlock_t rx_lock_lha1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600289 unsigned rx_activity_flag;
290
291 /* TX / Power */
292 spinlock_t tx_lock_lha2;
293 struct list_head lch_tx_ready_list;
294 unsigned power_state;
295 unsigned pwr_wakeup_delay_us;
296 unsigned tx_activity_flag;
297 unsigned powerdown_enabled;
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600298 struct list_head power_queue;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600299};
300
301
302/* data structures */
303static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
304static struct smux_ldisc_t smux;
305static const char *tty_error_type[] = {
306 [TTY_NORMAL] = "normal",
307 [TTY_OVERRUN] = "overrun",
308 [TTY_BREAK] = "break",
309 [TTY_PARITY] = "parity",
310 [TTY_FRAME] = "framing",
311};
312
313static const char *smux_cmds[] = {
314 [SMUX_CMD_DATA] = "DATA",
315 [SMUX_CMD_OPEN_LCH] = "OPEN",
316 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
317 [SMUX_CMD_STATUS] = "STATUS",
318 [SMUX_CMD_PWR_CTL] = "PWR",
319 [SMUX_CMD_BYTE] = "Raw Byte",
320};
321
322static void smux_notify_local_fn(struct work_struct *work);
323static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
324
325static struct workqueue_struct *smux_notify_wq;
326static size_t handle_size;
327static struct kfifo smux_notify_fifo;
328static int queued_fifo_notifications;
329static DEFINE_SPINLOCK(notify_lock_lhc1);
330
331static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600332static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600333static void smux_tx_worker(struct work_struct *work);
334static DECLARE_WORK(smux_tx_work, smux_tx_worker);
335
336static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600337static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600338static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600339static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
340static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
341
342static void smux_inactivity_worker(struct work_struct *work);
343static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
344static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
345 smux_inactivity_worker);
346
347static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
348static void list_channel(struct smux_lch_t *ch);
349static int smux_send_status_cmd(struct smux_lch_t *ch);
350static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600351static void smux_flush_tty(void);
Eric Holmberged1f00c2012-06-07 09:45:18 -0600352static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
353static int schedule_notify(uint8_t lcid, int event,
354 const union notifier_metadata *metadata);
355static int ssr_notifier_cb(struct notifier_block *this,
356 unsigned long code,
357 void *data);
Eric Holmberg92a67df2012-06-25 13:56:24 -0600358static void smux_uart_power_on_atomic(void);
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600359static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600360
361/**
362 * Convert TTY Error Flags to string for logging purposes.
363 *
364 * @flag TTY_* flag
365 * @returns String description or NULL if unknown
366 */
367static const char *tty_flag_to_str(unsigned flag)
368{
369 if (flag < ARRAY_SIZE(tty_error_type))
370 return tty_error_type[flag];
371 return NULL;
372}
373
374/**
375 * Convert SMUX Command to string for logging purposes.
376 *
377 * @cmd SMUX command
378 * @returns String description or NULL if unknown
379 */
380static const char *cmd_to_str(unsigned cmd)
381{
382 if (cmd < ARRAY_SIZE(smux_cmds))
383 return smux_cmds[cmd];
384 return NULL;
385}
386
387/**
388 * Set the reset state due to an unrecoverable failure.
389 */
390static void smux_enter_reset(void)
391{
392 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
393 smux.in_reset = 1;
394}
395
396static int lch_init(void)
397{
398 unsigned int id;
399 struct smux_lch_t *ch;
400 int i = 0;
401
402 handle_size = sizeof(struct smux_notify_handle *);
403
404 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
405 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600406 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600407
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		/* create_singlethread_workqueue() returns NULL on failure */
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}
413
414 i |= kfifo_alloc(&smux_notify_fifo,
415 SMUX_NOTIFY_FIFO_SIZE * handle_size,
416 GFP_KERNEL);
417 i |= smux_loopback_init();
418
419 if (i) {
420 pr_err("%s: out of memory error\n", __func__);
421 return -ENOMEM;
422 }
423
424 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
425 ch = &smux_lch[id];
426
427 spin_lock_init(&ch->state_lock_lhb1);
428 ch->lcid = id;
429 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
430 ch->local_mode = SMUX_LCH_MODE_NORMAL;
431 ch->local_tiocm = 0x0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600432 ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600433 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
434 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
435 ch->remote_tiocm = 0x0;
436 ch->tx_flow_control = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600437 ch->rx_flow_control_auto = 0;
438 ch->rx_flow_control_client = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600439 ch->priv = 0;
440 ch->notify = 0;
441 ch->get_rx_buffer = 0;
442
Eric Holmbergb8435c82012-06-05 14:51:29 -0600443 INIT_LIST_HEAD(&ch->rx_retry_queue);
444 ch->rx_retry_queue_cnt = 0;
445 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
446
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600447 spin_lock_init(&ch->tx_lock_lhb2);
448 INIT_LIST_HEAD(&ch->tx_queue);
449 INIT_LIST_HEAD(&ch->tx_ready_list);
450 ch->tx_pending_data_cnt = 0;
451 ch->notify_lwm = 0;
452 }
453
454 return 0;
455}
456
/**
 * Empty and clean up all SMUX logical channels for subsystem restart or
 * line discipline disconnect.
 */
461static void smux_lch_purge(void)
462{
463 struct smux_lch_t *ch;
464 unsigned long flags;
465 int i;
466
467 /* Empty TX ready list */
468 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
469 while (!list_empty(&smux.lch_tx_ready_list)) {
470 SMUX_DBG("%s: emptying ready list %p\n",
471 __func__, smux.lch_tx_ready_list.next);
472 ch = list_first_entry(&smux.lch_tx_ready_list,
473 struct smux_lch_t,
474 tx_ready_list);
475 list_del(&ch->tx_ready_list);
476 INIT_LIST_HEAD(&ch->tx_ready_list);
477 }
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600478
479 /* Purge Power Queue */
480 while (!list_empty(&smux.power_queue)) {
481 struct smux_pkt_t *pkt;
482
483 pkt = list_first_entry(&smux.power_queue,
484 struct smux_pkt_t,
485 list);
Eric Holmberg6b19f7f2012-06-15 09:53:52 -0600486 list_del(&pkt->list);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600487 SMUX_DBG("%s: emptying power queue pkt=%p\n",
488 __func__, pkt);
489 smux_free_pkt(pkt);
490 }
Eric Holmberged1f00c2012-06-07 09:45:18 -0600491 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
492
493 /* Close all ports */
494 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
495 ch = &smux_lch[i];
496 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
497
498 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
499
500 /* Purge TX queue */
501 spin_lock(&ch->tx_lock_lhb2);
502 smux_purge_ch_tx_queue(ch);
503 spin_unlock(&ch->tx_lock_lhb2);
504
505 /* Notify user of disconnect and reset channel state */
506 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
507 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
508 union notifier_metadata meta;
509
510 meta.disconnected.is_ssr = smux.in_reset;
511 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
512 }
513
514 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
515 ch->local_mode = SMUX_LCH_MODE_NORMAL;
516 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
517 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
518 ch->tx_flow_control = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600519 ch->rx_flow_control_auto = 0;
520 ch->rx_flow_control_client = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -0600521
522 /* Purge RX retry queue */
523 if (ch->rx_retry_queue_cnt)
524 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
525
526 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
527 }
528
529 /* Flush TX/RX workqueues */
530 SMUX_DBG("%s: flushing tx wq\n", __func__);
531 flush_workqueue(smux_tx_wq);
532 SMUX_DBG("%s: flushing rx wq\n", __func__);
533 flush_workqueue(smux_rx_wq);
534}
535
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600536int smux_assert_lch_id(uint32_t lcid)
537{
538 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
539 return -ENXIO;
540 else
541 return 0;
542}
543
544/**
545 * Log packet information for debug purposes.
546 *
547 * @pkt Packet to log
548 * @is_recv 1 = RX packet; 0 = TX Packet
549 *
550 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
551 *
552 * PKT Info:
553 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
554 *
555 * Direction: R = Receive, S = Send
556 * Local State: C = Closed; c = closing; o = opening; O = Opened
557 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
558 * Remote State: C = Closed; O = Opened
559 * Remote Mode: R = Remote loopback; N = Normal
560 */
561static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
562{
563 char logbuf[SMUX_PKT_LOG_SIZE];
564 char cmd_extra[16];
565 int i = 0;
566 int count;
567 int len;
568 char local_state;
569 char local_mode;
570 char remote_state;
571 char remote_mode;
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600572 struct smux_lch_t *ch = NULL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600573 unsigned char *data;
574
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600575 if (!smux_assert_lch_id(pkt->hdr.lcid))
576 ch = &smux_lch[pkt->hdr.lcid];
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600577
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600578 if (ch) {
579 switch (ch->local_state) {
580 case SMUX_LCH_LOCAL_CLOSED:
581 local_state = 'C';
582 break;
583 case SMUX_LCH_LOCAL_OPENING:
584 local_state = 'o';
585 break;
586 case SMUX_LCH_LOCAL_OPENED:
587 local_state = 'O';
588 break;
589 case SMUX_LCH_LOCAL_CLOSING:
590 local_state = 'c';
591 break;
592 default:
593 local_state = 'U';
594 break;
595 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600596
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600597 switch (ch->local_mode) {
598 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
599 local_mode = 'L';
600 break;
601 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
602 local_mode = 'R';
603 break;
604 case SMUX_LCH_MODE_NORMAL:
605 local_mode = 'N';
606 break;
607 default:
608 local_mode = 'U';
609 break;
610 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600611
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600612 switch (ch->remote_state) {
613 case SMUX_LCH_REMOTE_CLOSED:
614 remote_state = 'C';
615 break;
616 case SMUX_LCH_REMOTE_OPENED:
617 remote_state = 'O';
618 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600619
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600620 default:
621 remote_state = 'U';
622 break;
623 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600624
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600625 switch (ch->remote_mode) {
626 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
627 remote_mode = 'R';
628 break;
629 case SMUX_LCH_MODE_NORMAL:
630 remote_mode = 'N';
631 break;
632 default:
633 remote_mode = 'U';
634 break;
635 }
636 } else {
637 /* broadcast channel */
638 local_state = '-';
639 local_mode = '-';
640 remote_state = '-';
641 remote_mode = '-';
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600642 }
643
644 /* determine command type (ACK, etc) */
645 cmd_extra[0] = '\0';
646 switch (pkt->hdr.cmd) {
647 case SMUX_CMD_OPEN_LCH:
648 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
649 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
650 break;
651 case SMUX_CMD_CLOSE_LCH:
652 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
653 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
654 break;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -0600655
656 case SMUX_CMD_PWR_CTL:
657 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
658 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
659 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600660 };
661
662 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
663 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
664 is_recv ? 'R' : 'S', pkt->hdr.lcid,
665 local_state, local_mode,
666 remote_state, remote_mode,
667 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
668 pkt->hdr.payload_len, pkt->hdr.pad_len);
669
670 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
671 data = (unsigned char *)pkt->payload;
672 for (count = 0; count < len; count++)
673 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
674 "%02x ", (unsigned)data[count]);
675
676 pr_info("%s\n", logbuf);
677}
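
/*
 * Example log line produced by smux_log_pkt() (hypothetical values):
 *
 *	smux: R5 ON:ON DATA flags 0 len 4:0 de ad be ef
 *
 * i.e. a received DATA packet on lcid 5, local state/mode Opened/Normal,
 * remote state/mode Opened/Normal, 4 payload bytes and no padding.
 */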
678
679static void smux_notify_local_fn(struct work_struct *work)
680{
681 struct smux_notify_handle *notify_handle = NULL;
682 union notifier_metadata *metadata = NULL;
683 unsigned long flags;
684 int i;
685
686 for (;;) {
687 /* retrieve notification */
688 spin_lock_irqsave(&notify_lock_lhc1, flags);
689 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
690 i = kfifo_out(&smux_notify_fifo,
691 &notify_handle,
692 handle_size);
693 if (i != handle_size) {
694 pr_err("%s: unable to retrieve handle %d expected %d\n",
695 __func__, i, handle_size);
696 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
697 break;
698 }
699 } else {
700 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
701 break;
702 }
703 --queued_fifo_notifications;
704 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
705
706 /* notify client */
707 metadata = notify_handle->metadata;
708 notify_handle->notify(notify_handle->priv,
709 notify_handle->event_type,
710 metadata);
711
712 kfree(metadata);
713 kfree(notify_handle);
714 }
715}
716
717/**
718 * Initialize existing packet.
719 */
720void smux_init_pkt(struct smux_pkt_t *pkt)
721{
722 memset(pkt, 0x0, sizeof(*pkt));
723 pkt->hdr.magic = SMUX_MAGIC;
724 INIT_LIST_HEAD(&pkt->list);
725}
726
/**
 * Allocate and initialize a packet.
 *
 * If a payload is needed, either set it directly and ensure that it is
 * freed, or use smux_alloc_pkt_payload() to allocate the payload so that it
 * is freed automatically when smux_free_pkt() is called.
 */
734struct smux_pkt_t *smux_alloc_pkt(void)
735{
736 struct smux_pkt_t *pkt;
737
738 /* Consider a free list implementation instead of kmalloc */
739 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
740 if (!pkt) {
741 pr_err("%s: out of memory\n", __func__);
742 return NULL;
743 }
744 smux_init_pkt(pkt);
745 pkt->allocated = 1;
746
747 return pkt;
748}
749
750/**
751 * Free packet.
752 *
753 * @pkt Packet to free (may be NULL)
754 *
755 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
756 * well. Otherwise, the caller is responsible for freeing the payload.
757 */
758void smux_free_pkt(struct smux_pkt_t *pkt)
759{
760 if (pkt) {
761 if (pkt->free_payload)
762 kfree(pkt->payload);
763 if (pkt->allocated)
764 kfree(pkt);
765 }
766}
767
768/**
769 * Allocate packet payload.
770 *
771 * @pkt Packet to add payload to
772 *
773 * @returns 0 on success, <0 upon error
774 *
775 * A flag is set to signal smux_free_pkt() to free the payload.
776 */
777int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
778{
779 if (!pkt)
780 return -EINVAL;
781
782 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
783 pkt->free_payload = 1;
784 if (!pkt->payload) {
785 pr_err("%s: unable to malloc %d bytes for payload\n",
786 __func__, pkt->hdr.payload_len);
787 return -ENOMEM;
788 }
789
790 return 0;
791}
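
/*
 * Typical packet allocation lifecycle (editor's illustrative sketch; lcid 5
 * and the buf/len variables are placeholders):
 *
 *	struct smux_pkt_t *pkt;
 *
 *	pkt = smux_alloc_pkt();
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = 5;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, buf, len);
 *	...
 *	smux_free_pkt(pkt);	(also frees the payload)
 */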
792
793static int schedule_notify(uint8_t lcid, int event,
794 const union notifier_metadata *metadata)
795{
796 struct smux_notify_handle *notify_handle = 0;
797 union notifier_metadata *meta_copy = 0;
798 struct smux_lch_t *ch;
799 int i;
800 unsigned long flags;
801 int ret = 0;
802
803 ch = &smux_lch[lcid];
804 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
805 GFP_ATOMIC);
806 if (!notify_handle) {
807 pr_err("%s: out of memory\n", __func__);
808 ret = -ENOMEM;
809 goto free_out;
810 }
811
812 notify_handle->notify = ch->notify;
813 notify_handle->priv = ch->priv;
814 notify_handle->event_type = event;
815 if (metadata) {
816 meta_copy = kzalloc(sizeof(union notifier_metadata),
817 GFP_ATOMIC);
818 if (!meta_copy) {
819 pr_err("%s: out of memory\n", __func__);
820 ret = -ENOMEM;
821 goto free_out;
822 }
823 *meta_copy = *metadata;
824 notify_handle->metadata = meta_copy;
825 } else {
826 notify_handle->metadata = NULL;
827 }
828
829 spin_lock_irqsave(&notify_lock_lhc1, flags);
830 i = kfifo_avail(&smux_notify_fifo);
831 if (i < handle_size) {
832 pr_err("%s: fifo full error %d expected %d\n",
833 __func__, i, handle_size);
834 ret = -ENOMEM;
835 goto unlock_out;
836 }
837
838 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
839 if (i < 0 || i != handle_size) {
840 pr_err("%s: fifo not available error %d (expected %d)\n",
841 __func__, i, handle_size);
842 ret = -ENOSPC;
843 goto unlock_out;
844 }
845 ++queued_fifo_notifications;
846
847unlock_out:
848 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
849
850free_out:
851 queue_work(smux_notify_wq, &smux_notify_local);
852 if (ret < 0 && notify_handle) {
853 kfree(notify_handle->metadata);
854 kfree(notify_handle);
855 }
856 return ret;
857}
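
/*
 * Notification delivery pipeline (summary of schedule_notify() and
 * smux_notify_local_fn() above):
 *
 *	schedule_notify(lcid, event, meta)
 *	  -> kfifo_in(&smux_notify_fifo, &notify_handle, handle_size)
 *	  -> queue_work(smux_notify_wq, &smux_notify_local)
 *	  -> smux_notify_local_fn(): kfifo_out() and ch->notify(priv, event, meta)
 *
 * Clients therefore receive their callbacks from the smux_notify_wq worker,
 * not from the context that generated the event.
 */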
858
859/**
860 * Returns the serialized size of a packet.
861 *
862 * @pkt Packet to serialize
863 *
864 * @returns Serialized length of packet
865 */
866static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
867{
868 unsigned int size;
869
870 size = sizeof(struct smux_hdr_t);
871 size += pkt->hdr.payload_len;
872 size += pkt->hdr.pad_len;
873
874 return size;
875}
876
877/**
878 * Serialize packet @pkt into output buffer @data.
879 *
880 * @pkt Packet to serialize
881 * @out Destination buffer pointer
882 * @out_len Size of serialized packet
883 *
884 * @returns 0 for success
885 */
886int smux_serialize(struct smux_pkt_t *pkt, char *out,
887 unsigned int *out_len)
888{
889 char *data_start = out;
890
891 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
892 pr_err("%s: packet size %d too big\n",
893 __func__, smux_serialize_size(pkt));
894 return -E2BIG;
895 }
896
897 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
898 out += sizeof(struct smux_hdr_t);
899 if (pkt->payload) {
900 memcpy(out, pkt->payload, pkt->hdr.payload_len);
901 out += pkt->hdr.payload_len;
902 }
903 if (pkt->hdr.pad_len) {
904 memset(out, 0x0, pkt->hdr.pad_len);
905 out += pkt->hdr.pad_len;
906 }
907 *out_len = out - data_start;
908 return 0;
909}
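
/*
 * Serialization usage sketch (editor's illustration; assumes the packet
 * header fields have already been filled in):
 *
 *	char buf[SMUX_MAX_PKT_SIZE];
 *	unsigned int len;
 *
 *	if (smux_serialize(pkt, buf, &len) == 0)
 *		write_to_tty(buf, len);	(header, payload and pad bytes)
 *
 * smux_tx_tty() below avoids the extra copy by writing the header, payload
 * and padding to the TTY separately.
 */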
910
911/**
912 * Serialize header and provide pointer to the data.
913 *
914 * @pkt Packet
915 * @out[out] Pointer to the serialized header data
916 * @out_len[out] Pointer to the serialized header length
917 */
918static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
919 unsigned int *out_len)
920{
921 *out = (char *)&pkt->hdr;
922 *out_len = sizeof(struct smux_hdr_t);
923}
924
925/**
926 * Serialize payload and provide pointer to the data.
927 *
928 * @pkt Packet
929 * @out[out] Pointer to the serialized payload data
930 * @out_len[out] Pointer to the serialized payload length
931 */
932static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
933 unsigned int *out_len)
934{
935 *out = pkt->payload;
936 *out_len = pkt->hdr.payload_len;
937}
938
/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
949static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
950 unsigned int *out_len)
951{
952 *out = NULL;
953 *out_len = pkt->hdr.pad_len;
954}
955
956/**
957 * Write data to TTY framework and handle breaking the writes up if needed.
958 *
959 * @data Data to write
960 * @len Length of data
961 *
962 * @returns 0 for success, < 0 for failure
963 */
964static int write_to_tty(char *data, unsigned len)
965{
966 int data_written;
967
968 if (!data)
969 return 0;
970
Eric Holmberged1f00c2012-06-07 09:45:18 -0600971 while (len > 0 && !smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600972 data_written = smux.tty->ops->write(smux.tty, data, len);
973 if (data_written >= 0) {
974 len -= data_written;
975 data += data_written;
976 } else {
977 pr_err("%s: TTY write returned error %d\n",
978 __func__, data_written);
979 return data_written;
980 }
981
982 if (len)
983 tty_wait_until_sent(smux.tty,
984 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600985 }
986 return 0;
987}
988
989/**
990 * Write packet to TTY.
991 *
992 * @pkt packet to write
993 *
994 * @returns 0 on success
995 */
996static int smux_tx_tty(struct smux_pkt_t *pkt)
997{
998 char *data;
999 unsigned int len;
1000 int ret;
1001
1002 if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
1004 return -ENOTTY;
1005 }
1006
1007 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
1008 SMUX_DBG("%s: tty send single byte\n", __func__);
1009 ret = write_to_tty(&pkt->hdr.flags, 1);
1010 return ret;
1011 }
1012
1013 smux_serialize_hdr(pkt, &data, &len);
1014 ret = write_to_tty(data, len);
1015 if (ret) {
1016 pr_err("%s: failed %d to write header %d\n",
1017 __func__, ret, len);
1018 return ret;
1019 }
1020
1021 smux_serialize_payload(pkt, &data, &len);
1022 ret = write_to_tty(data, len);
1023 if (ret) {
1024 pr_err("%s: failed %d to write payload %d\n",
1025 __func__, ret, len);
1026 return ret;
1027 }
1028
1029 smux_serialize_padding(pkt, &data, &len);
1030 while (len > 0) {
1031 char zero = 0x0;
1032 ret = write_to_tty(&zero, 1);
1033 if (ret) {
1034 pr_err("%s: failed %d to write padding %d\n",
1035 __func__, ret, len);
1036 return ret;
1037 }
1038 --len;
1039 }
1040 return 0;
1041}
1042
1043/**
1044 * Send a single character.
1045 *
1046 * @ch Character to send
1047 */
1048static void smux_send_byte(char ch)
1049{
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001050 struct smux_pkt_t *pkt;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001051
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001052 pkt = smux_alloc_pkt();
1053 if (!pkt) {
1054 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1055 return;
1056 }
1057 pkt->hdr.cmd = SMUX_CMD_BYTE;
1058 pkt->hdr.flags = ch;
1059 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001060
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001061 list_add_tail(&pkt->list, &smux.power_queue);
1062 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001063}
1064
1065/**
1066 * Receive a single-character packet (used for internal testing).
1067 *
1068 * @ch Character to receive
1069 * @lcid Logical channel ID for packet
1070 *
1071 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001072 */
1073static int smux_receive_byte(char ch, int lcid)
1074{
1075 struct smux_pkt_t pkt;
1076
1077 smux_init_pkt(&pkt);
1078 pkt.hdr.lcid = lcid;
1079 pkt.hdr.cmd = SMUX_CMD_BYTE;
1080 pkt.hdr.flags = ch;
1081
1082 return smux_dispatch_rx_pkt(&pkt);
1083}
1084
1085/**
1086 * Queue packet for transmit.
1087 *
1088 * @pkt_ptr Packet to queue
1089 * @ch Channel to queue packet on
1090 * @queue Queue channel on ready list
1091 */
1092static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1093 int queue)
1094{
1095 unsigned long flags;
1096
1097 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1098
1099 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1100 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1101 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1102
1103 if (queue)
1104 list_channel(ch);
1105}
1106
1107/**
1108 * Handle receive OPEN ACK command.
1109 *
1110 * @pkt Received packet
1111 *
1112 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001113 */
1114static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1115{
1116 uint8_t lcid;
1117 int ret;
1118 struct smux_lch_t *ch;
1119 int enable_powerdown = 0;
1120
1121 lcid = pkt->hdr.lcid;
1122 ch = &smux_lch[lcid];
1123
1124 spin_lock(&ch->state_lock_lhb1);
1125 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1126 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1127 ch->local_state,
1128 SMUX_LCH_LOCAL_OPENED);
1129
1130 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1131 enable_powerdown = 1;
1132
1133 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1134 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1135 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1136 ret = 0;
1137 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1138 SMUX_DBG("Remote loopback OPEN ACK received\n");
1139 ret = 0;
1140 } else {
1141 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1142 __func__, lcid, ch->local_state);
1143 ret = -EINVAL;
1144 }
1145 spin_unlock(&ch->state_lock_lhb1);
1146
1147 if (enable_powerdown) {
1148 spin_lock(&smux.tx_lock_lha2);
1149 if (!smux.powerdown_enabled) {
1150 smux.powerdown_enabled = 1;
1151 SMUX_DBG("%s: enabling power-collapse support\n",
1152 __func__);
1153 }
1154 spin_unlock(&smux.tx_lock_lha2);
1155 }
1156
1157 return ret;
1158}
1159
1160static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1161{
1162 uint8_t lcid;
1163 int ret;
1164 struct smux_lch_t *ch;
1165 union notifier_metadata meta_disconnected;
1166 unsigned long flags;
1167
1168 lcid = pkt->hdr.lcid;
1169 ch = &smux_lch[lcid];
1170 meta_disconnected.disconnected.is_ssr = 0;
1171
1172 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1173
1174 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1175 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1176 SMUX_LCH_LOCAL_CLOSING,
1177 SMUX_LCH_LOCAL_CLOSED);
1178 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1179 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1180 schedule_notify(lcid, SMUX_DISCONNECTED,
1181 &meta_disconnected);
1182 ret = 0;
1183 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1184 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1185 ret = 0;
1186 } else {
1187 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1188 __func__, lcid, ch->local_state);
1189 ret = -EINVAL;
1190 }
1191 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1192 return ret;
1193}
1194
1195/**
1196 * Handle receive OPEN command.
1197 *
1198 * @pkt Received packet
1199 *
1200 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001201 */
1202static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1203{
1204 uint8_t lcid;
1205 int ret;
1206 struct smux_lch_t *ch;
1207 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001208 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001209 int tx_ready = 0;
1210 int enable_powerdown = 0;
1211
1212 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1213 return smux_handle_rx_open_ack(pkt);
1214
1215 lcid = pkt->hdr.lcid;
1216 ch = &smux_lch[lcid];
1217
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001218 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001219
1220 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1221 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1222 SMUX_LCH_REMOTE_CLOSED,
1223 SMUX_LCH_REMOTE_OPENED);
1224
1225 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1226 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1227 enable_powerdown = 1;
1228
1229 /* Send Open ACK */
1230 ack_pkt = smux_alloc_pkt();
1231 if (!ack_pkt) {
1232 /* exit out to allow retrying this later */
1233 ret = -ENOMEM;
1234 goto out;
1235 }
1236 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1237 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1238 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1239 ack_pkt->hdr.lcid = lcid;
1240 ack_pkt->hdr.payload_len = 0;
1241 ack_pkt->hdr.pad_len = 0;
1242 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1243 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1244 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1245 }
1246 smux_tx_queue(ack_pkt, ch, 0);
1247 tx_ready = 1;
1248
1249 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1250 /*
1251 * Send an Open command to the remote side to
1252 * simulate our local client doing it.
1253 */
1254 ack_pkt = smux_alloc_pkt();
1255 if (ack_pkt) {
1256 ack_pkt->hdr.lcid = lcid;
1257 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1258 ack_pkt->hdr.flags =
1259 SMUX_CMD_OPEN_POWER_COLLAPSE;
1260 ack_pkt->hdr.payload_len = 0;
1261 ack_pkt->hdr.pad_len = 0;
1262 smux_tx_queue(ack_pkt, ch, 0);
1263 tx_ready = 1;
1264 } else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
1267 }
1268 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1269 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1270 }
1271 ret = 0;
1272 } else {
1273 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1274 __func__, lcid, ch->remote_state);
1275 ret = -EINVAL;
1276 }
1277
1278out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001279 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001280
1281 if (enable_powerdown) {
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001282 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8b9a6402012-06-05 13:32:57 -06001283 if (!smux.powerdown_enabled) {
1284 smux.powerdown_enabled = 1;
1285 SMUX_DBG("%s: enabling power-collapse support\n",
1286 __func__);
1287 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001288 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001289 }
1290
1291 if (tx_ready)
1292 list_channel(ch);
1293
1294 return ret;
1295}
1296
1297/**
1298 * Handle receive CLOSE command.
1299 *
1300 * @pkt Received packet
1301 *
1302 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001303 */
1304static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1305{
1306 uint8_t lcid;
1307 int ret;
1308 struct smux_lch_t *ch;
1309 struct smux_pkt_t *ack_pkt;
1310 union notifier_metadata meta_disconnected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001311 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001312 int tx_ready = 0;
1313
1314 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1315 return smux_handle_close_ack(pkt);
1316
1317 lcid = pkt->hdr.lcid;
1318 ch = &smux_lch[lcid];
1319 meta_disconnected.disconnected.is_ssr = 0;
1320
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001321 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001322 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1323 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1324 SMUX_LCH_REMOTE_OPENED,
1325 SMUX_LCH_REMOTE_CLOSED);
1326
1327 ack_pkt = smux_alloc_pkt();
1328 if (!ack_pkt) {
1329 /* exit out to allow retrying this later */
1330 ret = -ENOMEM;
1331 goto out;
1332 }
1333 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1334 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1335 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1336 ack_pkt->hdr.lcid = lcid;
1337 ack_pkt->hdr.payload_len = 0;
1338 ack_pkt->hdr.pad_len = 0;
1339 smux_tx_queue(ack_pkt, ch, 0);
1340 tx_ready = 1;
1341
1342 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1343 /*
1344 * Send a Close command to the remote side to simulate
1345 * our local client doing it.
1346 */
1347 ack_pkt = smux_alloc_pkt();
1348 if (ack_pkt) {
1349 ack_pkt->hdr.lcid = lcid;
1350 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1351 ack_pkt->hdr.flags = 0;
1352 ack_pkt->hdr.payload_len = 0;
1353 ack_pkt->hdr.pad_len = 0;
1354 smux_tx_queue(ack_pkt, ch, 0);
1355 tx_ready = 1;
1356 } else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
1359 }
1360 }
1361
1362 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1363 schedule_notify(lcid, SMUX_DISCONNECTED,
1364 &meta_disconnected);
1365 ret = 0;
1366 } else {
1367 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1368 __func__, lcid, ch->remote_state);
1369 ret = -EINVAL;
1370 }
1371out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001372 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001373 if (tx_ready)
1374 list_channel(ch);
1375
1376 return ret;
1377}
1378
1379/*
1380 * Handle receive DATA command.
1381 *
1382 * @pkt Received packet
1383 *
1384 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001385 */
1386static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1387{
1388 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001389 int ret = 0;
1390 int do_retry = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001391 int tx_ready = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001392 int tmp;
1393 int rx_len;
1394 struct smux_lch_t *ch;
1395 union notifier_metadata metadata;
1396 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001397 struct smux_pkt_t *ack_pkt;
1398 unsigned long flags;
1399
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001400 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1401 ret = -ENXIO;
1402 goto out;
1403 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001404
Eric Holmbergb8435c82012-06-05 14:51:29 -06001405 rx_len = pkt->hdr.payload_len;
1406 if (rx_len == 0) {
1407 ret = -EINVAL;
1408 goto out;
1409 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001410
1411 lcid = pkt->hdr.lcid;
1412 ch = &smux_lch[lcid];
1413 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1414 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1415
1416 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1417 && !remote_loopback) {
1418 pr_err("smux: ch %d error data on local state 0x%x",
1419 lcid, ch->local_state);
1420 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001421 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001422 goto out;
1423 }
1424
1425 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1426 pr_err("smux: ch %d error data on remote state 0x%x",
1427 lcid, ch->remote_state);
1428 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001429 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001430 goto out;
1431 }
1432
Eric Holmbergb8435c82012-06-05 14:51:29 -06001433 if (!list_empty(&ch->rx_retry_queue)) {
1434 do_retry = 1;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001435
1436 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
1437 !ch->rx_flow_control_auto &&
1438 ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
1439 /* need to flow control RX */
1440 ch->rx_flow_control_auto = 1;
1441 tx_ready |= smux_rx_flow_control_updated(ch);
1442 schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
1443 NULL);
1444 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06001445 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1446 /* retry queue full */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001447 pr_err("%s: ch %d RX retry queue full\n",
1448 __func__, lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001449 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1450 ret = -ENOMEM;
1451 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1452 goto out;
1453 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001454 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001455 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001456
Eric Holmbergb8435c82012-06-05 14:51:29 -06001457 if (remote_loopback) {
1458 /* Echo the data back to the remote client. */
1459 ack_pkt = smux_alloc_pkt();
1460 if (ack_pkt) {
1461 ack_pkt->hdr.lcid = lcid;
1462 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1463 ack_pkt->hdr.flags = 0;
1464 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1465 if (ack_pkt->hdr.payload_len) {
1466 smux_alloc_pkt_payload(ack_pkt);
1467 memcpy(ack_pkt->payload, pkt->payload,
1468 ack_pkt->hdr.payload_len);
1469 }
1470 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1471 smux_tx_queue(ack_pkt, ch, 0);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001472 tx_ready = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001473 } else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
1476 }
1477 } else if (!do_retry) {
1478 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001479 metadata.read.pkt_priv = 0;
1480 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001481 tmp = ch->get_rx_buffer(ch->priv,
1482 (void **)&metadata.read.pkt_priv,
1483 (void **)&metadata.read.buffer,
1484 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001485
Eric Holmbergb8435c82012-06-05 14:51:29 -06001486 if (tmp == 0 && metadata.read.buffer) {
1487 /* place data into RX buffer */
1488 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001489 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001490 metadata.read.len = rx_len;
1491 schedule_notify(lcid, SMUX_READ_DONE,
1492 &metadata);
1493 } else if (tmp == -EAGAIN ||
1494 (tmp == 0 && !metadata.read.buffer)) {
1495 /* buffer allocation failed - add to retry queue */
1496 do_retry = 1;
1497 } else if (tmp < 0) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001498 pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
1499 __func__, lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001500 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1501 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001502 }
1503 }
1504
Eric Holmbergb8435c82012-06-05 14:51:29 -06001505 if (do_retry) {
1506 struct smux_rx_pkt_retry *retry;
1507
1508 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1509 if (!retry) {
1510 pr_err("%s: retry alloc failure\n", __func__);
1511 ret = -ENOMEM;
1512 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1513 goto out;
1514 }
1515 INIT_LIST_HEAD(&retry->rx_retry_list);
1516 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1517
1518 /* copy packet */
1519 retry->pkt = smux_alloc_pkt();
1520 if (!retry->pkt) {
1521 kfree(retry);
1522 pr_err("%s: pkt alloc failure\n", __func__);
1523 ret = -ENOMEM;
1524 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1525 goto out;
1526 }
1527 retry->pkt->hdr.lcid = lcid;
1528 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1529 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1530 if (retry->pkt->hdr.payload_len) {
1531 smux_alloc_pkt_payload(retry->pkt);
1532 memcpy(retry->pkt->payload, pkt->payload,
1533 retry->pkt->hdr.payload_len);
1534 }
1535
1536 /* add to retry queue */
1537 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1538 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1539 ++ch->rx_retry_queue_cnt;
1540 if (ch->rx_retry_queue_cnt == 1)
1541 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1542 msecs_to_jiffies(retry->timeout_in_ms));
1543 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1544 }
1545
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001546 if (tx_ready)
1547 list_channel(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001548out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001549 return ret;
1550}
1551
1552/**
1553 * Handle receive byte command for testing purposes.
1554 *
1555 * @pkt Received packet
1556 *
1557 * @returns 0 for success
1558 */
1559static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1560{
1561 uint8_t lcid;
1562 int ret;
1563 struct smux_lch_t *ch;
1564 union notifier_metadata metadata;
1565 unsigned long flags;
1566
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001567 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1568 pr_err("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001569 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001570 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001571
1572 lcid = pkt->hdr.lcid;
1573 ch = &smux_lch[lcid];
1574 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1575
1576 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1577 pr_err("smux: ch %d error data on local state 0x%x",
1578 lcid, ch->local_state);
1579 ret = -EIO;
1580 goto out;
1581 }
1582
1583 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1584 pr_err("smux: ch %d error data on remote state 0x%x",
1585 lcid, ch->remote_state);
1586 ret = -EIO;
1587 goto out;
1588 }
1589
1590 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1591 metadata.read.buffer = 0;
1592 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1593 ret = 0;
1594
1595out:
1596 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1597 return ret;
1598}
1599
1600/**
1601 * Handle receive status command.
1602 *
1603 * @pkt Received packet
1604 *
1605 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001606 */
1607static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1608{
1609 uint8_t lcid;
	int ret = 0;
1611 struct smux_lch_t *ch;
1612 union notifier_metadata meta;
1613 unsigned long flags;
1614 int tx_ready = 0;
1615
1616 lcid = pkt->hdr.lcid;
1617 ch = &smux_lch[lcid];
1618
1619 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1620 meta.tiocm.tiocm_old = ch->remote_tiocm;
1621 meta.tiocm.tiocm_new = pkt->hdr.flags;
1622
1623 /* update logical channel flow control */
1624 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1625 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1626 /* logical channel flow control changed */
1627 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1628 /* disabled TX */
1629 SMUX_DBG("TX Flow control enabled\n");
1630 ch->tx_flow_control = 1;
1631 } else {
1632 /* re-enable channel */
1633 SMUX_DBG("TX Flow control disabled\n");
1634 ch->tx_flow_control = 0;
1635 tx_ready = 1;
1636 }
1637 }
1638 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1639 ch->remote_tiocm = pkt->hdr.flags;
1640 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1641
1642 /* client notification for status change */
1643 if (IS_FULLY_OPENED(ch)) {
1644 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1645 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1646 ret = 0;
1647 }
1648 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1649 if (tx_ready)
1650 list_channel(ch);
1651
1652 return ret;
1653}
1654
1655/**
1656 * Handle receive power command.
1657 *
1658 * @pkt Received packet
1659 *
1660 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001661 */
1662static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1663{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001664 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001665 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001666
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001667 SMUX_PWR_PKT_RX(pkt);
1668
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001669 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001670 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1671 /* local sleep request ack */
1672 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1673 /* Power-down complete, turn off UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001674 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001675 smux.power_state, SMUX_PWR_OFF_FLUSH);
1676 smux.power_state = SMUX_PWR_OFF_FLUSH;
1677 queue_work(smux_tx_wq, &smux_inactivity_work);
1678 } else {
1679 pr_err("%s: sleep request ack invalid in state %d\n",
1680 __func__, smux.power_state);
1681 }
1682 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001683 /*
1684 * Remote sleep request
1685 *
1686 * Even if we have data pending, we need to transition to the
1687 * POWER_OFF state and then perform a wakeup since the remote
1688 * side has requested a power-down.
1689 *
1690 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1691 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1692 * when it sends the packet.
1693 */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001694 if (smux.power_state == SMUX_PWR_ON
1695 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1696 ack_pkt = smux_alloc_pkt();
1697 if (ack_pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06001698 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001699 smux.power_state,
1700 SMUX_PWR_TURNING_OFF_FLUSH);
1701
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001702 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1703
1704 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001705 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1706 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001707 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1708 list_add_tail(&ack_pkt->list,
1709 &smux.power_queue);
1710 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001711 }
1712 } else {
1713 pr_err("%s: sleep request invalid in state %d\n",
1714 __func__, smux.power_state);
1715 }
1716 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001717 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001718
1719 return 0;
1720}
1721
1722/**
1723 * Handle dispatching a completed packet for receive processing.
1724 *
1725 * @pkt Packet to process
1726 *
1727 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001728 */
1729static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1730{
Eric Holmbergf9622662012-06-13 15:55:45 -06001731 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001732
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001733 switch (pkt->hdr.cmd) {
1734 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001735 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001736 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1737 pr_err("%s: invalid channel id %d\n",
1738 __func__, pkt->hdr.lcid);
1739 break;
1740 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001741 ret = smux_handle_rx_open_cmd(pkt);
1742 break;
1743
1744 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001745 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001746 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1747 pr_err("%s: invalid channel id %d\n",
1748 __func__, pkt->hdr.lcid);
1749 break;
1750 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001751 ret = smux_handle_rx_data_cmd(pkt);
1752 break;
1753
1754 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001755 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001756 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1757 pr_err("%s: invalid channel id %d\n",
1758 __func__, pkt->hdr.lcid);
1759 break;
1760 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001761 ret = smux_handle_rx_close_cmd(pkt);
1762 break;
1763
1764 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001765 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001766 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1767 pr_err("%s: invalid channel id %d\n",
1768 __func__, pkt->hdr.lcid);
1769 break;
1770 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001771 ret = smux_handle_rx_status_cmd(pkt);
1772 break;
1773
1774 case SMUX_CMD_PWR_CTL:
1775 ret = smux_handle_rx_power_cmd(pkt);
1776 break;
1777
1778 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001779 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001780 ret = smux_handle_rx_byte_cmd(pkt);
1781 break;
1782
1783 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001784 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001785 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1786 ret = -EINVAL;
1787 }
1788 return ret;
1789}
1790
1791/**
1792 * Deserializes a packet and dispatches it to the packet receive logic.
1793 *
1794 * @data Raw data for one packet
1795 * @len Length of the data
1796 *
1797 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001798 */
1799static int smux_deserialize(unsigned char *data, int len)
1800{
1801 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001802
1803 smux_init_pkt(&recv);
1804
1805 /*
1806 * It may be possible to optimize this to not use the
1807 * temporary buffer.
1808 */
1809 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1810
1811 if (recv.hdr.magic != SMUX_MAGIC) {
1812 pr_err("%s: invalid header magic\n", __func__);
1813 return -EINVAL;
1814 }
1815
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001816 if (recv.hdr.payload_len)
1817 recv.payload = data + sizeof(struct smux_hdr_t);
1818
1819 return smux_dispatch_rx_pkt(&recv);
1820}
1821
1822/**
1823 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001824 */
1825static void smux_handle_wakeup_req(void)
1826{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001827 unsigned long flags;
1828
1829 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001830 if (smux.power_state == SMUX_PWR_OFF
1831 || smux.power_state == SMUX_PWR_TURNING_ON) {
1832 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001833 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001834 smux.power_state, SMUX_PWR_ON);
1835 smux.power_state = SMUX_PWR_ON;
1836 queue_work(smux_tx_wq, &smux_wakeup_work);
1837 queue_work(smux_tx_wq, &smux_tx_work);
1838 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1839 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1840 smux_send_byte(SMUX_WAKEUP_ACK);
1841 } else {
1842 smux_send_byte(SMUX_WAKEUP_ACK);
1843 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001844 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001845}
1846
1847/**
1848 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 */
1850static void smux_handle_wakeup_ack(void)
1851{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001852 unsigned long flags;
1853
1854 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001855 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1856 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001857 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001858 smux.power_state, SMUX_PWR_ON);
1859 smux.power_state = SMUX_PWR_ON;
1860 queue_work(smux_tx_wq, &smux_tx_work);
1861 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1862 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1863
1864 } else if (smux.power_state != SMUX_PWR_ON) {
1865 /* invalid message */
1866 pr_err("%s: wakeup request ack invalid in state %d\n",
1867 __func__, smux.power_state);
1868 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001869 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001870}
1871
1872/**
1873 * RX State machine - IDLE state processing.
1874 *
1875 * @data New RX data to process
1876 * @len Length of the data
1877 * @used  Number of bytes of @data already consumed; updated on return
1878 * @flag  Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001879 */
1880static void smux_rx_handle_idle(const unsigned char *data,
1881 int len, int *used, int flag)
1882{
1883 int i;
1884
1885 if (flag) {
1886 if (smux_byte_loopback)
1887 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1888 smux_byte_loopback);
1889 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1890 ++*used;
1891 return;
1892 }
1893
1894 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1895 switch (data[i]) {
1896 case SMUX_MAGIC_WORD1:
1897 smux.rx_state = SMUX_RX_MAGIC;
1898 break;
1899 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001900 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001901 smux_handle_wakeup_req();
1902 break;
1903 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001904 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001905 smux_handle_wakeup_ack();
1906 break;
1907 default:
1908 /* unexpected character */
1909 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1910 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1911 smux_byte_loopback);
1912 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1913 (unsigned)data[i]);
1914 break;
1915 }
1916 }
1917
1918 *used = i;
1919}
1920
1921/**
1922 * RX State machine - Header Magic state processing.
1923 *
1924 * @data New RX data to process
1925 * @len Length of the data
1926 * @used  Number of bytes of @data already consumed; updated on return
1927 * @flag  Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001928 */
1929static void smux_rx_handle_magic(const unsigned char *data,
1930 int len, int *used, int flag)
1931{
1932 int i;
1933
1934 if (flag) {
1935 pr_err("%s: TTY RX error %d\n", __func__, flag);
1936 smux_enter_reset();
1937 smux.rx_state = SMUX_RX_FAILURE;
1938 ++*used;
1939 return;
1940 }
1941
1942 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1943 /* wait for completion of the magic */
1944 if (data[i] == SMUX_MAGIC_WORD2) {
1945 smux.recv_len = 0;
1946 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1947 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1948 smux.rx_state = SMUX_RX_HDR;
1949 } else {
1950 /* unexpected / trash character */
1951 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1952 __func__, data[i], *used, len);
1953 smux.rx_state = SMUX_RX_IDLE;
1954 }
1955 }
1956
1957 *used = i;
1958}
1959
1960/**
1961 * RX State machine - Packet Header state processing.
1962 *
1963 * @data New RX data to process
1964 * @len Length of the data
1965 * @used  Number of bytes of @data already consumed; updated on return
1966 * @flag  Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001967 */
1968static void smux_rx_handle_hdr(const unsigned char *data,
1969 int len, int *used, int flag)
1970{
1971 int i;
1972 struct smux_hdr_t *hdr;
1973
1974 if (flag) {
1975 pr_err("%s: TTY RX error %d\n", __func__, flag);
1976 smux_enter_reset();
1977 smux.rx_state = SMUX_RX_FAILURE;
1978 ++*used;
1979 return;
1980 }
1981
1982 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1983 smux.recv_buf[smux.recv_len++] = data[i];
1984
1985 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1986 /* complete header received */
1987 hdr = (struct smux_hdr_t *)smux.recv_buf;
1988 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1989 smux.rx_state = SMUX_RX_PAYLOAD;
1990 }
1991 }
1992 *used = i;
1993}
1994
1995/**
1996 * RX State machine - Packet Payload state processing.
1997 *
1998 * @data New RX data to process
1999 * @len Length of the data
2000 * @used  Number of bytes of @data already consumed; updated on return
2001 * @flag  Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002002 */
2003static void smux_rx_handle_pkt_payload(const unsigned char *data,
2004 int len, int *used, int flag)
2005{
2006 int remaining;
2007
2008 if (flag) {
2009 pr_err("%s: TTY RX error %d\n", __func__, flag);
2010 smux_enter_reset();
2011 smux.rx_state = SMUX_RX_FAILURE;
2012 ++*used;
2013 return;
2014 }
2015
2016 /* copy data into rx buffer */
2017 if (smux.pkt_remain < (len - *used))
2018 remaining = smux.pkt_remain;
2019 else
2020 remaining = len - *used;
2021
2022 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2023 smux.recv_len += remaining;
2024 smux.pkt_remain -= remaining;
2025 *used += remaining;
2026
2027 if (smux.pkt_remain == 0) {
2028 /* complete packet received */
2029 smux_deserialize(smux.recv_buf, smux.recv_len);
2030 smux.rx_state = SMUX_RX_IDLE;
2031 }
2032}
2033
2034/**
2035 * Feed data to the receive state machine.
2036 *
2037 * @data Pointer to data block
2038 * @len Length of data
2039 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002040 */
2041void smux_rx_state_machine(const unsigned char *data,
2042 int len, int flag)
2043{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002044 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002045
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002046 work.data = data;
2047 work.len = len;
2048 work.flag = flag;
2049 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2050 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002051
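	/*
	 * Queue the on-stack work item to the RX workqueue and block on its
	 * completion: parsing runs in worker context while this call stays
	 * synchronous for the TTY line discipline caller.
	 */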
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002052 queue_work(smux_rx_wq, &work.work);
2053 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002054}
2055
2056/**
2057 * Add channel to transmit-ready list and trigger transmit worker.
2058 *
2059 * @ch Channel to add
2060 */
2061static void list_channel(struct smux_lch_t *ch)
2062{
2063 unsigned long flags;
2064
2065 SMUX_DBG("%s: listing channel %d\n",
2066 __func__, ch->lcid);
2067
2068 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2069 spin_lock(&ch->tx_lock_lhb2);
2070 smux.tx_activity_flag = 1;
2071 if (list_empty(&ch->tx_ready_list))
2072 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2073 spin_unlock(&ch->tx_lock_lhb2);
2074 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2075
2076 queue_work(smux_tx_wq, &smux_tx_work);
2077}
2078
2079/**
2080 * Transmit packet on correct transport and then perform client
2081 * notification.
2082 *
2083 * @ch Channel to transmit on
2084 * @pkt Packet to transmit
2085 */
2086static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2087{
2088 union notifier_metadata meta_write;
2089 int ret;
2090
2091 if (ch && pkt) {
2092 SMUX_LOG_PKT_TX(pkt);
2093 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2094 ret = smux_tx_loopback(pkt);
2095 else
2096 ret = smux_tx_tty(pkt);
2097
2098 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2099 /* notify write-done */
2100 meta_write.write.pkt_priv = pkt->priv;
2101 meta_write.write.buffer = pkt->payload;
2102 meta_write.write.len = pkt->hdr.payload_len;
2103 if (ret >= 0) {
2104				SMUX_DBG("%s: PKT write done\n", __func__);
2105 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2106 &meta_write);
2107 } else {
2108 pr_err("%s: failed to write pkt %d\n",
2109 __func__, ret);
2110 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2111 &meta_write);
2112 }
2113 }
2114 }
2115}
2116
2117/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002118 * Flush pending TTY TX data.
2119 */
2120static void smux_flush_tty(void)
2121{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002122 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002123 if (!smux.tty) {
2124 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002125 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002126 return;
2127 }
2128
2129 tty_wait_until_sent(smux.tty,
2130 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2131
2132 if (tty_chars_in_buffer(smux.tty) > 0)
2133 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002134
2135 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002136}
2137
2138/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002139 * Purge TX queue for logical channel.
2140 *
2141 * @ch Logical channel pointer
2142 *
2143 * Must be called with the following spinlocks locked:
2144 * state_lock_lhb1
2145 * tx_lock_lhb2
2146 */
2147static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2148{
2149 struct smux_pkt_t *pkt;
2150 int send_disconnect = 0;
2151
2152 while (!list_empty(&ch->tx_queue)) {
2153 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2154 list);
2155 list_del(&pkt->list);
2156
2157 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2158 /* Open was never sent, just force to closed state */
2159 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2160 send_disconnect = 1;
2161 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2162 /* Notify client of failed write */
2163 union notifier_metadata meta_write;
2164
2165 meta_write.write.pkt_priv = pkt->priv;
2166 meta_write.write.buffer = pkt->payload;
2167 meta_write.write.len = pkt->hdr.payload_len;
2168 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2169 }
2170 smux_free_pkt(pkt);
2171 }
2172
2173 if (send_disconnect) {
2174 union notifier_metadata meta_disconnected;
2175
2176 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2177 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2178 &meta_disconnected);
2179 }
2180}
2181
2182/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002183 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002184 *
2185 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002186 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002187static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002188{
2189 struct uart_state *state;
2190
2191 if (!smux.tty || !smux.tty->driver_data) {
2192 pr_err("%s: unable to find UART port for tty %p\n",
2193 __func__, smux.tty);
2194 return;
2195 }
2196 state = smux.tty->driver_data;
2197 msm_hs_request_clock_on(state->uart_port);
2198}
2199
2200/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002201 * Power-up the UART.
2202 */
2203static void smux_uart_power_on(void)
2204{
2205 mutex_lock(&smux.mutex_lha0);
2206 smux_uart_power_on_atomic();
2207 mutex_unlock(&smux.mutex_lha0);
2208}
2209
2210/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002211 * Power down the UART.
2212 */
2213static void smux_uart_power_off(void)
2214{
2215 struct uart_state *state;
2216
Eric Holmberg92a67df2012-06-25 13:56:24 -06002217 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002218 if (!smux.tty || !smux.tty->driver_data) {
2219 pr_err("%s: unable to find UART port for tty %p\n",
2220 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002221 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002222 return;
2223 }
2224 state = smux.tty->driver_data;
2225 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002226 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002227}
2228
2229/**
2230 * TX Wakeup Worker
2231 *
2232 * @work Not used
2233 *
2234 * Do an exponential back-off wakeup sequence with a maximum period
2235 * of approximately 1 second (1 << 20 microseconds).
2236 */
2237static void smux_wakeup_worker(struct work_struct *work)
2238{
2239 unsigned long flags;
2240 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002241
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002242 if (smux.in_reset)
2243 return;
2244
2245 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2246 if (smux.power_state == SMUX_PWR_ON) {
2247 /* wakeup complete */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002248 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002249 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002250
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002251 /*
2252 * Cancel any pending retry. This avoids a race condition with
2253 * a new power-up request because:
2254 * 1) this worker doesn't modify the state
2255 * 2) this worker is processed on the same single-threaded
2256 * workqueue as new TX wakeup requests
2257 */
2258 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002259 } else {
2260 /* retry wakeup */
2261 wakeup_delay = smux.pwr_wakeup_delay_us;
2262 smux.pwr_wakeup_delay_us <<= 1;
2263 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2264 smux.pwr_wakeup_delay_us =
2265 SMUX_WAKEUP_DELAY_MAX;
2266
2267 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2268 SMUX_DBG("%s: triggering wakeup\n", __func__);
2269 smux_send_byte(SMUX_WAKEUP_REQ);
2270
2271 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2272 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2273 wakeup_delay);
2274 usleep_range(wakeup_delay, 2*wakeup_delay);
2275 queue_work(smux_tx_wq, &smux_wakeup_work);
2276 } else {
2277 /* schedule delayed work */
2278 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2279 __func__, wakeup_delay / 1000);
2280 queue_delayed_work(smux_tx_wq,
2281 &smux_wakeup_delayed_work,
2282 msecs_to_jiffies(wakeup_delay / 1000));
2283 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002284 }
2285}
2286
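/*
 * Worked example (illustrative only, compiled out): the retry delay starts at
 * 1 us and doubles after every unanswered wakeup request, so the sequence is
 * 1, 2, 4, ... 16384 us handled inline with usleep_range(), then 32768 us and
 * up handled via the delayed workqueue, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second).  demo_next_wakeup_delay() below is a
 * hypothetical helper that mirrors that doubling/capping logic.
 */
#if 0
static unsigned demo_next_wakeup_delay(unsigned *delay_us)
{
	unsigned this_delay = *delay_us;

	/* double for the next attempt, but never exceed the maximum */
	*delay_us <<= 1;
	if (*delay_us > SMUX_WAKEUP_DELAY_MAX)
		*delay_us = SMUX_WAKEUP_DELAY_MAX;

	return this_delay;	/* below SMUX_WAKEUP_DELAY_MIN => inline sleep */
}
#endif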
2287
2288/**
2289 * Inactivity timeout worker. Periodically scheduled when link is active.
2290 * When it detects inactivity, it will power-down the UART link.
2291 *
2292 * @work Work structure (not used)
2293 */
2294static void smux_inactivity_worker(struct work_struct *work)
2295{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002296 struct smux_pkt_t *pkt;
2297 unsigned long flags;
2298
2299 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2300 spin_lock(&smux.tx_lock_lha2);
2301
2302 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2303 /* no activity */
2304 if (smux.powerdown_enabled) {
2305 if (smux.power_state == SMUX_PWR_ON) {
2306 /* start power-down sequence */
2307 pkt = smux_alloc_pkt();
2308 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002309 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002310 smux.power_state,
2311 SMUX_PWR_TURNING_OFF);
2312 smux.power_state = SMUX_PWR_TURNING_OFF;
2313
2314 /* send power-down request */
2315 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2316 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002317 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2318 list_add_tail(&pkt->list,
2319 &smux.power_queue);
2320 queue_work(smux_tx_wq, &smux_tx_work);
2321 } else {
2322 pr_err("%s: packet alloc failed\n",
2323 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002324 }
2325 }
2326 } else {
2327 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2328 __func__);
2329 }
2330 }
2331 smux.tx_activity_flag = 0;
2332 smux.rx_activity_flag = 0;
2333
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002334 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002335 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002336 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002337 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002338 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002339
2340 /* if data is pending, schedule a new wakeup */
2341 if (!list_empty(&smux.lch_tx_ready_list) ||
2342 !list_empty(&smux.power_queue))
2343 queue_work(smux_tx_wq, &smux_tx_work);
2344
2345 spin_unlock(&smux.tx_lock_lha2);
2346 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2347
2348 /* flush UART output queue and power down */
2349 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002350 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002351 } else {
2352 spin_unlock(&smux.tx_lock_lha2);
2353 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002354 }
2355
2356 /* reschedule inactivity worker */
2357 if (smux.power_state != SMUX_PWR_OFF)
2358 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2359 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2360}
2361
2362/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002363 * Remove RX retry packet from channel and free it.
2364 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002365 * @ch Channel for retry packet
2366 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002367 *
2368 * @returns 1 if flow control updated; 0 otherwise
2369 *
2370 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002371 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002372int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002373 struct smux_rx_pkt_retry *retry)
2374{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002375 int tx_ready = 0;
2376
Eric Holmbergb8435c82012-06-05 14:51:29 -06002377 list_del(&retry->rx_retry_list);
2378 --ch->rx_retry_queue_cnt;
2379 smux_free_pkt(retry->pkt);
2380 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002381
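	/*
	 * If automatic RX flow control previously throttled the remote sender
	 * and the retry queue has now drained to the low watermark, clear the
	 * flow control so the remote may transmit again and tell the client
	 * via SMUX_RX_RETRY_LOW_WM_HIT.
	 */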
2382 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2383 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2384 ch->rx_flow_control_auto) {
2385 ch->rx_flow_control_auto = 0;
2386 smux_rx_flow_control_updated(ch);
2387 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2388 tx_ready = 1;
2389 }
2390 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002391}
2392
2393/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002394 * RX worker handles all receive operations.
2395 *
2396 * @work Work structure contained in the smux_rx_worker_data structure
2397 */
2398static void smux_rx_worker(struct work_struct *work)
2399{
2400 unsigned long flags;
2401 int used;
2402 int initial_rx_state;
2403 struct smux_rx_worker_data *w;
2404 const unsigned char *data;
2405 int len;
2406 int flag;
2407
2408 w = container_of(work, struct smux_rx_worker_data, work);
2409 data = w->data;
2410 len = w->len;
2411 flag = w->flag;
2412
2413 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2414 smux.rx_activity_flag = 1;
2415 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2416
2417 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2418 used = 0;
2419 do {
2420 SMUX_DBG("%s: state %d; %d of %d\n",
2421 __func__, smux.rx_state, used, len);
2422 initial_rx_state = smux.rx_state;
2423
2424 switch (smux.rx_state) {
2425 case SMUX_RX_IDLE:
2426 smux_rx_handle_idle(data, len, &used, flag);
2427 break;
2428 case SMUX_RX_MAGIC:
2429 smux_rx_handle_magic(data, len, &used, flag);
2430 break;
2431 case SMUX_RX_HDR:
2432 smux_rx_handle_hdr(data, len, &used, flag);
2433 break;
2434 case SMUX_RX_PAYLOAD:
2435 smux_rx_handle_pkt_payload(data, len, &used, flag);
2436 break;
2437 default:
2438 SMUX_DBG("%s: invalid state %d\n",
2439 __func__, smux.rx_state);
2440 smux.rx_state = SMUX_RX_IDLE;
2441 break;
2442 }
2443 } while (used < len || smux.rx_state != initial_rx_state);
2444
2445 complete(&w->work_complete);
2446}
2447
2448/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002449 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2450 * because the client was not ready (-EAGAIN).
2451 *
2452 * @work Work structure contained in smux_lch_t structure
2453 */
2454static void smux_rx_retry_worker(struct work_struct *work)
2455{
2456 struct smux_lch_t *ch;
2457 struct smux_rx_pkt_retry *retry;
2458 union notifier_metadata metadata;
2459 int tmp;
2460 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002461 int immediate_retry = 0;
2462 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002463
2464 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2465
2466 /* get next retry packet */
2467 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2468 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2469 /* port has been closed - remove all retries */
2470 while (!list_empty(&ch->rx_retry_queue)) {
2471 retry = list_first_entry(&ch->rx_retry_queue,
2472 struct smux_rx_pkt_retry,
2473 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002474 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002475 }
2476 }
2477
2478 if (list_empty(&ch->rx_retry_queue)) {
2479 SMUX_DBG("%s: retry list empty for channel %d\n",
2480 __func__, ch->lcid);
2481 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2482 return;
2483 }
2484 retry = list_first_entry(&ch->rx_retry_queue,
2485 struct smux_rx_pkt_retry,
2486 rx_retry_list);
2487 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2488
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002489 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2490 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002491 metadata.read.pkt_priv = 0;
2492 metadata.read.buffer = 0;
2493 tmp = ch->get_rx_buffer(ch->priv,
2494 (void **)&metadata.read.pkt_priv,
2495 (void **)&metadata.read.buffer,
2496 retry->pkt->hdr.payload_len);
2497 if (tmp == 0 && metadata.read.buffer) {
2498 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002499
Eric Holmbergb8435c82012-06-05 14:51:29 -06002500 memcpy(metadata.read.buffer, retry->pkt->payload,
2501 retry->pkt->hdr.payload_len);
2502 metadata.read.len = retry->pkt->hdr.payload_len;
2503
2504 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002505 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002506 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002507 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002508 if (tx_ready)
2509 list_channel(ch);
2510
2511 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002512 } else if (tmp == -EAGAIN ||
2513 (tmp == 0 && !metadata.read.buffer)) {
2514 /* retry again */
2515 retry->timeout_in_ms <<= 1;
2516 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2517 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002518 pr_err("%s: ch %d RX retry client timeout\n",
2519 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002520 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002521 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002522 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002523 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2524 if (tx_ready)
2525 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002526 }
2527 } else {
2528 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002529 pr_err("%s: ch %d RX retry client failed (%d)\n",
2530 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002531 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002532 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002533 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002534 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002535 if (tx_ready)
2536 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002537 }
2538
2539 /* schedule next retry */
2540 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2541 if (!list_empty(&ch->rx_retry_queue)) {
2542 retry = list_first_entry(&ch->rx_retry_queue,
2543 struct smux_rx_pkt_retry,
2544 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002545
2546 if (immediate_retry)
2547 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2548 else
2549 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2550 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002551 }
2552 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2553}
2554
2555/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002556 * Transmit worker handles serializing and transmitting packets onto the
2557 * underlying transport.
2558 *
2559 * @work Work structure (not used)
2560 */
2561static void smux_tx_worker(struct work_struct *work)
2562{
2563 struct smux_pkt_t *pkt;
2564 struct smux_lch_t *ch;
2565 unsigned low_wm_notif;
2566 unsigned lcid;
2567 unsigned long flags;
2568
2569
2570 /*
2571 * Transmit packets in round-robin fashion based upon ready
2572 * channels.
2573 *
2574 * To eliminate the need to hold a lock for the entire
2575 * iteration through the channel ready list, the head of the
2576 * ready-channel list is always the next channel to be
2577 * processed. To send a packet, the first valid packet in
2578 * the head channel is removed and the head channel is then
2579 * rescheduled at the end of the queue by removing it and
2580 * inserting after the tail. The locks can then be released
2581 * while the packet is processed.
2582 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002583 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002584 pkt = NULL;
2585 low_wm_notif = 0;
2586
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002587 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002588
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002589 /* handle wakeup if needed */
2590 if (smux.power_state == SMUX_PWR_OFF) {
2591 if (!list_empty(&smux.lch_tx_ready_list) ||
2592 !list_empty(&smux.power_queue)) {
2593 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002594 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002595 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002596 smux.power_state,
2597 SMUX_PWR_TURNING_ON);
2598 smux.power_state = SMUX_PWR_TURNING_ON;
2599 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2600 flags);
2601 smux_uart_power_on();
2602 queue_work(smux_tx_wq, &smux_wakeup_work);
2603 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002604 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002605 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2606 flags);
2607 }
2608 break;
2609 }
2610
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002611 /* process any pending power packets */
2612 if (!list_empty(&smux.power_queue)) {
2613 pkt = list_first_entry(&smux.power_queue,
2614 struct smux_pkt_t, list);
2615 list_del(&pkt->list);
2616 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2617
2618 /* send the packet */
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002619 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002620 if (!smux_byte_loopback) {
2621 smux_tx_tty(pkt);
2622 smux_flush_tty();
2623 } else {
2624 smux_tx_loopback(pkt);
2625 }
2626
2627 /* Adjust power state if this is a flush command */
2628 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2629 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2630 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2631 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002632 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002633 smux.power_state,
2634 SMUX_PWR_OFF_FLUSH);
2635 smux.power_state = SMUX_PWR_OFF_FLUSH;
2636 queue_work(smux_tx_wq, &smux_inactivity_work);
2637 }
2638 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2639
2640 smux_free_pkt(pkt);
2641 continue;
2642 }
2643
2644 /* get the next ready channel */
2645 if (list_empty(&smux.lch_tx_ready_list)) {
2646 /* no ready channels */
2647 SMUX_DBG("%s: no more ready channels, exiting\n",
2648 __func__);
2649 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2650 break;
2651 }
2652 smux.tx_activity_flag = 1;
2653
2654 if (smux.power_state != SMUX_PWR_ON) {
2655 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002656 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002657 __func__,
2658 smux.power_state);
2659 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2660 break;
2661 }
2662
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002663 /* get the next packet to send and rotate channel list */
2664 ch = list_first_entry(&smux.lch_tx_ready_list,
2665 struct smux_lch_t,
2666 tx_ready_list);
2667
2668 spin_lock(&ch->state_lock_lhb1);
2669 spin_lock(&ch->tx_lock_lhb2);
2670 if (!list_empty(&ch->tx_queue)) {
2671 /*
2672 * If remote TX flow control is enabled or
2673 * the channel is not fully opened, then only
2674 * send command packets.
2675 */
2676 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2677 struct smux_pkt_t *curr;
2678 list_for_each_entry(curr, &ch->tx_queue, list) {
2679 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2680 pkt = curr;
2681 break;
2682 }
2683 }
2684 } else {
2685 /* get next cmd/data packet to send */
2686 pkt = list_first_entry(&ch->tx_queue,
2687 struct smux_pkt_t, list);
2688 }
2689 }
2690
2691 if (pkt) {
2692 list_del(&pkt->list);
2693
2694 /* update packet stats */
2695 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2696 --ch->tx_pending_data_cnt;
2697 if (ch->notify_lwm &&
2698 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002699 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002700 ch->notify_lwm = 0;
2701 low_wm_notif = 1;
2702 }
2703 }
2704
2705 /* advance to the next ready channel */
2706 list_rotate_left(&smux.lch_tx_ready_list);
2707 } else {
2708 /* no data in channel to send, remove from ready list */
2709 list_del(&ch->tx_ready_list);
2710 INIT_LIST_HEAD(&ch->tx_ready_list);
2711 }
2712 lcid = ch->lcid;
2713 spin_unlock(&ch->tx_lock_lhb2);
2714 spin_unlock(&ch->state_lock_lhb1);
2715 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2716
2717 if (low_wm_notif)
2718 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2719
2720 /* send the packet */
2721 smux_tx_pkt(ch, pkt);
2722 smux_free_pkt(pkt);
2723 }
2724}
2725
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002726/**
2727 * Update the RX flow control (sent in the TIOCM Status command).
2728 *
2729 * @ch Channel for update
2730 *
2731 * @returns 1 for updated, 0 for not updated
2732 *
2733 * Must be called with ch->state_lock_lhb1 locked.
2734 */
2735static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2736{
2737 int updated = 0;
2738 int prev_state;
2739
2740 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2741
2742 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2743 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2744 else
2745 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2746
2747 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2748 smux_send_status_cmd(ch);
2749 updated = 1;
2750 }
2751
2752 return updated;
2753}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002754
2755/**********************************************************************/
2756/* Kernel API */
2757/**********************************************************************/
2758
2759/**
2760 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2761 * flags.
2762 *
2763 * @lcid Logical channel ID
2764 * @set Options to set
2765 * @clear Options to clear
2766 *
2767 * @returns 0 for success, < 0 for failure
2768 */
2769int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2770{
2771 unsigned long flags;
2772 struct smux_lch_t *ch;
2773 int tx_ready = 0;
2774 int ret = 0;
2775
2776 if (smux_assert_lch_id(lcid))
2777 return -ENXIO;
2778
2779 ch = &smux_lch[lcid];
2780 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2781
2782 /* Local loopback mode */
2783 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2784 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2785
2786 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2787 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2788
2789 /* Remote loopback mode */
2790 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2791 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2792
2793 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2794 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2795
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002796 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002797 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002798 ch->rx_flow_control_client = 1;
2799 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002800 }
2801
2802 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002803 ch->rx_flow_control_client = 0;
2804 tx_ready |= smux_rx_flow_control_updated(ch);
2805 }
2806
2807 /* Auto RX Flow Control */
2808 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2809 SMUX_DBG("%s: auto rx flow control option enabled\n",
2810 __func__);
2811 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2812 }
2813
2814 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2815 SMUX_DBG("%s: auto rx flow control option disabled\n",
2816 __func__);
2817 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2818 ch->rx_flow_control_auto = 0;
2819 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002820 }
2821
2822 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2823
2824 if (tx_ready)
2825 list_channel(ch);
2826
2827 return ret;
2828}
2829
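/*
 * Illustrative usage sketch (hypothetical helper, compiled out): options take
 * effect from the point they are set, so loopback modes are typically
 * configured before calling msm_smux_open().  The example below enables
 * remote loopback for testing and turns on automatic RX flow control.
 */
#if 0
static int demo_configure_channel(uint8_t lcid)
{
	return msm_smux_set_ch_option(lcid,
			SMUX_CH_OPTION_REMOTE_LOOPBACK |
			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
			0);
}
#endif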
2830/**
2831 * Starts the opening sequence for a logical channel.
2832 *
2833 * @lcid Logical channel ID
2834 * @priv Free for client usage
2835 * @notify Event notification function
2836 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2837 *
2838 * @returns 0 for success, <0 otherwise
2839 *
2840 * A channel must be fully closed before it can be opened again: either it
2841 * was never opened, or msm_smux_close() has been called and the
2842 * SMUX_DISCONNECTED notification has been received.
2843 *
2844 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2845 * event.
2846 */
2847int msm_smux_open(uint8_t lcid, void *priv,
2848 void (*notify)(void *priv, int event_type, const void *metadata),
2849 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2850 int size))
2851{
2852 int ret;
2853 struct smux_lch_t *ch;
2854 struct smux_pkt_t *pkt;
2855 int tx_ready = 0;
2856 unsigned long flags;
2857
2858 if (smux_assert_lch_id(lcid))
2859 return -ENXIO;
2860
2861 ch = &smux_lch[lcid];
2862 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2863
2864 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2865 ret = -EAGAIN;
2866 goto out;
2867 }
2868
2869 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2870 pr_err("%s: open lcid %d local state %x invalid\n",
2871 __func__, lcid, ch->local_state);
2872 ret = -EINVAL;
2873 goto out;
2874 }
2875
2876 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2877 ch->local_state,
2878 SMUX_LCH_LOCAL_OPENING);
2879
2880 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2881
2882 ch->priv = priv;
2883 ch->notify = notify;
2884 ch->get_rx_buffer = get_rx_buffer;
2885 ret = 0;
2886
2887 /* Send Open Command */
2888 pkt = smux_alloc_pkt();
2889 if (!pkt) {
2890 ret = -ENOMEM;
2891 goto out;
2892 }
2893 pkt->hdr.magic = SMUX_MAGIC;
2894 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2895 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2896 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2897 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2898 pkt->hdr.lcid = lcid;
2899 pkt->hdr.payload_len = 0;
2900 pkt->hdr.pad_len = 0;
2901 smux_tx_queue(pkt, ch, 0);
2902 tx_ready = 1;
2903
2904out:
2905 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2906 if (tx_ready)
2907 list_channel(ch);
2908 return ret;
2909}
2910
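/*
 * Illustrative client sketch (hypothetical names, compiled out): one way a
 * client might open a channel.  demo_notify() and demo_get_rx_buffer() are
 * placeholder callbacks.  Returning -EAGAIN (or a NULL buffer) from the
 * get_rx_buffer callback makes smux queue the packet on the RX retry list and
 * ask again later; any other error drops the packet and notifies the client
 * with SMUX_READ_FAIL (as in the retry worker above).
 */
#if 0
static void demo_notify(void *priv, int event_type, const void *metadata)
{
	switch (event_type) {
	case SMUX_CONNECTED:
		/* both sides of the channel are open; data may now flow */
		break;
	case SMUX_DISCONNECTED:
		/* channel is fully closed and may be reopened */
		break;
	default:
		break;
	}
}

static int demo_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
				int size)
{
	/* GFP_ATOMIC is a conservative assumption about calling context */
	void *buf = kmalloc(size, GFP_ATOMIC);

	if (!buf)
		return -EAGAIN;		/* no memory right now - retry later */

	*pkt_priv = NULL;
	*buffer = buf;
	return 0;
}

static int demo_open(uint8_t lcid)
{
	return msm_smux_open(lcid, NULL, demo_notify, demo_get_rx_buffer);
}
#endif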
2911/**
2912 * Starts the closing sequence for a logical channel.
2913 *
2914 * @lcid Logical channel ID
2915 *
2916 * @returns 0 for success, <0 otherwise
2917 *
2918 * Once the close event has been acknowledged by the remote side, the client
2919 * will receive a SMUX_DISCONNECTED notification.
2920 */
2921int msm_smux_close(uint8_t lcid)
2922{
2923 int ret = 0;
2924 struct smux_lch_t *ch;
2925 struct smux_pkt_t *pkt;
2926 int tx_ready = 0;
2927 unsigned long flags;
2928
2929 if (smux_assert_lch_id(lcid))
2930 return -ENXIO;
2931
2932 ch = &smux_lch[lcid];
2933 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2934 ch->local_tiocm = 0x0;
2935 ch->remote_tiocm = 0x0;
2936 ch->tx_pending_data_cnt = 0;
2937 ch->notify_lwm = 0;
2938
2939 /* Purge TX queue */
2940 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002941 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002942 spin_unlock(&ch->tx_lock_lhb2);
2943
2944 /* Send Close Command */
2945 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2946 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2947 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2948 ch->local_state,
2949 SMUX_LCH_LOCAL_CLOSING);
2950
2951 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2952 pkt = smux_alloc_pkt();
2953 if (pkt) {
2954 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2955 pkt->hdr.flags = 0;
2956 pkt->hdr.lcid = lcid;
2957 pkt->hdr.payload_len = 0;
2958 pkt->hdr.pad_len = 0;
2959 smux_tx_queue(pkt, ch, 0);
2960 tx_ready = 1;
2961 } else {
2962 pr_err("%s: pkt allocation failed\n", __func__);
2963 ret = -ENOMEM;
2964 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002965
2966 /* Purge RX retry queue */
2967 if (ch->rx_retry_queue_cnt)
2968 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002969 }
2970 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2971
2972 if (tx_ready)
2973 list_channel(ch);
2974
2975 return ret;
2976}
2977
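/*
 * Illustrative sketch (hypothetical helper, compiled out): request a close
 * and treat the SMUX_DISCONNECTED event delivered to the notify callback
 * registered in msm_smux_open() as the point at which the channel may be
 * reopened.
 */
#if 0
static void demo_close(uint8_t lcid)
{
	if (msm_smux_close(lcid))
		pr_err("demo: close request for lcid %d failed\n", lcid);

	/* wait for SMUX_DISCONNECTED in the notify callback before reopening */
}
#endif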
2978/**
2979 * Write data to a logical channel.
2980 *
2981 * @lcid Logical channel ID
2982 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2983 * SMUX_WRITE_FAIL notification.
2984 * @data Data to write
2985 * @len Length of @data
2986 *
2987 * @returns 0 for success, <0 otherwise
2988 *
2989 * Data may be written immediately after msm_smux_open() is called,
2990 * but the data will wait in the transmit queue until the channel has
2991 * been fully opened.
2992 *
2993 * Once the data has been written, the client will receive either a completion
2994 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2995 */
2996int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2997{
2998 struct smux_lch_t *ch;
2999 struct smux_pkt_t *pkt;
3000 int tx_ready = 0;
3001 unsigned long flags;
3002 int ret;
3003
3004 if (smux_assert_lch_id(lcid))
3005 return -ENXIO;
3006
3007 ch = &smux_lch[lcid];
3008 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3009
3010 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3011 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3012 pr_err("%s: hdr.invalid local state %d channel %d\n",
3013 __func__, ch->local_state, lcid);
3014 ret = -EINVAL;
3015 goto out;
3016 }
3017
3018 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3019 pr_err("%s: payload %d too large\n",
3020 __func__, len);
3021 ret = -E2BIG;
3022 goto out;
3023 }
3024
3025 pkt = smux_alloc_pkt();
3026 if (!pkt) {
3027 ret = -ENOMEM;
3028 goto out;
3029 }
3030
3031 pkt->hdr.cmd = SMUX_CMD_DATA;
3032 pkt->hdr.lcid = lcid;
3033 pkt->hdr.flags = 0;
3034 pkt->hdr.payload_len = len;
3035 pkt->payload = (void *)data;
3036 pkt->priv = pkt_priv;
3037 pkt->hdr.pad_len = 0;
3038
3039 spin_lock(&ch->tx_lock_lhb2);
3040 /* verify high watermark */
3041	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3042
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003043 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003044 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003045 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003046 ch->tx_pending_data_cnt);
3047 ret = -EAGAIN;
3048 goto out_inner;
3049 }
3050
3051 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003052 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003053 ch->notify_lwm = 1;
3054 pr_err("%s: high watermark hit\n", __func__);
3055 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3056 }
3057 list_add_tail(&pkt->list, &ch->tx_queue);
3058
3059 /* add to ready list */
3060 if (IS_FULLY_OPENED(ch))
3061 tx_ready = 1;
3062
3063 ret = 0;
3064
3065out_inner:
3066 spin_unlock(&ch->tx_lock_lhb2);
3067
3068out:
3069 if (ret)
3070 smux_free_pkt(pkt);
3071 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3072
3073 if (tx_ready)
3074 list_channel(ch);
3075
3076 return ret;
3077}
3078
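/*
 * Illustrative write sketch (hypothetical, compiled out).  The buffer passed
 * to msm_smux_write() is queued by reference, not copied, and is handed back
 * in the SMUX_WRITE_DONE / SMUX_WRITE_FAIL metadata, so it must stay valid
 * until one of those notifications arrives.  -EAGAIN means the per-channel
 * high watermark was hit; writing can resume after SMUX_LOW_WM_HIT.
 */
#if 0
static char demo_tx_buf[64];

static int demo_send(uint8_t lcid)
{
	int rc;

	rc = msm_smux_write(lcid, demo_tx_buf /* pkt_priv */, demo_tx_buf,
			sizeof(demo_tx_buf));
	if (rc == -EAGAIN)
		pr_info("demo: TX queue full; wait for SMUX_LOW_WM_HIT\n");

	return rc;
}
#endif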
3079/**
3080 * Returns true if the TX queue is currently full (high water mark).
3081 *
3082 * @lcid Logical channel ID
3083 * @returns 0 if channel is not full
3084 * 1 if it is full
3085 * < 0 for error
3086 */
3087int msm_smux_is_ch_full(uint8_t lcid)
3088{
3089 struct smux_lch_t *ch;
3090 unsigned long flags;
3091 int is_full = 0;
3092
3093 if (smux_assert_lch_id(lcid))
3094 return -ENXIO;
3095
3096 ch = &smux_lch[lcid];
3097
3098 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003099 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003100 is_full = 1;
3101 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3102
3103 return is_full;
3104}
3105
3106/**
3107 * Returns true if the TX queue has space for more packets (it is at or
3108 * below the low water mark).
3109 *
3110 * @lcid Logical channel ID
3111 * @returns 0 if channel is above low watermark
3112 * 1 if it's at or below the low watermark
3113 * < 0 for error
3114 */
3115int msm_smux_is_ch_low(uint8_t lcid)
3116{
3117 struct smux_lch_t *ch;
3118 unsigned long flags;
3119 int is_low = 0;
3120
3121 if (smux_assert_lch_id(lcid))
3122 return -ENXIO;
3123
3124 ch = &smux_lch[lcid];
3125
3126 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003127 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003128 is_low = 1;
3129 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3130
3131 return is_low;
3132}
3133
3134/**
3135 * Send TIOCM status update.
3136 *
3137 * @ch Channel for update
3138 *
3139 * @returns 0 for success, <0 for failure
3140 *
3141 * Channel lock must be held before calling.
3142 */
3143static int smux_send_status_cmd(struct smux_lch_t *ch)
3144{
3145 struct smux_pkt_t *pkt;
3146
3147 if (!ch)
3148 return -EINVAL;
3149
3150 pkt = smux_alloc_pkt();
3151 if (!pkt)
3152 return -ENOMEM;
3153
3154 pkt->hdr.lcid = ch->lcid;
3155 pkt->hdr.cmd = SMUX_CMD_STATUS;
3156 pkt->hdr.flags = ch->local_tiocm;
3157 pkt->hdr.payload_len = 0;
3158 pkt->hdr.pad_len = 0;
3159 smux_tx_queue(pkt, ch, 0);
3160
3161 return 0;
3162}
3163
3164/**
3165 * Internal helper function for getting the TIOCM status with
3166 * state_lock_lhb1 already locked.
3167 *
3168 * @ch Channel pointer
3169 *
3170 * @returns TIOCM status
3171 */
3172static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3173{
3174 long status = 0x0;
3175
3176 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3177 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3178 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3179 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3180
3181 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3182 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3183
3184 return status;
3185}
3186
3187/**
3188 * Get the TIOCM status bits.
3189 *
3190 * @lcid Logical channel ID
3191 *
3192 * @returns >= 0 TIOCM status bits
3193 * < 0 Error condition
3194 */
3195long msm_smux_tiocm_get(uint8_t lcid)
3196{
3197 struct smux_lch_t *ch;
3198 unsigned long flags;
3199 long status = 0x0;
3200
3201 if (smux_assert_lch_id(lcid))
3202 return -ENXIO;
3203
3204 ch = &smux_lch[lcid];
3205 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3206 status = msm_smux_tiocm_get_atomic(ch);
3207 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3208
3209 return status;
3210}
3211
3212/**
3213 * Set/clear the TIOCM status bits.
3214 *
3215 * @lcid Logical channel ID
3216 * @set Bits to set
3217 * @clear Bits to clear
3218 *
3219 * @returns 0 for success; < 0 for failure
3220 *
3221 * If a bit is specified in both the @set and @clear masks, then the clear bit
3222 * definition will dominate and the bit will be cleared.
3223 */
3224int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3225{
3226 struct smux_lch_t *ch;
3227 unsigned long flags;
3228 uint8_t old_status;
3229 uint8_t status_set = 0x0;
3230 uint8_t status_clear = 0x0;
3231 int tx_ready = 0;
3232 int ret = 0;
3233
3234 if (smux_assert_lch_id(lcid))
3235 return -ENXIO;
3236
3237 ch = &smux_lch[lcid];
3238 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3239
3240 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3241 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3242 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3243 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3244
3245 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3246 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3247 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3248 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3249
3250 old_status = ch->local_tiocm;
3251 ch->local_tiocm |= status_set;
3252 ch->local_tiocm &= ~status_clear;
3253
3254 if (ch->local_tiocm != old_status) {
3255 ret = smux_send_status_cmd(ch);
3256 tx_ready = 1;
3257 }
3258 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3259
3260 if (tx_ready)
3261 list_channel(ch);
3262
3263 return ret;
3264}
3265
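/*
 * Illustrative sketch (hypothetical helper, compiled out): assert DTR and RTS
 * on a channel while leaving the other signal bits untouched.  The change is
 * carried to the remote side in a SMUX_CMD_STATUS packet.
 */
#if 0
static int demo_assert_modem_signals(uint8_t lcid)
{
	return msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
}
#endif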
3266/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003267/* Subsystem Restart */
3268/**********************************************************************/
3269static struct notifier_block ssr_notifier = {
3270 .notifier_call = ssr_notifier_cb,
3271};
3272
3273/**
3274 * Handle Subsystem Restart (SSR) notifications.
3275 *
3276 * @this Pointer to ssr_notifier
3277 * @code SSR Code
3278 * @data Data pointer (not used)
3279 */
3280static int ssr_notifier_cb(struct notifier_block *this,
3281 unsigned long code,
3282 void *data)
3283{
3284 unsigned long flags;
3285 int power_off_uart = 0;
3286
Eric Holmbergd2697902012-06-15 09:58:46 -06003287 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3288 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3289 mutex_lock(&smux.mutex_lha0);
3290 smux.in_reset = 1;
3291 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003292 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003293 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3294 return NOTIFY_DONE;
3295 }
3296 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003297
3298 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003299 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003300 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003301 if (smux.tty)
3302 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003303
3304 /* Power-down UART */
3305 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3306 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003307 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003308 smux.power_state = SMUX_PWR_OFF;
3309 power_off_uart = 1;
3310 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003311 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003312 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3313
3314 if (power_off_uart)
3315 smux_uart_power_off();
3316
Eric Holmbergd2697902012-06-15 09:58:46 -06003317 smux.in_reset = 0;
3318 mutex_unlock(&smux.mutex_lha0);
3319
Eric Holmberged1f00c2012-06-07 09:45:18 -06003320 return NOTIFY_DONE;
3321}
3322
3323/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003324/* Line Discipline Interface */
3325/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003326static void smux_pdev_release(struct device *dev)
3327{
3328 struct platform_device *pdev;
3329
3330 pdev = container_of(dev, struct platform_device, dev);
3331 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3332 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3333}
3334
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003335static int smuxld_open(struct tty_struct *tty)
3336{
3337 int i;
3338 int tmp;
3339 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003340
3341 if (!smux.is_initialized)
3342 return -ENODEV;
3343
Eric Holmberged1f00c2012-06-07 09:45:18 -06003344 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003345 if (smux.ld_open_count) {
3346 pr_err("%s: %p multiple instances not supported\n",
3347 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003348 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003349 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003350 }
3351
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003352 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003353		pr_err("%s: tty->ops->write is NULL\n", __func__);
3354 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003355 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003356 }
3357
3358 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003359 ++smux.ld_open_count;
3360 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003361 smux.tty = tty;
3362 tty->disc_data = &smux;
3363 tty->receive_room = TTY_RECEIVE_ROOM;
3364 tty_driver_flush_buffer(tty);
3365
3366 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003367 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003368 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003369 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003370 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003371 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003372 queue_work(smux_tx_wq, &smux_inactivity_work);
3373 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003374 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003375 }
3376
3377 /* register platform devices */
3378 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003379 SMUX_DBG("%s: register pdev '%s'\n",
3380 __func__, smux_devs[i].name);
3381 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003382 tmp = platform_device_register(&smux_devs[i]);
3383 if (tmp)
3384 pr_err("%s: error %d registering device %s\n",
3385 __func__, tmp, smux_devs[i].name);
3386 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003387 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003388 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003389}
3390
3391static void smuxld_close(struct tty_struct *tty)
3392{
3393 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003394 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003395 int i;
3396
Eric Holmberged1f00c2012-06-07 09:45:18 -06003397 SMUX_DBG("%s: ldisc unload\n", __func__);
3398 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003399 if (smux.ld_open_count <= 0) {
3400 pr_err("%s: invalid ld count %d\n", __func__,
3401 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003402 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003403 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003404 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003405 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003406 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003407
3408 /* Cleanup channels */
3409 smux_lch_purge();
3410
3411 /* Unregister platform devices */
3412 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3413 SMUX_DBG("%s: unregister pdev '%s'\n",
3414 __func__, smux_devs[i].name);
3415 platform_device_unregister(&smux_devs[i]);
3416 }
3417
3418 /* Schedule UART power-up if it's down */
3419 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003420 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003421 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003422 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003423 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003424 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3425
3426 if (power_up_uart)
Eric Holmberg92a67df2012-06-25 13:56:24 -06003427 smux_uart_power_on_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003428
3429 /* Disconnect from TTY */
3430 smux.tty = NULL;
3431 mutex_unlock(&smux.mutex_lha0);
3432 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003433}
3434
/**
 * smuxld_receive_buf() - receive data from the TTY layer
 *
 * @tty:   TTY structure
 * @cp:    Character data
 * @fp:    Flag data
 * @count: Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
				16, 1, cp, count, true);

	/* verify error flags */
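	/*
	 * Each byte flagged by the TTY layer is handed to the RX state
	 * machine individually with its error flag; the clean bytes
	 * before and after it are passed through in bulk as TTY_NORMAL.
	 */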
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
					tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};
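
/*
 * Usage sketch (an assumption, not part of this driver): a userspace
 * helper would typically attach this line discipline to the HS-UART
 * port with the TIOCSETD ioctl.  The device node name below is only an
 * example; the snippet also needs <fcntl.h>, <sys/ioctl.h>, and the
 * N_SMUX line-discipline number visible to userspace.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 */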

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

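	/* get notified of external modem subsystem restarts */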
	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);