/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
#define SMUX_PKT_LOG_SIZE 128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM 65536
#define TTY_BUFFER_FULL_WAIT_MS 50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX (1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN (1 << 15)

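/*
 * A note on units (an assumption, inferred from pwr_wakeup_delay_us in
 * struct smux_ldisc_t below): these delays are microseconds, i.e. about
 * 33 ms (1 << 15) up to 1.05 s (1 << 20), and the wakeup worker is
 * assumed to double the delay between attempts within these bounds.
 */
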
/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */

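/*
 * Sketch of the assumed retry back-off (smux_handle_rx_data_cmd() below
 * seeds timeout_in_ms with SMUX_RX_RETRY_MIN_MS): each failed
 * get_rx_buffer() attempt is assumed to double the delay, capped at
 * SMUX_RX_RETRY_MAX_MS:
 *
 *	retry->timeout_in_ms <<= 1;
 *	if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS)
 *		retry->timeout_in_ms = SMUX_RX_RETRY_MAX_MS;
 */
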
enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
		    pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			 pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

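/*
 * Typical use, as in smux_handle_rx_status_cmd() below: gate client
 * notifications on a fully established channel.
 *
 *	if (IS_FULLY_OPENED(ch))
 *		schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
 */
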
static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. This structure temporarily holds the packet data while
 * the buffer request is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int remote_is_alive;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
			    smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			   const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
			   unsigned long code,
			   void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
	if (event < ARRAY_SIZE(smux_events))
		return smux_events[event];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
			 __func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			 SMUX_NOTIFY_FIFO_SIZE * handle_size,
			 GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
			 __func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
				      struct smux_lch_t,
				      tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
				       struct smux_pkt_t,
				       list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
			 __func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
		    ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
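 *
 * Illustrative example (hypothetical values): the line
 *   "smux: R5 ON:ON DATA flags 0 len 4:0 de ad be ef"
 * decodes as a received DATA packet on lcid 5, both sides OPENED in
 * normal mode, a 4-byte payload, and no padding.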
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		      "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		      is_recv ? 'R' : 'S', pkt->hdr.lcid,
		      local_state, local_mode,
		      remote_state, remote_mode,
		      cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		      pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			      "%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				      &notify_handle,
				      handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				      notify_handle->event_type,
				      metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
			 __func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

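/*
 * Minimal usage sketch for the packet helpers above (illustrative only;
 * 'src' and 'len' are hypothetical). smux_free_pkt() also frees the
 * payload here because smux_alloc_pkt_payload() sets free_payload.
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.payload_len = len;
 *		if (!smux_alloc_pkt_payload(pkt))
 *			memcpy(pkt->payload, src, len);
 *		smux_free_pkt(pkt);
 *	}
 */
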
static int schedule_notify(uint8_t lcid, int event,
			   const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
				GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
				    GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
			 __func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
			 __func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @out.
 *
 * @pkt Packet to serialize
 * @out Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
		   unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
			 __func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

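/*
 * Illustrative use of smux_serialize() (a sketch, not driver code):
 * header, payload, and zero padding are packed back-to-back into a
 * caller-provided buffer of at least SMUX_MAX_PKT_SIZE bytes.
 *
 *	char buf[SMUX_MAX_PKT_SIZE];
 *	unsigned int len;
 *
 *	if (smux_serialize(pkt, buf, &len) == 0)
 *		write_to_tty(buf, len);
 */
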
/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
			       unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
				   unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized payload length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
				   unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
				 __func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
			 __func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
			 __func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
				 __func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch Channel to queue packet on
 * @queue Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
			  int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			 ch->local_state,
			 SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
			 __func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
				 __func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			 SMUX_LCH_LOCAL_CLOSING,
			 SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
			 __func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
			 SMUX_LCH_REMOTE_CLOSED,
			 SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
			 __func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
				 __func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
			 SMUX_LCH_REMOTE_OPENED,
			 SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
			 __func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
			 lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
			 lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
		    !ch->rx_flow_control_auto &&
		    ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
				       ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: Remote loopback allocation failure\n",
				 __func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
					(void **)&metadata.read.pkt_priv,
					(void **)&metadata.read.buffer,
					rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
			       rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
					&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
				 __func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			SMUX_ERR("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			SMUX_ERR("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
			       retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		SMUX_ERR("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
	    (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disable TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			SMUX_ERR("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			SMUX_ERR("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}
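
/*
 * Summary of the local power-state flow as implemented in this file
 * (a descriptive sketch for readers, not an exhaustive transition
 * table; error and reset paths are omitted):
 *
 *   SMUX_PWR_ON
 *     -> SMUX_PWR_TURNING_OFF_FLUSH (inactivity detected, or a remote
 *                                    sleep request was received)
 *     -> SMUX_PWR_TURNING_OFF       (local power-down request sent by
 *                                    the TX worker)
 *     -> SMUX_PWR_OFF_FLUSH         (power-down ACK sent or received)
 *     -> SMUX_PWR_OFF               (TTY flushed, UART clock released)
 *     -> SMUX_PWR_TURNING_ON        (TX data pending; wakeup bytes
 *                                    exchanged)
 *     -> SMUX_PWR_ON
 */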

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		SMUX_ERR("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else if (smux.power_state == SMUX_PWR_ON) {
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		/* stale wakeup request from previous wakeup */
		SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}
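
/*
 * Wakeup byte handshake, as handled above (descriptive note): the side
 * that wants to transmit while the link is powered down repeatedly
 * sends SMUX_WAKEUP_REQ bytes; the remote side answers each request
 * with SMUX_WAKEUP_ACK. Once the ACK arrives (or a request has been
 * answered), both sides treat the link as SMUX_PWR_ON and normal
 * packet transfer resumes.
 */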

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			SMUX_PWR("smux: RX Wakeup REQ\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			SMUX_PWR("smux: RX Wakeup ACK\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
					__func__, (unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			SMUX_ERR(
				"%s: rx parse error for char %c; *used=%d, len=%d\n",
				__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}
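
/*
 * RX parse flow implemented by the handlers above (descriptive note):
 *
 *   SMUX_RX_IDLE    - scan for SMUX_MAGIC_WORD1 or wakeup bytes
 *   SMUX_RX_MAGIC   - expect SMUX_MAGIC_WORD2 to confirm a header
 *   SMUX_RX_HDR     - accumulate sizeof(struct smux_hdr_t) bytes
 *   SMUX_RX_PAYLOAD - copy payload_len + pad_len bytes, then
 *                     deserialize the packet and return to IDLE
 *
 * Any TTY error flag in the MAGIC/HDR/PAYLOAD states forces
 * SMUX_RX_FAILURE via smux_enter_reset(); errors in IDLE are logged
 * and the byte is skipped.
 */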

/**
 * Returns true if the remote side has previously acknowledged a wakeup
 * request, so we know that the link is alive and active.
 *
 * @returns true if the link is alive, false otherwise
 */
bool smux_remote_is_active(void)
{
	bool is_active = false;

	mutex_lock(&smux.mutex_lha0);
	if (smux.remote_is_alive)
		is_active = true;
	mutex_unlock(&smux.mutex_lha0);

	return is_active;
}

/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit packet on correct transport and then perform client
 * notification.
 *
 * @ch Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("smux: %s: PKT write done\n",
						__func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				SMUX_ERR("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}

/**
 * Flush pending TTY TX data.
 */
static void smux_flush_tty(void)
{
	mutex_lock(&smux.mutex_lha0);
	if (!smux.tty) {
		SMUX_ERR("%s: ldisc not loaded\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}

	tty_wait_until_sent(smux.tty,
			msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

	if (tty_chars_in_buffer(smux.tty) > 0)
		SMUX_ERR("%s: unable to flush UART queue\n", __func__);

	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Purge TX queue for logical channel.
 *
 * @ch Logical channel pointer
 * @is_ssr 1 = this is a subsystem restart purge
 *
 * Must be called with the following spinlocks locked:
 *  state_lock_lhb1
 *  tx_lock_lhb2
 */
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
{
	struct smux_pkt_t *pkt;
	int send_disconnect = 0;
	struct smux_pkt_t *pkt_tmp;
	int is_state_pkt;

	list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
		is_state_pkt = 0;
		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
				/* Open ACK must still be sent */
				is_state_pkt = 1;
			} else {
				/* Open never sent -- force to closed state */
				ch->local_state = SMUX_LCH_LOCAL_CLOSED;
				send_disconnect = 1;
			}
		} else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
				is_state_pkt = 1;
			if (!send_disconnect)
				is_state_pkt = 1;
		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* Notify client of failed write */
			union notifier_metadata meta_write;

			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
		}

		if (!is_state_pkt || is_ssr) {
			list_del(&pkt->list);
			smux_free_pkt(pkt);
		}
	}

	if (send_disconnect) {
		union notifier_metadata meta_disconnected;

		meta_disconnected.disconnected.is_ssr = smux.in_reset;
		schedule_notify(ch->lcid, SMUX_DISCONNECTED,
			&meta_disconnected);
	}
}

/**
 * Power-up the UART.
 *
 * Must be called with smux.mutex_lha0 already locked.
 */
static void smux_uart_power_on_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_on_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Power down the UART.
 *
 * Must be called with mutex_lha0 locked.
 */
static void smux_uart_power_off_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		/* caller holds mutex_lha0 and is responsible for unlock */
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_off_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_ON) {
		/* wakeup complete */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("smux: %s: wakeup complete\n", __func__);

		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
		queue_work(smux_tx_wq, &smux_tx_work);
	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* retry wakeup */
		wakeup_delay = smux.pwr_wakeup_delay_us;
		smux.pwr_wakeup_delay_us <<= 1;
		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
			smux.pwr_wakeup_delay_us =
				SMUX_WAKEUP_DELAY_MAX;

		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
			queue_work(smux_tx_wq, &smux_wakeup_work);
		} else {
			/* schedule delayed work */
			SMUX_DBG(
			"smux: %s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
		}
	} else {
		/* wakeup aborted */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
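
/*
 * Wakeup back-off progression (descriptive example): pwr_wakeup_delay_us
 * starts at 1 us and doubles on each retry -- 1, 2, 4, ... us -- until
 * it is capped at SMUX_WAKEUP_DELAY_MAX (1 << 20 us, roughly 1 second).
 * Delays below SMUX_WAKEUP_DELAY_MIN are handled inline with
 * usleep_range(); longer delays are converted to milliseconds and
 * rescheduled as delayed work.
 */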


/**
 * Inactivity timeout worker. Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	unsigned long flags;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
					smux.power_state,
					SMUX_PWR_TURNING_OFF_FLUSH);
					smux.power_state =
						SMUX_PWR_TURNING_OFF_FLUSH;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
					list_add_tail(&pkt->list,
							&smux.power_queue);
					queue_work(smux_tx_wq, &smux_tx_work);
				} else {
					SMUX_ERR("%s: packet alloc failed\n",
							__func__);
				}
			}
		}
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
		/* ready to power-down the UART */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux.power_state = SMUX_PWR_OFF;

		/* if data is pending, schedule a new wakeup */
		if (!list_empty(&smux.lch_tx_ready_list) ||
			!list_empty(&smux.power_queue))
			queue_work(smux_tx_wq, &smux_tx_work);

		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

		/* flush UART output queue and power down */
		smux_flush_tty();
		smux_uart_power_off();
	} else {
		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}

/**
 * Remove RX retry packet from channel and free it.
 *
 * @ch Channel for retry packet
 * @retry Retry packet to remove
 *
 * @returns 1 if flow control updated; 0 otherwise
 *
 * Must be called with state_lock_lhb1 locked.
 */
int smux_remove_rx_retry(struct smux_lch_t *ch,
		struct smux_rx_pkt_retry *retry)
{
	int tx_ready = 0;

	list_del(&retry->rx_retry_list);
	--ch->rx_retry_queue_cnt;
	smux_free_pkt(retry->pkt);
	kfree(retry);

	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
			ch->rx_flow_control_auto) {
		ch->rx_flow_control_auto = 0;
		smux_rx_flow_control_updated(ch);
		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
		tx_ready = 1;
	}
	return tx_ready;
}

/**
 * RX worker handles all receive operations.
 *
 * @work Work structure contained in struct smux_rx_worker_data
 */
static void smux_rx_worker(struct work_struct *work)
{
	unsigned long flags;
	int used;
	int initial_rx_state;
	struct smux_rx_worker_data *w;
	const unsigned char *data;
	int len;
	int flag;

	w = container_of(work, struct smux_rx_worker_data, work);
	data = w->data;
	len = w->len;
	flag = w->flag;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	smux.rx_activity_flag = 1;
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
	used = 0;
	do {
		if (smux.in_reset) {
			SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}

		SMUX_DBG("smux: %s: state %d; %d of %d\n",
				__func__, smux.rx_state, used, len);
		initial_rx_state = smux.rx_state;

		switch (smux.rx_state) {
		case SMUX_RX_IDLE:
			smux_rx_handle_idle(data, len, &used, flag);
			break;
		case SMUX_RX_MAGIC:
			smux_rx_handle_magic(data, len, &used, flag);
			break;
		case SMUX_RX_HDR:
			smux_rx_handle_hdr(data, len, &used, flag);
			break;
		case SMUX_RX_PAYLOAD:
			smux_rx_handle_pkt_payload(data, len, &used, flag);
			break;
		default:
			SMUX_DBG("smux: %s: invalid state %d\n",
					__func__, smux.rx_state);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}
	} while (used < len || smux.rx_state != initial_rx_state);

	complete(&w->work_complete);
}

/**
 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
 * because the client was not ready (-EAGAIN).
 *
 * @work Work structure contained in smux_lch_t structure
 */
static void smux_rx_retry_worker(struct work_struct *work)
{
	struct smux_lch_t *ch;
	struct smux_rx_pkt_retry *retry;
	union notifier_metadata metadata;
	int tmp;
	unsigned long flags;
	int immediate_retry = 0;
	int tx_ready = 0;

	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);

	/* get next retry packet */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
		/* port has been closed - remove all retries */
		while (!list_empty(&ch->rx_retry_queue)) {
			retry = list_first_entry(&ch->rx_retry_queue,
						struct smux_rx_pkt_retry,
						rx_retry_list);
			(void)smux_remove_rx_retry(ch, retry);
		}
	}

	if (list_empty(&ch->rx_retry_queue)) {
		SMUX_DBG("smux: %s: retry list empty for channel %d\n",
				__func__, ch->lcid);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		return;
	}
	retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
			__func__, ch->lcid, retry);
	metadata.read.pkt_priv = 0;
	metadata.read.buffer = 0;
	tmp = ch->get_rx_buffer(ch->priv,
			(void **)&metadata.read.pkt_priv,
			(void **)&metadata.read.buffer,
			retry->pkt->hdr.payload_len);
	if (tmp == 0 && metadata.read.buffer) {
		/* have valid RX buffer */

		memcpy(metadata.read.buffer, retry->pkt->payload,
						retry->pkt->hdr.payload_len);
		metadata.read.len = retry->pkt->hdr.payload_len;

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
		if (tx_ready)
			list_channel(ch);

		immediate_retry = 1;
	} else if (tmp == -EAGAIN ||
			(tmp == 0 && !metadata.read.buffer)) {
		/* retry again */
		retry->timeout_in_ms <<= 1;
		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
			/* timed out */
			SMUX_ERR("%s: ch %d RX retry client timeout\n",
					__func__, ch->lcid);
			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
			tx_ready = smux_remove_rx_retry(ch, retry);
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
			if (tx_ready)
				list_channel(ch);
		}
	} else {
		/* client error - drop packet */
		SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
				__func__, ch->lcid, tmp);
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
		if (tx_ready)
			list_channel(ch);
	}

	/* schedule next retry */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (!list_empty(&ch->rx_retry_queue)) {
		retry = list_first_entry(&ch->rx_retry_queue,
						struct smux_rx_pkt_retry,
						rx_retry_list);

		if (immediate_retry)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
		else
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
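
/*
 * RX retry pacing (descriptive note): timeout_in_ms starts at
 * SMUX_RX_RETRY_MIN_MS and doubles after each failed get_rx_buffer
 * attempt until it exceeds SMUX_RX_RETRY_MAX_MS, at which point the
 * packet is dropped and the client is notified via SMUX_READ_FAIL.
 * A successful retry reschedules the worker immediately so any
 * remaining queued packets are drained without delay.
 */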

/**
 * Transmit worker handles serializing and transmitting packets onto the
 * underlying transport.
 *
 * @work Work structure (not used)
 */
static void smux_tx_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	struct smux_lch_t *ch;
	unsigned low_wm_notif;
	unsigned lcid;
	unsigned long flags;

	/*
	 * Transmit packets in round-robin fashion based upon ready
	 * channels.
	 *
	 * To eliminate the need to hold a lock for the entire
	 * iteration through the channel ready list, the head of the
	 * ready-channel list is always the next channel to be
	 * processed. To send a packet, the first valid packet in
	 * the head channel is removed and the head channel is then
	 * rescheduled at the end of the queue by removing it and
	 * inserting after the tail. The locks can then be released
	 * while the packet is processed.
	 */
	while (!smux.in_reset) {
		pkt = NULL;
		low_wm_notif = 0;

		spin_lock_irqsave(&smux.tx_lock_lha2, flags);

		/* handle wakeup if needed */
		if (smux.power_state == SMUX_PWR_OFF) {
			if (!list_empty(&smux.lch_tx_ready_list) ||
			    !list_empty(&smux.power_queue)) {
				/* data to transmit, do wakeup */
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_ON);
				smux.power_state = SMUX_PWR_TURNING_ON;
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
				queue_work(smux_tx_wq, &smux_wakeup_work);
			} else {
				/* no activity -- stay asleep */
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
			}
			break;
		}

		/* process any pending power packets */
		if (!list_empty(&smux.power_queue)) {
			pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t, list);
			list_del(&pkt->list);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* Adjust power state if this is a flush command */
			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
					smux.power_ctl_remote_req_received) {
					/*
					 * Sending remote power-down request ACK
					 * or sending local power-down request
					 * and we already received a remote
					 * power-down request.
					 */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_OFF_FLUSH);
					smux.power_state = SMUX_PWR_OFF_FLUSH;
					smux.power_ctl_remote_req_received = 0;
					queue_work(smux_tx_wq,
							&smux_inactivity_work);
				} else {
					/* sending local power-down request */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;
				}
			}
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* send the packet */
			smux_uart_power_on();
			smux.tx_activity_flag = 1;
			SMUX_PWR_PKT_TX(pkt);
			if (!smux_byte_loopback) {
				smux_tx_tty(pkt);
				smux_flush_tty();
			} else {
				smux_tx_loopback(pkt);
			}

			smux_free_pkt(pkt);
			continue;
		}

		/* get the next ready channel */
		if (list_empty(&smux.lch_tx_ready_list)) {
			/* no ready channels */
			SMUX_DBG("smux: %s: no more ready channels, exiting\n",
					__func__);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}
		smux.tx_activity_flag = 1;

		if (smux.power_state != SMUX_PWR_ON) {
			/* channel not ready to transmit */
			SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
					__func__,
					smux.power_state);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}

		/* get the next packet to send and rotate channel list */
		ch = list_first_entry(&smux.lch_tx_ready_list,
					struct smux_lch_t,
					tx_ready_list);

		spin_lock(&ch->state_lock_lhb1);
		spin_lock(&ch->tx_lock_lhb2);
		if (!list_empty(&ch->tx_queue)) {
			/*
			 * If remote TX flow control is enabled or
			 * the channel is not fully opened, then only
			 * send command packets.
			 */
			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
				struct smux_pkt_t *curr;
				list_for_each_entry(curr, &ch->tx_queue, list) {
					if (curr->hdr.cmd != SMUX_CMD_DATA) {
						pkt = curr;
						break;
					}
				}
			} else {
				/* get next cmd/data packet to send */
				pkt = list_first_entry(&ch->tx_queue,
						struct smux_pkt_t, list);
			}
		}

		if (pkt) {
			list_del(&pkt->list);

			/* update packet stats */
			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
				--ch->tx_pending_data_cnt;
				if (ch->notify_lwm &&
					ch->tx_pending_data_cnt
						<= SMUX_TX_WM_LOW) {
					ch->notify_lwm = 0;
					low_wm_notif = 1;
				}
			}

			/* advance to the next ready channel */
			list_rotate_left(&smux.lch_tx_ready_list);
		} else {
			/* no data in channel to send, remove from ready list */
			list_del(&ch->tx_ready_list);
			INIT_LIST_HEAD(&ch->tx_ready_list);
		}
		lcid = ch->lcid;
		spin_unlock(&ch->tx_lock_lhb2);
		spin_unlock(&ch->state_lock_lhb1);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (low_wm_notif)
			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);

		/* send the packet */
		smux_tx_pkt(ch, pkt);
		smux_free_pkt(pkt);
	}
}

/**
 * Update the RX flow control (sent in the TIOCM Status command).
 *
 * @ch Channel for update
 *
 * @returns 1 for updated, 0 for not updated
 *
 * Must be called with ch->state_lock_lhb1 locked.
 */
static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
{
	int updated = 0;
	int prev_state;

	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;

	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
	else
		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;

	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
		smux_send_status_cmd(ch);
		updated = 1;
	}

	return updated;
}

/**
 * Flush all SMUX workqueues.
 *
 * This sets the reset bit to abort any processing loops and then
 * flushes the workqueues to ensure that no new pending work is
 * running. Do not call with any locks used by workers held as
 * this will result in a deadlock.
 */
static void smux_flush_workqueues(void)
{
	smux.in_reset = 1;

	SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
	SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
	flush_workqueue(smux_notify_wq);
}

/**********************************************************************/
/* Kernel API                                                         */
/**********************************************************************/

/**
 * Set or clear channel option using the SMUX_CH_OPTION_* channel
 * flags.
 *
 * @lcid Logical channel ID
 * @set Options to set
 * @clear Options to clear
 *
 * @returns 0 for success, < 0 for failure
 */
int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
{
	unsigned long flags;
	struct smux_lch_t *ch;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	/* Local loopback mode */
	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;

	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* Remote loopback mode */
	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* RX Flow control */
	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 1;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	/* Auto RX Flow Control */
	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
			__func__);
		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
	}

	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
			__func__);
		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->rx_flow_control_auto = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
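
/*
 * Example usage (hypothetical client code, minimal sketch): enable
 * automatic RX flow control on a channel and later return it to the
 * default behavior. 'lcid' is assumed to be a valid channel ID.
 *
 *	msm_smux_set_ch_option(lcid,
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
 *	...
 *	msm_smux_set_ch_option(lcid, 0,
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
 */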

/**
 * Starts the opening sequence for a logical channel.
 *
 * @lcid Logical channel ID
 * @priv Free for client usage
 * @notify Event notification function
 * @get_rx_buffer Function used to provide a receive buffer to SMUX
 *
 * @returns 0 for success, <0 otherwise
 *
 * The channel must be fully closed (either never previously opened, or
 * msm_smux_close() has been called and the SMUX_DISCONNECTED
 * notification has been received).
 *
 * Once the remote side is opened, the client will receive a
 * SMUX_CONNECTED event.
 */
int msm_smux_open(uint8_t lcid, void *priv,
	void (*notify)(void *priv, int event_type, const void *metadata),
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size))
{
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
		SMUX_ERR("%s: open lcid %d local state %x invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
		goto out;
	}

	SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			ch->local_state,
			SMUX_LCH_LOCAL_OPENING);

	ch->rx_flow_control_auto = 0;
	ch->local_state = SMUX_LCH_LOCAL_OPENING;

	ch->priv = priv;
	ch->notify = notify;
	ch->get_rx_buffer = get_rx_buffer;
	ret = 0;

	/* Send Open Command */
	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}
	pkt->hdr.magic = SMUX_MAGIC;
	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
	pkt->hdr.lcid = lcid;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);
	tx_ready = 1;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	smux_rx_flow_control_updated(ch);
	if (tx_ready)
		list_channel(ch);
	return ret;
}
3098
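/*
 * Illustrative sketch (not part of the driver): a minimal client open
 * sequence. The callback and buffer helper names are hypothetical; only
 * msm_smux_open() and the SMUX_CONNECTED event come from this API.
 *
 *	static void client_notify(void *priv, int event, const void *meta)
 *	{
 *		if (event == SMUX_CONNECTED)
 *			pr_info("channel fully open, writes may proceed\n");
 *	}
 *
 *	static int client_get_rx_buffer(void *priv, void **pkt_priv,
 *					void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;
 *	}
 *
 *	ret = msm_smux_open(lcid, NULL, client_notify, client_get_rx_buffer);
 */
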
/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid	Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the client
 * will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;
	ch->tx_flow_control = 0;

	/* Purge TX queue */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch, 0);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
	    ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

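/*
 * Illustrative sketch (not part of the driver): a clean shutdown waits for
 * SMUX_DISCONNECTED before the client tears down its own state, since the
 * close is only complete once the remote side has acknowledged it. The
 * completion variable is hypothetical and would be signaled from the
 * client's notify callback.
 *
 *	static DECLARE_COMPLETION(client_closed);
 *
 *	// in the notify callback:
 *	//	if (event == SMUX_DISCONNECTED)
 *	//		complete(&client_closed);
 *
 *	msm_smux_close(lcid);
 *	wait_for_completion(&client_closed);
 */
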
/**
 * Write data to a logical channel.
 *
 * @lcid	Logical channel ID
 * @pkt_priv	Client data that will be returned with the SMUX_WRITE_DONE or
 *		SMUX_WRITE_FAIL notification.
 * @data	Data to write
 * @len		Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt = NULL;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
	    ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		SMUX_ERR("%s: invalid local state %d channel %d\n",
					__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		SMUX_ERR("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);
	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		SMUX_ERR("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	/* pkt is NULL until allocated; only free a real packet on error */
	if (ret && pkt)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

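/*
 * Illustrative sketch (not part of the driver): a client write with
 * back-pressure handling. On -EAGAIN the TX queue is at the high
 * watermark, so the client should back off until the low-watermark
 * notification (or poll msm_smux_is_ch_low() below). Note the driver
 * queues the caller's buffer by reference, so it must remain valid until
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL returns pkt_priv to the client.
 *
 *	ret = msm_smux_write(lcid, my_req, my_buf, my_len);
 *	if (ret == -EAGAIN) {
 *		// queue full: retry after the low-watermark event
 *	}
 */
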
/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid	Logical channel ID
 * @returns	0 if channel is not full
 *		1 if it is full
 *		< 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid	Logical channel ID
 * @returns	0 if channel is above low watermark
 *		1 if it's at or below the low watermark
 *		< 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}

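/*
 * Illustrative sketch (not part of the driver): pacing a bulk sender with
 * the watermark queries above. The send helpers are hypothetical.
 *
 *	while (have_data_to_send()) {
 *		if (msm_smux_is_ch_full(lcid) != 0)
 *			break;	// full (or error); resume on low watermark
 *		send_next_chunk(lcid);
 *	}
 */
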
/**
 * Send TIOCM status update.
 *
 * @ch	Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch	Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid	Logical channel ID
 *
 * @returns	>= 0 TIOCM status bits
 *		< 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid	Logical channel ID
 * @set		Bits to set
 * @clear	Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

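/*
 * Illustrative sketch (not part of the driver): raising DTR and RTS on a
 * channel, then reading back the remote status. Per the mapping above, a
 * local DTR/RTS assert arrives at the peer as DSR/CTS.
 *
 *	msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	if (msm_smux_tiocm_get(lcid) & TIOCM_CTS) {
 *		// remote end is ready to receive
 *	}
 */
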
/**********************************************************************/
/* Subsystem Restart */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this	Pointer to ssr_notifier
 * @code	SSR Code
 * @data	Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		smux.remote_is_alive = 0;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					SMUX_ERR(
					 "%s: error %d registering device %s\n",
					 __func__, tmp, smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		SMUX_ERR("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
			__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			SMUX_ERR("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty	TTY structure
 * @cp	Character data
 * @fp	Flag data
 * @count	Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};

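/*
 * Illustrative sketch (not part of the driver): from userspace the mux is
 * activated by pushing this line discipline onto the serial device with
 * the TIOCSETD ioctl. The device path is hypothetical; N_SMUX is the
 * ldisc number registered below.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 */
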
static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		SMUX_ERR("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		SMUX_ERR("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		SMUX_ERR("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		SMUX_ERR("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);