/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10) /* 1024 ms */

enum {
        MSM_SMUX_DEBUG = 1U << 0,
        MSM_SMUX_INFO = 1U << 1,
        MSM_SMUX_POWER_INFO = 1U << 2,
        MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup delay and byte loopback used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
        if (!disable_ipc_logging && log_ctx) \
                ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
        if (smux_debug_mask & MSM_SMUX_DEBUG) \
                IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
        pr_err(x); \
        IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
        if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
                IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
        if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
                smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
        if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
                if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
                        pkt->hdr.flags == SMUX_WAKEUP_ACK) \
                        IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
                else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
                        pkt->hdr.flags == SMUX_WAKEUP_REQ) \
                        IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
                else \
                        smux_log_pkt(pkt, 0); \
        } \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
        if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
                smux_log_pkt(pkt, 0); \
        } \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
        if (smux_debug_mask & MSM_SMUX_PKT) \
                smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
        if (smux_debug_mask & MSM_SMUX_PKT) \
                smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
        (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
         && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

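/* Platform devices for SMUX client drivers (see platform_devs_registered) */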
static struct platform_device smux_devs[] = {
        {.name = "SMUX_CTL", .id = -1},
        {.name = "SMUX_RMNET", .id = -1},
        {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
        {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
        {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
        {.name = "SMUX_DIAG", .id = -1},
};

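/* SMUX_CMD_STATUS flags */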
enum {
        SMUX_CMD_STATUS_RTC = 1 << 0,
        SMUX_CMD_STATUS_RTR = 1 << 1,
        SMUX_CMD_STATUS_RI = 1 << 2,
        SMUX_CMD_STATUS_DCD = 1 << 3,
        SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
        SMUX_LCH_MODE_NORMAL,
        SMUX_LCH_MODE_LOCAL_LOOPBACK,
        SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

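/* RX state machine states (see smux_ldisc_t.rx_state) */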
enum {
        SMUX_RX_IDLE,
        SMUX_RX_MAGIC,
        SMUX_RX_HDR,
        SMUX_RX_PAYLOAD,
        SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
        SMUX_PWR_OFF,
        SMUX_PWR_TURNING_ON,
        SMUX_PWR_ON,
        SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
        SMUX_PWR_TURNING_OFF,
        SMUX_PWR_OFF_FLUSH,
};

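/* Event metadata passed to the client notify() callbacks */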
union notifier_metadata {
        struct smux_meta_disconnected disconnected;
        struct smux_meta_read read;
        struct smux_meta_write write;
        struct smux_meta_tiocm tiocm;
};

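/* Deferred client notification queued on smux_notify_fifo */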
struct smux_notify_handle {
        void (*notify)(void *priv, int event_type, const void *metadata);
        void *priv;
        int event_type;
        union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. The structure temporarily holds the packet data while
 * the retry is performed.
 */
struct smux_rx_pkt_retry {
        struct smux_pkt_t *pkt;
        struct list_head rx_retry_list;
        unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
        const unsigned char *data;
        int len;
        int flag;

        struct work_struct work;
        struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance exists since multiple instances of the line discipline
 * are not allowed.
 */
struct smux_ldisc_t {
        struct mutex mutex_lha0;

        int is_initialized;
        int platform_devs_registered;
        int in_reset;
        int remote_is_alive;
        int ld_open_count;
        struct tty_struct *tty;

        /* RX State Machine (single-threaded access by smux_rx_wq) */
        unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
        unsigned int recv_len;
        unsigned int pkt_remain;
        unsigned rx_state;

        /* RX Activity - accessed by multiple threads */
        spinlock_t rx_lock_lha1;
        unsigned rx_activity_flag;

        /* TX / Power */
        spinlock_t tx_lock_lha2;
        struct list_head lch_tx_ready_list;
        unsigned power_state;
        unsigned pwr_wakeup_delay_us;
        unsigned tx_activity_flag;
        unsigned powerdown_enabled;
        unsigned power_ctl_remote_req_received;
        struct list_head power_queue;
        unsigned remote_initiated_wakeup_count;
        unsigned local_initiated_wakeup_count;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
        [TTY_NORMAL] = "normal",
        [TTY_OVERRUN] = "overrun",
        [TTY_BREAK] = "break",
        [TTY_PARITY] = "parity",
        [TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
        [SMUX_CMD_DATA] = "DATA",
        [SMUX_CMD_OPEN_LCH] = "OPEN",
        [SMUX_CMD_CLOSE_LCH] = "CLOSE",
        [SMUX_CMD_STATUS] = "STATUS",
        [SMUX_CMD_PWR_CTL] = "PWR",
        [SMUX_CMD_DELAY] = "DELAY",
        [SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
        [SMUX_CONNECTED] = "CONNECTED",
        [SMUX_DISCONNECTED] = "DISCONNECTED",
        [SMUX_READ_DONE] = "READ_DONE",
        [SMUX_READ_FAIL] = "READ_FAIL",
        [SMUX_WRITE_DONE] = "WRITE_DONE",
        [SMUX_WRITE_FAIL] = "WRITE_FAIL",
        [SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
        [SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
        [SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
        [SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
        [SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
        [SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
        [SMUX_LCH_LOCAL_OPENING] = "OPENING",
        [SMUX_LCH_LOCAL_OPENED] = "OPENED",
        [SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
        [SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
        [SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
        [SMUX_LCH_MODE_NORMAL] = "N",
        [SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
        [SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
        [SMUX_UNDEF_LONG] = "UNDEF",
        [SMUX_UNDEF_SHORT] = "U",
};

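/* IPC logging context used by IPC_LOG_STR() */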
static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
                smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
                        const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
                        unsigned long code,
                        void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
        if (state < ARRAY_SIZE(smux_local_state))
                return smux_local_state[state];
        else
                return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
        if (state < ARRAY_SIZE(smux_remote_state))
                return smux_remote_state[state];
        else
                return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
        if (mode < ARRAY_SIZE(smux_mode))
                return smux_mode[mode];
        else
                return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
        if (flag < ARRAY_SIZE(tty_error_type))
                return tty_error_type[flag];
        return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
        if (cmd < ARRAY_SIZE(smux_cmds))
                return smux_cmds[cmd];
        return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
        if (event < ARRAY_SIZE(smux_events))
                return smux_events[event];
        return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
        SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
        smux.in_reset = 1;
        smux.remote_is_alive = 0;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
        unsigned int id;
        struct smux_lch_t *ch;
        int i = 0;

        handle_size = sizeof(struct smux_notify_handle *);

        smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
        smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
        smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

        if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
                SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
                                __func__);
                return -ENOMEM;
        }

        i |= kfifo_alloc(&smux_notify_fifo,
                        SMUX_NOTIFY_FIFO_SIZE * handle_size,
                        GFP_KERNEL);
        i |= smux_loopback_init();

        if (i) {
                SMUX_ERR("%s: out of memory error\n", __func__);
                return -ENOMEM;
        }

        for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
                ch = &smux_lch[id];

                spin_lock_init(&ch->state_lock_lhb1);
                ch->lcid = id;
                ch->local_state = SMUX_LCH_LOCAL_CLOSED;
                ch->local_mode = SMUX_LCH_MODE_NORMAL;
                ch->local_tiocm = 0x0;
                ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
                ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
                ch->remote_mode = SMUX_LCH_MODE_NORMAL;
                ch->remote_tiocm = 0x0;
                ch->tx_flow_control = 0;
                ch->rx_flow_control_auto = 0;
                ch->rx_flow_control_client = 0;
                ch->priv = 0;
                ch->notify = 0;
                ch->get_rx_buffer = 0;

                INIT_LIST_HEAD(&ch->rx_retry_queue);
                ch->rx_retry_queue_cnt = 0;
                INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

                spin_lock_init(&ch->tx_lock_lhb2);
                INIT_LIST_HEAD(&ch->tx_queue);
                INIT_LIST_HEAD(&ch->tx_ready_list);
                ch->tx_pending_data_cnt = 0;
                ch->notify_lwm = 0;
        }

        return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
        struct smux_lch_t *ch;
        unsigned long flags;
        int i;

        /* Empty TX ready list */
        spin_lock_irqsave(&smux.tx_lock_lha2, flags);
        while (!list_empty(&smux.lch_tx_ready_list)) {
                SMUX_DBG("smux: %s: emptying ready list %p\n",
                                __func__, smux.lch_tx_ready_list.next);
                ch = list_first_entry(&smux.lch_tx_ready_list,
                                struct smux_lch_t,
                                tx_ready_list);
                list_del(&ch->tx_ready_list);
                INIT_LIST_HEAD(&ch->tx_ready_list);
        }

        /* Purge Power Queue */
        while (!list_empty(&smux.power_queue)) {
                struct smux_pkt_t *pkt;

                pkt = list_first_entry(&smux.power_queue,
                                struct smux_pkt_t,
                                list);
                list_del(&pkt->list);
                SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
                                __func__, pkt);
                smux_free_pkt(pkt);
        }
        spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

        /* Close all ports */
        for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
                ch = &smux_lch[i];
                SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

                spin_lock_irqsave(&ch->state_lock_lhb1, flags);

                /* Purge TX queue */
                spin_lock(&ch->tx_lock_lhb2);
                smux_purge_ch_tx_queue(ch, 1);
                spin_unlock(&ch->tx_lock_lhb2);

                /* Notify user of disconnect and reset channel state */
                if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
                        ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
                        union notifier_metadata meta;

                        meta.disconnected.is_ssr = smux.in_reset;
                        schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
                }

                ch->local_state = SMUX_LCH_LOCAL_CLOSED;
                ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
                ch->remote_mode = SMUX_LCH_MODE_NORMAL;
                ch->tx_flow_control = 0;
                ch->rx_flow_control_auto = 0;
                ch->rx_flow_control_client = 0;

                /* Purge RX retry queue */
                if (ch->rx_retry_queue_cnt)
                        queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

                spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
        }
}

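/**
 * Verify that a logical channel ID is valid.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 if the ID is valid, -ENXIO otherwise
 */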
int smux_assert_lch_id(uint32_t lcid)
{
        if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
                return -ENXIO;
        else
                return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:  R = Receive, S = Send
 * Local State: C = Closed; c = closing; o = opening; O = Opened
 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode: R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
        char logbuf[SMUX_PKT_LOG_SIZE];
        char cmd_extra[16];
        int i = 0;
        int count;
        int len;
        char local_state;
        char local_mode;
        char remote_state;
        char remote_mode;
        struct smux_lch_t *ch = NULL;
        unsigned char *data;

        if (!smux_assert_lch_id(pkt->hdr.lcid))
                ch = &smux_lch[pkt->hdr.lcid];

        if (ch) {
                switch (ch->local_state) {
                case SMUX_LCH_LOCAL_CLOSED:
                        local_state = 'C';
                        break;
                case SMUX_LCH_LOCAL_OPENING:
                        local_state = 'o';
                        break;
                case SMUX_LCH_LOCAL_OPENED:
                        local_state = 'O';
                        break;
                case SMUX_LCH_LOCAL_CLOSING:
                        local_state = 'c';
                        break;
                default:
                        local_state = 'U';
                        break;
                }

                switch (ch->local_mode) {
                case SMUX_LCH_MODE_LOCAL_LOOPBACK:
                        local_mode = 'L';
                        break;
                case SMUX_LCH_MODE_REMOTE_LOOPBACK:
                        local_mode = 'R';
                        break;
                case SMUX_LCH_MODE_NORMAL:
                        local_mode = 'N';
                        break;
                default:
                        local_mode = 'U';
                        break;
                }

                switch (ch->remote_state) {
                case SMUX_LCH_REMOTE_CLOSED:
                        remote_state = 'C';
                        break;
                case SMUX_LCH_REMOTE_OPENED:
                        remote_state = 'O';
                        break;

                default:
                        remote_state = 'U';
                        break;
                }

                switch (ch->remote_mode) {
                case SMUX_LCH_MODE_REMOTE_LOOPBACK:
                        remote_mode = 'R';
                        break;
                case SMUX_LCH_MODE_NORMAL:
                        remote_mode = 'N';
                        break;
                default:
                        remote_mode = 'U';
                        break;
                }
        } else {
                /* broadcast channel */
                local_state = '-';
                local_mode = '-';
                remote_state = '-';
                remote_mode = '-';
        }

        /* determine command type (ACK, etc) */
        cmd_extra[0] = '\0';
        switch (pkt->hdr.cmd) {
        case SMUX_CMD_OPEN_LCH:
                if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
                        snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
                break;
        case SMUX_CMD_CLOSE_LCH:
                if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
                        snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
                break;

        case SMUX_CMD_PWR_CTL:
                if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
                        snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
                break;
        }

        i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
                        "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
                        is_recv ? 'R' : 'S', pkt->hdr.lcid,
                        local_state, local_mode,
                        remote_state, remote_mode,
                        cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
                        pkt->hdr.payload_len, pkt->hdr.pad_len);

        len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
        data = (unsigned char *)pkt->payload;
        for (count = 0; count < len; count++)
                i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
                                "%02x ", (unsigned)data[count]);

        IPC_LOG_STR(logbuf);
}

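/**
 * Worker function that drains the notification FIFO and delivers the queued
 * events to the client notification callbacks.
 */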
static void smux_notify_local_fn(struct work_struct *work)
{
        struct smux_notify_handle *notify_handle = NULL;
        union notifier_metadata *metadata = NULL;
        unsigned long flags;
        int i;

        for (;;) {
                /* retrieve notification */
                spin_lock_irqsave(&notify_lock_lhc1, flags);
                if (kfifo_len(&smux_notify_fifo) >= handle_size) {
                        i = kfifo_out(&smux_notify_fifo,
                                        &notify_handle,
                                        handle_size);
                        if (i != handle_size) {
                                SMUX_ERR(
                                        "%s: unable to retrieve handle %d expected %d\n",
                                        __func__, i, handle_size);
                                spin_unlock_irqrestore(&notify_lock_lhc1, flags);
                                break;
                        }
                } else {
                        spin_unlock_irqrestore(&notify_lock_lhc1, flags);
                        break;
                }
                --queued_fifo_notifications;
                spin_unlock_irqrestore(&notify_lock_lhc1, flags);

                /* notify client */
                metadata = notify_handle->metadata;
                notify_handle->notify(notify_handle->priv,
                                notify_handle->event_type,
                                metadata);

                kfree(metadata);
                kfree(notify_handle);
        }
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
        memset(pkt, 0x0, sizeof(*pkt));
        pkt->hdr.magic = SMUX_MAGIC;
        INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate the payload so that it will be
 * freed automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
        struct smux_pkt_t *pkt;

        /* Consider a free list implementation instead of kmalloc */
        pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
        if (!pkt) {
                SMUX_ERR("%s: out of memory\n", __func__);
                return NULL;
        }
        smux_init_pkt(pkt);
        pkt->allocated = 1;

        return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
        if (pkt) {
                if (pkt->free_payload)
                        kfree(pkt->payload);
                if (pkt->allocated)
                        kfree(pkt);
        }
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
        if (!pkt)
                return -EINVAL;

        pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
        pkt->free_payload = 1;
        if (!pkt->payload) {
                SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
                                __func__, pkt->hdr.payload_len);
                return -ENOMEM;
        }

        return 0;
}

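/**
 * Schedule a client notification for delivery by the notify workqueue.
 *
 * @lcid Logical channel ID
 * @event Event type (one of the SMUX_* events)
 * @metadata Event metadata (copied; may be NULL)
 *
 * @returns 0 for success, < 0 upon error
 */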
static int schedule_notify(uint8_t lcid, int event,
                        const union notifier_metadata *metadata)
{
        struct smux_notify_handle *notify_handle = 0;
        union notifier_metadata *meta_copy = 0;
        struct smux_lch_t *ch;
        int i;
        unsigned long flags;
        int ret = 0;

        IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
        ch = &smux_lch[lcid];
        notify_handle = kzalloc(sizeof(struct smux_notify_handle),
                        GFP_ATOMIC);
        if (!notify_handle) {
                SMUX_ERR("%s: out of memory\n", __func__);
                ret = -ENOMEM;
                goto free_out;
        }

        notify_handle->notify = ch->notify;
        notify_handle->priv = ch->priv;
        notify_handle->event_type = event;
        if (metadata) {
                meta_copy = kzalloc(sizeof(union notifier_metadata),
                                GFP_ATOMIC);
                if (!meta_copy) {
                        SMUX_ERR("%s: out of memory\n", __func__);
                        ret = -ENOMEM;
                        goto free_out;
                }
                *meta_copy = *metadata;
                notify_handle->metadata = meta_copy;
        } else {
                notify_handle->metadata = NULL;
        }

        spin_lock_irqsave(&notify_lock_lhc1, flags);
        i = kfifo_avail(&smux_notify_fifo);
        if (i < handle_size) {
                SMUX_ERR("%s: fifo full error %d expected %d\n",
                                __func__, i, handle_size);
                ret = -ENOMEM;
                goto unlock_out;
        }

        i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
        if (i < 0 || i != handle_size) {
                SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
                                __func__, i, handle_size);
                ret = -ENOSPC;
                goto unlock_out;
        }
        ++queued_fifo_notifications;

unlock_out:
        spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
        queue_work(smux_notify_wq, &smux_notify_local);
        if (ret < 0 && notify_handle) {
                kfree(notify_handle->metadata);
                kfree(notify_handle);
        }
        return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
        unsigned int size;

        size = sizeof(struct smux_hdr_t);
        size += pkt->hdr.payload_len;
        size += pkt->hdr.pad_len;

        return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt Packet to serialize
 * @out Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
                        unsigned int *out_len)
{
        char *data_start = out;

        if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
                SMUX_ERR("%s: packet size %d too big\n",
                                __func__, smux_serialize_size(pkt));
                return -E2BIG;
        }

        memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
        out += sizeof(struct smux_hdr_t);
        if (pkt->payload) {
                memcpy(out, pkt->payload, pkt->hdr.payload_len);
                out += pkt->hdr.payload_len;
        }
        if (pkt->hdr.pad_len) {
                memset(out, 0x0, pkt->hdr.pad_len);
                out += pkt->hdr.pad_len;
        }
        *out_len = out - data_start;
        return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
                                unsigned int *out_len)
{
        *out = (char *)&pkt->hdr;
        *out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
                                unsigned int *out_len)
{
        *out = pkt->payload;
        *out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized payload length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
                                unsigned int *out_len)
{
        *out = NULL;
        *out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
        int data_written;

        if (!data)
                return 0;

        while (len > 0 && !smux.in_reset) {
                data_written = smux.tty->ops->write(smux.tty, data, len);
                if (data_written >= 0) {
                        len -= data_written;
                        data += data_written;
                } else {
                        SMUX_ERR("%s: TTY write returned error %d\n",
                                        __func__, data_written);
                        return data_written;
                }

                if (len)
                        tty_wait_until_sent(smux.tty,
                                msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
        }
        return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
        char *data;
        unsigned int len;
        int ret;

        if (!smux.tty) {
                SMUX_ERR("%s: TTY not initialized", __func__);
                return -ENOTTY;
        }

        if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
                SMUX_DBG("smux: %s: tty send single byte\n", __func__);
                ret = write_to_tty(&pkt->hdr.flags, 1);
                return ret;
        }

        smux_serialize_hdr(pkt, &data, &len);
        ret = write_to_tty(data, len);
        if (ret) {
                SMUX_ERR("%s: failed %d to write header %d\n",
                                __func__, ret, len);
                return ret;
        }

        smux_serialize_payload(pkt, &data, &len);
        ret = write_to_tty(data, len);
        if (ret) {
                SMUX_ERR("%s: failed %d to write payload %d\n",
                                __func__, ret, len);
                return ret;
        }

        smux_serialize_padding(pkt, &data, &len);
        while (len > 0) {
                char zero = 0x0;
                ret = write_to_tty(&zero, 1);
                if (ret) {
                        SMUX_ERR("%s: failed %d to write padding %d\n",
                                        __func__, ret, len);
                        return ret;
                }
                --len;
        }
        return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
        struct smux_pkt_t *pkt;

        pkt = smux_alloc_pkt();
        if (!pkt) {
                SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
                return;
        }
        pkt->hdr.cmd = SMUX_CMD_BYTE;
        pkt->hdr.flags = ch;
        pkt->hdr.lcid = SMUX_BROADCAST_LCID;

        list_add_tail(&pkt->list, &smux.power_queue);
        queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
        struct smux_pkt_t pkt;

        smux_init_pkt(&pkt);
        pkt.hdr.lcid = lcid;
        pkt.hdr.cmd = SMUX_CMD_BYTE;
        pkt.hdr.flags = ch;

        return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch Channel to queue packet on
 * @queue Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
                        int queue)
{
        unsigned long flags;

        SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

        spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
        list_add_tail(&pkt_ptr->list, &ch->tx_queue);
        spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

        if (queue)
                list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
        uint8_t lcid;
        int ret;
        struct smux_lch_t *ch;
        int enable_powerdown = 0;

        lcid = pkt->hdr.lcid;
        ch = &smux_lch[lcid];

        spin_lock(&ch->state_lock_lhb1);
        if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
                SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
                                ch->local_state,
                                SMUX_LCH_LOCAL_OPENED);

                if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
                        enable_powerdown = 1;

                ch->local_state = SMUX_LCH_LOCAL_OPENED;
                if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
                        schedule_notify(lcid, SMUX_CONNECTED, NULL);
                        if (!(list_empty(&ch->tx_queue)))
                                list_channel(ch);
                }
                ret = 0;
        } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
                SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
                ret = 0;
        } else {
                SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
                                __func__, lcid, ch->local_state);
                ret = -EINVAL;
        }
        spin_unlock(&ch->state_lock_lhb1);

        if (enable_powerdown) {
                spin_lock(&smux.tx_lock_lha2);
                if (!smux.powerdown_enabled) {
                        smux.powerdown_enabled = 1;
                        SMUX_DBG("smux: %s: enabling power-collapse support\n",
                                        __func__);
                }
                spin_unlock(&smux.tx_lock_lha2);
        }

        return ret;
}

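/**
 * Handle receive CLOSE ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */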
static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
        uint8_t lcid;
        int ret;
        struct smux_lch_t *ch;
        union notifier_metadata meta_disconnected;
        unsigned long flags;

        lcid = pkt->hdr.lcid;
        ch = &smux_lch[lcid];
        meta_disconnected.disconnected.is_ssr = 0;

        spin_lock_irqsave(&ch->state_lock_lhb1, flags);

        if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
                SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
                                SMUX_LCH_LOCAL_CLOSING,
                                SMUX_LCH_LOCAL_CLOSED);
                ch->local_state = SMUX_LCH_LOCAL_CLOSED;
                if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
                        schedule_notify(lcid, SMUX_DISCONNECTED,
                                        &meta_disconnected);
                ret = 0;
        } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
                SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
                ret = 0;
        } else {
                SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
                                __func__, lcid, ch->local_state);
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
        return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
        uint8_t lcid;
        int ret;
        struct smux_lch_t *ch;
        struct smux_pkt_t *ack_pkt;
        unsigned long flags;
        int tx_ready = 0;
        int enable_powerdown = 0;

        if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
                return smux_handle_rx_open_ack(pkt);

        lcid = pkt->hdr.lcid;
        ch = &smux_lch[lcid];

        spin_lock_irqsave(&ch->state_lock_lhb1, flags);

        if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
                SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
                                SMUX_LCH_REMOTE_CLOSED,
                                SMUX_LCH_REMOTE_OPENED);

                ch->remote_state = SMUX_LCH_REMOTE_OPENED;
                if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
                        enable_powerdown = 1;

                /* Send Open ACK */
                ack_pkt = smux_alloc_pkt();
                if (!ack_pkt) {
                        /* exit out to allow retrying this later */
                        ret = -ENOMEM;
                        goto out;
                }
                ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
                ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
                if (enable_powerdown)
                        ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
                ack_pkt->hdr.lcid = lcid;
                ack_pkt->hdr.payload_len = 0;
                ack_pkt->hdr.pad_len = 0;
                if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
                        ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
                        ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
                }
                smux_tx_queue(ack_pkt, ch, 0);
                tx_ready = 1;

                if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
                        /*
                         * Send an Open command to the remote side to
                         * simulate our local client doing it.
                         */
                        ack_pkt = smux_alloc_pkt();
                        if (ack_pkt) {
                                ack_pkt->hdr.lcid = lcid;
                                ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
                                if (enable_powerdown)
                                        ack_pkt->hdr.flags |=
                                                SMUX_CMD_OPEN_POWER_COLLAPSE;
                                ack_pkt->hdr.payload_len = 0;
                                ack_pkt->hdr.pad_len = 0;
                                smux_tx_queue(ack_pkt, ch, 0);
                                tx_ready = 1;
                        } else {
                                SMUX_ERR(
                                        "%s: Remote loopback allocation failure\n",
                                        __func__);
                        }
                } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
                        schedule_notify(lcid, SMUX_CONNECTED, NULL);
                }
                ret = 0;
        } else {
                SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
                                __func__, lcid, ch->remote_state);
                ret = -EINVAL;
        }

out:
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        if (enable_powerdown) {
                spin_lock_irqsave(&smux.tx_lock_lha2, flags);
                if (!smux.powerdown_enabled) {
                        smux.powerdown_enabled = 1;
                        SMUX_DBG("smux: %s: enabling power-collapse support\n",
                                        __func__);
                }
                spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
        }

        if (tx_ready)
                list_channel(ch);

        return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
        uint8_t lcid;
        int ret;
        struct smux_lch_t *ch;
        struct smux_pkt_t *ack_pkt;
        union notifier_metadata meta_disconnected;
        unsigned long flags;
        int tx_ready = 0;

        if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
                return smux_handle_close_ack(pkt);

        lcid = pkt->hdr.lcid;
        ch = &smux_lch[lcid];
        meta_disconnected.disconnected.is_ssr = 0;

        spin_lock_irqsave(&ch->state_lock_lhb1, flags);
        if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
                SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
                                SMUX_LCH_REMOTE_OPENED,
                                SMUX_LCH_REMOTE_CLOSED);

                ack_pkt = smux_alloc_pkt();
                if (!ack_pkt) {
                        /* exit out to allow retrying this later */
                        ret = -ENOMEM;
                        goto out;
                }
                ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
                ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
                ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
                ack_pkt->hdr.lcid = lcid;
                ack_pkt->hdr.payload_len = 0;
                ack_pkt->hdr.pad_len = 0;
                smux_tx_queue(ack_pkt, ch, 0);
                tx_ready = 1;

                if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
                        /*
                         * Send a Close command to the remote side to simulate
                         * our local client doing it.
                         */
                        ack_pkt = smux_alloc_pkt();
                        if (ack_pkt) {
                                ack_pkt->hdr.lcid = lcid;
                                ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
                                ack_pkt->hdr.flags = 0;
                                ack_pkt->hdr.payload_len = 0;
                                ack_pkt->hdr.pad_len = 0;
                                smux_tx_queue(ack_pkt, ch, 0);
                                tx_ready = 1;
                        } else {
                                SMUX_ERR(
                                        "%s: Remote loopback allocation failure\n",
                                        __func__);
                        }
                }

                if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
                        schedule_notify(lcid, SMUX_DISCONNECTED,
                                        &meta_disconnected);
                ret = 0;
        } else {
                SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
                                __func__, lcid, ch->remote_state);
                ret = -EINVAL;
        }
out:
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
        if (tx_ready)
                list_channel(ch);

        return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
        uint8_t lcid;
        int ret = 0;
        int do_retry = 0;
        int tx_ready = 0;
        int tmp;
        int rx_len;
        struct smux_lch_t *ch;
        union notifier_metadata metadata;
        int remote_loopback;
        struct smux_pkt_t *ack_pkt;
        unsigned long flags;

        if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
                ret = -ENXIO;
                goto out;
        }

        rx_len = pkt->hdr.payload_len;
        if (rx_len == 0) {
                ret = -EINVAL;
                goto out;
        }

        lcid = pkt->hdr.lcid;
        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);
        remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

        if (ch->local_state != SMUX_LCH_LOCAL_OPENED
                && !remote_loopback) {
                SMUX_ERR("smux: ch %d error data on local state 0x%x",
                                lcid, ch->local_state);
                ret = -EIO;
                spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
                goto out;
        }

        if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
                SMUX_ERR("smux: ch %d error data on remote state 0x%x",
                                lcid, ch->remote_state);
                ret = -EIO;
                spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
                goto out;
        }

        if (!list_empty(&ch->rx_retry_queue)) {
                do_retry = 1;

                if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
                        !ch->rx_flow_control_auto &&
                        ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
                        /* need to flow control RX */
                        ch->rx_flow_control_auto = 1;
                        tx_ready |= smux_rx_flow_control_updated(ch);
                        schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
                                        NULL);
                }
                if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
                        /* retry queue full */
                        SMUX_ERR(
                                "%s: ch %d RX retry queue full; rx flow=%d\n",
                                __func__, lcid, ch->rx_flow_control_auto);
                        schedule_notify(lcid, SMUX_READ_FAIL, NULL);
                        ret = -ENOMEM;
                        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
                        goto out;
                }
        }
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        if (remote_loopback) {
                /* Echo the data back to the remote client. */
                ack_pkt = smux_alloc_pkt();
                if (ack_pkt) {
                        ack_pkt->hdr.lcid = lcid;
                        ack_pkt->hdr.cmd = SMUX_CMD_DATA;
                        ack_pkt->hdr.flags = 0;
                        ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
                        if (ack_pkt->hdr.payload_len) {
                                smux_alloc_pkt_payload(ack_pkt);
                                memcpy(ack_pkt->payload, pkt->payload,
                                                ack_pkt->hdr.payload_len);
                        }
                        ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
                        smux_tx_queue(ack_pkt, ch, 0);
                        tx_ready = 1;
                } else {
                        SMUX_ERR("%s: Remote loopback allocation failure\n",
                                        __func__);
                }
        } else if (!do_retry) {
                /* request buffer from client */
                metadata.read.pkt_priv = 0;
                metadata.read.buffer = 0;
                tmp = ch->get_rx_buffer(ch->priv,
                                (void **)&metadata.read.pkt_priv,
                                (void **)&metadata.read.buffer,
                                rx_len);

                if (tmp == 0 && metadata.read.buffer) {
                        /* place data into RX buffer */
                        memcpy(metadata.read.buffer, pkt->payload,
                                        rx_len);
                        metadata.read.len = rx_len;
                        schedule_notify(lcid, SMUX_READ_DONE,
                                        &metadata);
                } else if (tmp == -EAGAIN ||
                                (tmp == 0 && !metadata.read.buffer)) {
                        /* buffer allocation failed - add to retry queue */
                        do_retry = 1;
                } else if (tmp < 0) {
                        SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
                                        __func__, lcid, tmp);
                        schedule_notify(lcid, SMUX_READ_FAIL, NULL);
                        ret = -ENOMEM;
                }
        }

        if (do_retry) {
                struct smux_rx_pkt_retry *retry;

                retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
                if (!retry) {
                        SMUX_ERR("%s: retry alloc failure\n", __func__);
                        ret = -ENOMEM;
                        schedule_notify(lcid, SMUX_READ_FAIL, NULL);
                        goto out;
                }
                INIT_LIST_HEAD(&retry->rx_retry_list);
                retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

                /* copy packet */
                retry->pkt = smux_alloc_pkt();
                if (!retry->pkt) {
                        kfree(retry);
                        SMUX_ERR("%s: pkt alloc failure\n", __func__);
                        ret = -ENOMEM;
                        schedule_notify(lcid, SMUX_READ_FAIL, NULL);
                        goto out;
                }
                retry->pkt->hdr.lcid = lcid;
                retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
                retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
                if (retry->pkt->hdr.payload_len) {
                        smux_alloc_pkt_payload(retry->pkt);
                        memcpy(retry->pkt->payload, pkt->payload,
                                        retry->pkt->hdr.payload_len);
                }

                /* add to retry queue */
                spin_lock_irqsave(&ch->state_lock_lhb1, flags);
                list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
                ++ch->rx_retry_queue_cnt;
                if (ch->rx_retry_queue_cnt == 1)
                        queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
                                msecs_to_jiffies(retry->timeout_in_ms));
                spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
        }

        if (tx_ready)
                list_channel(ch);
out:
        return ret;
}

1616/**
1617 * Handle receive byte command for testing purposes.
1618 *
1619 * @pkt Received packet
1620 *
1621 * @returns 0 for success
1622 */
1623static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1624{
1625 uint8_t lcid;
1626 int ret;
1627 struct smux_lch_t *ch;
1628 union notifier_metadata metadata;
1629 unsigned long flags;
1630
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001631 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001632 SMUX_ERR("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001633 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001634 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001635
1636 lcid = pkt->hdr.lcid;
1637 ch = &smux_lch[lcid];
1638 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1639
1640 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001641		SMUX_ERR("smux: ch %d error data on local state 0x%x\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001642 lcid, ch->local_state);
1643 ret = -EIO;
1644 goto out;
1645 }
1646
1647 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001648		SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001649 lcid, ch->remote_state);
1650 ret = -EIO;
1651 goto out;
1652 }
1653
1654 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1655 metadata.read.buffer = 0;
1656 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1657 ret = 0;
1658
1659out:
1660 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1661 return ret;
1662}
1663
1664/**
1665 * Handle receive status command.
1666 *
1667 * @pkt Received packet
1668 *
1669 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001670 */
1671static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1672{
1673 uint8_t lcid;
1674	int ret = 0;
1675 struct smux_lch_t *ch;
1676 union notifier_metadata meta;
1677 unsigned long flags;
1678 int tx_ready = 0;
1679
1680 lcid = pkt->hdr.lcid;
1681 ch = &smux_lch[lcid];
1682
1683 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1684 meta.tiocm.tiocm_old = ch->remote_tiocm;
1685 meta.tiocm.tiocm_new = pkt->hdr.flags;
1686
1687 /* update logical channel flow control */
1688 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1689 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1690 /* logical channel flow control changed */
1691 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1692 /* disabled TX */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301693 SMUX_DBG("smux: TX Flow control enabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001694 ch->tx_flow_control = 1;
1695 } else {
1696 /* re-enable channel */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301697 SMUX_DBG("smux: TX Flow control disabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001698 ch->tx_flow_control = 0;
1699 tx_ready = 1;
1700 }
1701 }
1702 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1703 ch->remote_tiocm = pkt->hdr.flags;
1704 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1705
1706 /* client notification for status change */
1707 if (IS_FULLY_OPENED(ch)) {
1708 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1709 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1710 ret = 0;
1711 }
1712 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1713 if (tx_ready)
1714 list_channel(ch);
1715
1716 return ret;
1717}
1718
1719/**
1720 * Handle receive power command.
1721 *
1722 * @pkt Received packet
1723 *
1724 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001725 */
1726static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1727{
David Brownd2f01b52013-01-16 15:22:17 -08001728 struct smux_pkt_t *ack_pkt;
Eric Holmberga9b06472012-06-22 09:46:34 -06001729 int power_down = 0;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001730 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001731
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001732 SMUX_PWR_PKT_RX(pkt);
1733
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001734 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001735 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1736 /* local sleep request ack */
Eric Holmberga9b06472012-06-22 09:46:34 -06001737 if (smux.power_state == SMUX_PWR_TURNING_OFF)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001738 /* Power-down complete, turn off UART */
Eric Holmberga9b06472012-06-22 09:46:34 -06001739 power_down = 1;
1740 else
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001741 SMUX_ERR("%s: sleep request ack invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001742 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001743 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001744 /*
1745 * Remote sleep request
1746 *
1747 * Even if we have data pending, we need to transition to the
1748 * POWER_OFF state and then perform a wakeup since the remote
1749 * side has requested a power-down.
1750 *
1751 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1752 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1753 * when it sends the packet.
Eric Holmberga9b06472012-06-22 09:46:34 -06001754 *
1755 * If we are already powering down, then no ACK is sent.
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001756 */
Eric Holmberga9b06472012-06-22 09:46:34 -06001757 if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001758 ack_pkt = smux_alloc_pkt();
1759 if (ack_pkt) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301760 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001761 smux.power_state,
1762 SMUX_PWR_TURNING_OFF_FLUSH);
1763
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001764 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1765
1766 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001767 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1768 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001769 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1770 list_add_tail(&ack_pkt->list,
1771 &smux.power_queue);
1772 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001773 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001774 } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
1775 /* Local power-down request still in TX queue */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301776 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001777 __func__);
1778 smux.power_ctl_remote_req_received = 1;
1779 } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1780 /*
1781 * Local power-down request already sent to remote
1782 * side, so this request gets treated as an ACK.
1783 */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301784 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001785 __func__);
1786 power_down = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001787 } else {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001788 SMUX_ERR("%s: sleep request invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001789 __func__, smux.power_state);
1790 }
1791 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001792
1793 if (power_down) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301794 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06001795 smux.power_state, SMUX_PWR_OFF_FLUSH);
1796 smux.power_state = SMUX_PWR_OFF_FLUSH;
1797 queue_work(smux_tx_wq, &smux_inactivity_work);
1798 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001799 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001800
1801 return 0;
1802}
1803
1804/**
1805 * Handle dispatching a completed packet for receive processing.
1806 *
1807 * @pkt Packet to process
1808 *
1809 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001810 */
1811static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1812{
Eric Holmbergf9622662012-06-13 15:55:45 -06001813 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001814
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001815 switch (pkt->hdr.cmd) {
1816 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001817 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001818 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001819 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001820 __func__, pkt->hdr.lcid);
1821 break;
1822 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001823 ret = smux_handle_rx_open_cmd(pkt);
1824 break;
1825
1826 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001827 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001828 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001829 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001830 __func__, pkt->hdr.lcid);
1831 break;
1832 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001833 ret = smux_handle_rx_data_cmd(pkt);
1834 break;
1835
1836 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001837 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001838 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001839 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001840 __func__, pkt->hdr.lcid);
1841 break;
1842 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 ret = smux_handle_rx_close_cmd(pkt);
1844 break;
1845
1846 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001847 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001848 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001849 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001850 __func__, pkt->hdr.lcid);
1851 break;
1852 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001853 ret = smux_handle_rx_status_cmd(pkt);
1854 break;
1855
1856 case SMUX_CMD_PWR_CTL:
1857 ret = smux_handle_rx_power_cmd(pkt);
1858 break;
1859
1860 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001861 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001862 ret = smux_handle_rx_byte_cmd(pkt);
1863 break;
1864
1865 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001866 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001867 SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001868 ret = -EINVAL;
1869 }
1870 return ret;
1871}
1872
1873/**
1874 * Deserializes a packet and dispatches it to the packet receive logic.
1875 *
1876 * @data Raw data for one packet
1877 * @len Length of the data
1878 *
1879 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001880 */
1881static int smux_deserialize(unsigned char *data, int len)
1882{
1883 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001884
1885 smux_init_pkt(&recv);
1886
1887 /*
1888 * It may be possible to optimize this to not use the
1889 * temporary buffer.
1890 */
1891 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1892
1893 if (recv.hdr.magic != SMUX_MAGIC) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001894 SMUX_ERR("%s: invalid header magic\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001895 return -EINVAL;
1896 }
1897
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001898 if (recv.hdr.payload_len)
1899 recv.payload = data + sizeof(struct smux_hdr_t);
1900
1901 return smux_dispatch_rx_pkt(&recv);
1902}
1903
1904/**
1905 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001906 */
1907static void smux_handle_wakeup_req(void)
1908{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001909 unsigned long flags;
1910
1911 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001912 if (smux.power_state == SMUX_PWR_OFF
1913 || smux.power_state == SMUX_PWR_TURNING_ON) {
1914 /* wakeup system */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301915 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001916 smux.power_state, SMUX_PWR_ON);
Eric Holmberg371b4622013-05-21 18:04:50 -06001917 smux.remote_initiated_wakeup_count++;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001918 smux.power_state = SMUX_PWR_ON;
1919 queue_work(smux_tx_wq, &smux_wakeup_work);
1920 queue_work(smux_tx_wq, &smux_tx_work);
1921 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1922 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1923 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001924 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001925 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001926 } else {
1927 /* stale wakeup request from previous wakeup */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301928 SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001929 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001930 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001931 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001932}
1933
1934/**
1935 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001936 */
1937static void smux_handle_wakeup_ack(void)
1938{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001939 unsigned long flags;
1940
1941 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001942 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1943 /* received response to wakeup request */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301944 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001945 smux.power_state, SMUX_PWR_ON);
1946 smux.power_state = SMUX_PWR_ON;
1947 queue_work(smux_tx_wq, &smux_tx_work);
1948 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1949 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1950
1951 } else if (smux.power_state != SMUX_PWR_ON) {
1952 /* invalid message */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301953 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001954 __func__, smux.power_state);
1955 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001956 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001957}
1958
1959/**
1960 * RX State machine - IDLE state processing.
1961 *
1962 * @data New RX data to process
1963 * @len Length of the data
1964 * @used Return value of length processed
1965 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001966 */
1967static void smux_rx_handle_idle(const unsigned char *data,
1968 int len, int *used, int flag)
1969{
1970 int i;
1971
1972 if (flag) {
1973 if (smux_byte_loopback)
1974 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1975 smux_byte_loopback);
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001976 SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001977 ++*used;
1978 return;
1979 }
1980
1981 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1982 switch (data[i]) {
1983 case SMUX_MAGIC_WORD1:
1984 smux.rx_state = SMUX_RX_MAGIC;
1985 break;
1986 case SMUX_WAKEUP_REQ:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301987			SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg0a81e8f2012-08-28 13:51:14 -06001988 if (unlikely(!smux.remote_is_alive)) {
1989 mutex_lock(&smux.mutex_lha0);
1990 smux.remote_is_alive = 1;
1991 mutex_unlock(&smux.mutex_lha0);
1992 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001993 smux_handle_wakeup_req();
1994 break;
1995 case SMUX_WAKEUP_ACK:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301996			SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg0a81e8f2012-08-28 13:51:14 -06001997 if (unlikely(!smux.remote_is_alive)) {
1998 mutex_lock(&smux.mutex_lha0);
1999 smux.remote_is_alive = 1;
2000 mutex_unlock(&smux.mutex_lha0);
2001 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002002 smux_handle_wakeup_ack();
2003 break;
2004 default:
2005 /* unexpected character */
2006 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
2007 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
2008 smux_byte_loopback);
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002009 SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
2010 __func__, (unsigned)data[i]);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002011 break;
2012 }
2013 }
2014
2015 *used = i;
2016}
2017
2018/**
2019 * RX State machine - Header Magic state processing.
2020 *
2021 * @data New RX data to process
2022 * @len Length of the data
2023 * @used Return value of length processed
2024 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002025 */
2026static void smux_rx_handle_magic(const unsigned char *data,
2027 int len, int *used, int flag)
2028{
2029 int i;
2030
2031 if (flag) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002032 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002033 smux_enter_reset();
2034 smux.rx_state = SMUX_RX_FAILURE;
2035 ++*used;
2036 return;
2037 }
2038
2039 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2040 /* wait for completion of the magic */
2041 if (data[i] == SMUX_MAGIC_WORD2) {
2042 smux.recv_len = 0;
2043 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2044 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2045 smux.rx_state = SMUX_RX_HDR;
2046 } else {
2047 /* unexpected / trash character */
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002048 SMUX_ERR(
2049 "%s: rx parse error for char %c; *used=%d, len=%d\n",
2050 __func__, data[i], *used, len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002051 smux.rx_state = SMUX_RX_IDLE;
2052 }
2053 }
2054
2055 *used = i;
2056}
2057
2058/**
2059 * RX State machine - Packet Header state processing.
2060 *
2061 * @data New RX data to process
2062 * @len Length of the data
2063 * @used Return value of length processed
2064 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002065 */
2066static void smux_rx_handle_hdr(const unsigned char *data,
2067 int len, int *used, int flag)
2068{
2069 int i;
2070 struct smux_hdr_t *hdr;
2071
2072 if (flag) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002073 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002074 smux_enter_reset();
2075 smux.rx_state = SMUX_RX_FAILURE;
2076 ++*used;
2077 return;
2078 }
2079
2080 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2081 smux.recv_buf[smux.recv_len++] = data[i];
2082
2083 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2084 /* complete header received */
2085 hdr = (struct smux_hdr_t *)smux.recv_buf;
2086 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2087 smux.rx_state = SMUX_RX_PAYLOAD;
2088 }
2089 }
2090 *used = i;
2091}
2092
2093/**
2094 * RX State machine - Packet Payload state processing.
2095 *
2096 * @data New RX data to process
2097 * @len Length of the data
2098 * @used Return value of length processed
2099 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002100 */
2101static void smux_rx_handle_pkt_payload(const unsigned char *data,
2102 int len, int *used, int flag)
2103{
2104 int remaining;
2105
2106 if (flag) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002107 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002108 smux_enter_reset();
2109 smux.rx_state = SMUX_RX_FAILURE;
2110 ++*used;
2111 return;
2112 }
2113
2114 /* copy data into rx buffer */
2115 if (smux.pkt_remain < (len - *used))
2116 remaining = smux.pkt_remain;
2117 else
2118 remaining = len - *used;
2119
2120 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2121 smux.recv_len += remaining;
2122 smux.pkt_remain -= remaining;
2123 *used += remaining;
2124
2125 if (smux.pkt_remain == 0) {
2126 /* complete packet received */
2127 smux_deserialize(smux.recv_buf, smux.recv_len);
2128 smux.rx_state = SMUX_RX_IDLE;
2129 }
2130}
2131
2132/**
2133 * Feed data to the receive state machine.
2134 *
2135 * @data Pointer to data block
2136 * @len Length of data
2137 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002138 */
2139void smux_rx_state_machine(const unsigned char *data,
2140 int len, int flag)
2141{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002142 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002143
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002144 work.data = data;
2145 work.len = len;
2146 work.flag = flag;
2147 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2148 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002149
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002150 queue_work(smux_rx_wq, &work.work);
2151 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002152}
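
/*
 * Example (illustrative only, not part of the original driver): feeding
 * one error-free block of raw TTY bytes into the state machine above;
 * data and len here are hypothetical locals supplied by the caller.
 *
 *	smux_rx_state_machine(data, len, TTY_NORMAL);
 */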
2153
2154/**
Eric Holmberg0a81e8f2012-08-28 13:51:14 -06002155 * Returns true if the remote side has acknowledged a wakeup
2156 * request previously, so we know that the link is alive and active.
2157 *
2158 * @returns true if the remote side is alive, false otherwise
2159 */
2160bool smux_remote_is_active(void)
2161{
2162 bool is_active = false;
2163
2164 mutex_lock(&smux.mutex_lha0);
2165 if (smux.remote_is_alive)
2166 is_active = true;
2167 mutex_unlock(&smux.mutex_lha0);
2168
2169 return is_active;
2170}
2171
2172/**
Eric Holmberg371b4622013-05-21 18:04:50 -06002173 * Sends a delay command to the remote side.
2174 *
2175 * @ms: Time in milliseconds for the remote side to delay
2176 *
2177 * This command defines the delay that the remote side will use
2178 * to slow the response time for DATA commands.
2179 */
2180void smux_set_loopback_data_reply_delay(uint32_t ms)
2181{
2182 struct smux_lch_t *ch = &smux_lch[SMUX_TEST_LCID];
2183 struct smux_pkt_t *pkt;
2184
2185 pkt = smux_alloc_pkt();
2186 if (!pkt) {
2187 pr_err("%s: unable to allocate packet\n", __func__);
2188 return;
2189 }
2190
2191 pkt->hdr.lcid = ch->lcid;
2192 pkt->hdr.cmd = SMUX_CMD_DELAY;
2193 pkt->hdr.flags = 0;
2194 pkt->hdr.payload_len = sizeof(uint32_t);
2195 pkt->hdr.pad_len = 0;
2196
2197 if (smux_alloc_pkt_payload(pkt)) {
2198 pr_err("%s: unable to allocate payload\n", __func__);
2199 smux_free_pkt(pkt);
2200 return;
2201 }
2202 memcpy(pkt->payload, &ms, sizeof(uint32_t));
2203
2204 smux_tx_queue(pkt, ch, 1);
2205}
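
/*
 * Example (illustrative only): a test harness could use
 * smux_set_loopback_data_reply_delay() above to slow remote loopback
 * DATA replies, e.g. to exercise the RX retry path.
 *
 *	smux_set_loopback_data_reply_delay(100);
 */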
2206
2207/**
2208 * Retrieve wakeup counts.
2209 *
2210 * @local_cnt: Pointer to local wakeup count
2211 * @remote_cnt: Pointer to remote wakeup count
2212 */
2213void smux_get_wakeup_counts(int *local_cnt, int *remote_cnt)
2214{
2215 unsigned long flags;
2216
2217 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2218
2219 if (local_cnt)
2220 *local_cnt = smux.local_initiated_wakeup_count;
2221
2222 if (remote_cnt)
2223 *remote_cnt = smux.remote_initiated_wakeup_count;
2224
2225 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2226}
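
/*
 * Example (illustrative only): snapshot both wakeup counters, e.g. for
 * a debug log.
 *
 *	int local, remote;
 *
 *	smux_get_wakeup_counts(&local, &remote);
 *	pr_info("smux wakeups: local=%d remote=%d\n", local, remote);
 */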
2227
2228/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002229 * Add channel to transmit-ready list and trigger transmit worker.
2230 *
2231 * @ch Channel to add
2232 */
2233static void list_channel(struct smux_lch_t *ch)
2234{
2235 unsigned long flags;
2236
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302237 SMUX_DBG("smux: %s: listing channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002238 __func__, ch->lcid);
2239
2240 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2241 spin_lock(&ch->tx_lock_lhb2);
2242 smux.tx_activity_flag = 1;
2243 if (list_empty(&ch->tx_ready_list))
2244 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2245 spin_unlock(&ch->tx_lock_lhb2);
2246 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2247
2248 queue_work(smux_tx_wq, &smux_tx_work);
2249}
2250
2251/**
2252 * Transmit packet on correct transport and then perform client
2253 * notification.
2254 *
2255 * @ch Channel to transmit on
2256 * @pkt Packet to transmit
2257 */
2258static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2259{
2260 union notifier_metadata meta_write;
2261 int ret;
2262
2263 if (ch && pkt) {
2264 SMUX_LOG_PKT_TX(pkt);
2265 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2266 ret = smux_tx_loopback(pkt);
2267 else
2268 ret = smux_tx_tty(pkt);
2269
2270 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2271 /* notify write-done */
2272 meta_write.write.pkt_priv = pkt->priv;
2273 meta_write.write.buffer = pkt->payload;
2274 meta_write.write.len = pkt->hdr.payload_len;
2275 if (ret >= 0) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302276 SMUX_DBG("smux: %s: PKT write done", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002277 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2278 &meta_write);
2279 } else {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002280 SMUX_ERR("%s: failed to write pkt %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002281 __func__, ret);
2282 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2283 &meta_write);
2284 }
2285 }
2286 }
2287}
2288
2289/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002290 * Flush pending TTY TX data.
2291 */
2292static void smux_flush_tty(void)
2293{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002294 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002295 if (!smux.tty) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002296 SMUX_ERR("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002297 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002298 return;
2299 }
2300
2301 tty_wait_until_sent(smux.tty,
2302 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2303
2304 if (tty_chars_in_buffer(smux.tty) > 0)
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002305 SMUX_ERR("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002306
2307 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002308}
2309
2310/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002311 * Purge TX queue for logical channel.
2312 *
2313 * @ch Logical channel pointer
Eric Holmberg0e914082012-07-11 11:46:28 -06002314 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002315 *
2316 * Must be called with the following spinlocks locked:
2317 * state_lock_lhb1
2318 * tx_lock_lhb2
2319 */
Eric Holmberg0e914082012-07-11 11:46:28 -06002320static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002321{
2322 struct smux_pkt_t *pkt;
2323 int send_disconnect = 0;
Eric Holmberg0e914082012-07-11 11:46:28 -06002324 struct smux_pkt_t *pkt_tmp;
2325 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002326
Eric Holmberg0e914082012-07-11 11:46:28 -06002327 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2328 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002329 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg0e914082012-07-11 11:46:28 -06002330 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2331 /* Open ACK must still be sent */
2332 is_state_pkt = 1;
2333 } else {
2334 /* Open never sent -- force to closed state */
2335 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2336 send_disconnect = 1;
2337 }
2338 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2339 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2340 is_state_pkt = 1;
2341 if (!send_disconnect)
2342 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002343 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2344 /* Notify client of failed write */
2345 union notifier_metadata meta_write;
2346
2347 meta_write.write.pkt_priv = pkt->priv;
2348 meta_write.write.buffer = pkt->payload;
2349 meta_write.write.len = pkt->hdr.payload_len;
2350 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2351 }
Eric Holmberg0e914082012-07-11 11:46:28 -06002352
2353 if (!is_state_pkt || is_ssr) {
2354 list_del(&pkt->list);
2355 smux_free_pkt(pkt);
2356 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002357 }
2358
2359 if (send_disconnect) {
2360 union notifier_metadata meta_disconnected;
2361
2362 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2363 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2364 &meta_disconnected);
2365 }
2366}
2367
2368/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002369 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002370 *
2371 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002372 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002373static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002374{
2375 struct uart_state *state;
2376
2377 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002378 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002379 __func__, smux.tty);
2380 return;
2381 }
2382 state = smux.tty->driver_data;
2383 msm_hs_request_clock_on(state->uart_port);
2384}
2385
2386/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002387 * Power-up the UART.
2388 */
2389static void smux_uart_power_on(void)
2390{
2391 mutex_lock(&smux.mutex_lha0);
2392 smux_uart_power_on_atomic();
2393 mutex_unlock(&smux.mutex_lha0);
2394}
2395
2396/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002397 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002398 *
2399 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002400 */
Eric Holmberg06011322012-07-06 18:17:03 -06002401static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002402{
2403 struct uart_state *state;
2404
2405 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002406 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002407 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002409 return;
2410 }
2411 state = smux.tty->driver_data;
2412 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002413}
2414
2415/**
2416 * Power down the UART.
2417 */
2418static void smux_uart_power_off(void)
2419{
2420 mutex_lock(&smux.mutex_lha0);
2421 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002422 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002423}
2424
2425/**
2426 * TX Wakeup Worker
2427 *
2428 * @work Not used
2429 *
2430 * Do an exponential back-off wakeup sequence with a maximum period
2431 * of approximately 1 second (1 << 20 microseconds).
2432 */
2433static void smux_wakeup_worker(struct work_struct *work)
2434{
2435 unsigned long flags;
2436 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002437
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002438 if (smux.in_reset)
2439 return;
2440
2441 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2442 if (smux.power_state == SMUX_PWR_ON) {
2443 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002444 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002445 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302446 SMUX_DBG("smux: %s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002447
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002448 /*
2449 * Cancel any pending retry. This avoids a race condition with
2450 * a new power-up request because:
2451 * 1) this worker doesn't modify the state
2452 * 2) this worker is processed on the same single-threaded
2453 * workqueue as new TX wakeup requests
2454 */
2455 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002456 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002457 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002458 /* retry wakeup */
2459 wakeup_delay = smux.pwr_wakeup_delay_us;
2460 smux.pwr_wakeup_delay_us <<= 1;
2461 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2462 smux.pwr_wakeup_delay_us =
2463 SMUX_WAKEUP_DELAY_MAX;
2464
2465 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302466 SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002467 smux_send_byte(SMUX_WAKEUP_REQ);
2468
2469 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302470 SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002471 wakeup_delay);
2472 usleep_range(wakeup_delay, 2*wakeup_delay);
2473 queue_work(smux_tx_wq, &smux_wakeup_work);
2474 } else {
2475 /* schedule delayed work */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302476 SMUX_DBG(
2477 "smux: %s: scheduling delayed wakeup in %u ms\n",
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002478 __func__, wakeup_delay / 1000);
2479 queue_delayed_work(smux_tx_wq,
2480 &smux_wakeup_delayed_work,
2481 msecs_to_jiffies(wakeup_delay / 1000));
2482 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002483 } else {
2484 /* wakeup aborted */
2485 smux.pwr_wakeup_delay_us = 1;
2486 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302487 SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
Eric Holmberga9b06472012-06-22 09:46:34 -06002488 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002489 }
2490}
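
/*
 * Illustrative retry timeline for smux_wakeup_worker() above, assuming
 * no wakeup ACK ever arrives: pwr_wakeup_delay_us doubles on each
 * attempt (1, 2, 4, ... us). Delays below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 us) are slept inline with usleep_range(); longer delays are
 * rescheduled as delayed work, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second between attempts).
 */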
2491
2492
2493/**
2494 * Inactivity timeout worker. Periodically scheduled when link is active.
2495 * When it detects inactivity, it will power-down the UART link.
2496 *
2497 * @work Work structure (not used)
2498 */
2499static void smux_inactivity_worker(struct work_struct *work)
2500{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002501 struct smux_pkt_t *pkt;
2502 unsigned long flags;
2503
Eric Holmberg06011322012-07-06 18:17:03 -06002504 if (smux.in_reset)
2505 return;
2506
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002507 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2508 spin_lock(&smux.tx_lock_lha2);
2509
2510 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2511 /* no activity */
2512 if (smux.powerdown_enabled) {
2513 if (smux.power_state == SMUX_PWR_ON) {
2514 /* start power-down sequence */
2515 pkt = smux_alloc_pkt();
2516 if (pkt) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302517 SMUX_PWR(
2518 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002519 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002520 SMUX_PWR_TURNING_OFF_FLUSH);
2521 smux.power_state =
2522 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002523
2524 /* send power-down request */
2525 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2526 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002527 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2528 list_add_tail(&pkt->list,
2529 &smux.power_queue);
2530 queue_work(smux_tx_wq, &smux_tx_work);
2531 } else {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002532 SMUX_ERR("%s: packet alloc failed\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002533 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002534 }
2535 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002536 }
2537 }
2538 smux.tx_activity_flag = 0;
2539 smux.rx_activity_flag = 0;
2540
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002541 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002542 /* ready to power-down the UART */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302543 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002544 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002545 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002546
2547 /* if data is pending, schedule a new wakeup */
2548 if (!list_empty(&smux.lch_tx_ready_list) ||
2549 !list_empty(&smux.power_queue))
2550 queue_work(smux_tx_wq, &smux_tx_work);
2551
2552 spin_unlock(&smux.tx_lock_lha2);
2553 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2554
2555 /* flush UART output queue and power down */
2556 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002557 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002558 } else {
2559 spin_unlock(&smux.tx_lock_lha2);
2560 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002561 }
2562
2563 /* reschedule inactivity worker */
2564 if (smux.power_state != SMUX_PWR_OFF)
2565 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2566 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2567}
2568
2569/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002570 * Remove RX retry packet from channel and free it.
2571 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002572 * @ch Channel for retry packet
2573 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002574 *
2575 * @returns 1 if flow control updated; 0 otherwise
2576 *
2577 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002578 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002579int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002580 struct smux_rx_pkt_retry *retry)
2581{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002582 int tx_ready = 0;
2583
Eric Holmbergb8435c82012-06-05 14:51:29 -06002584 list_del(&retry->rx_retry_list);
2585 --ch->rx_retry_queue_cnt;
2586 smux_free_pkt(retry->pkt);
2587 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002588
2589 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2590 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2591 ch->rx_flow_control_auto) {
2592 ch->rx_flow_control_auto = 0;
2593 smux_rx_flow_control_updated(ch);
2594 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2595 tx_ready = 1;
2596 }
2597 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002598}
2599
2600/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002601 * RX worker handles all receive operations.
2602 *
2603 * @work Work structure contained in struct smux_rx_worker_data
2604 */
2605static void smux_rx_worker(struct work_struct *work)
2606{
2607 unsigned long flags;
2608 int used;
2609 int initial_rx_state;
2610 struct smux_rx_worker_data *w;
2611 const unsigned char *data;
2612 int len;
2613 int flag;
2614
2615 w = container_of(work, struct smux_rx_worker_data, work);
2616 data = w->data;
2617 len = w->len;
2618 flag = w->flag;
2619
2620 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2621 smux.rx_activity_flag = 1;
2622 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2623
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302624 SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002625 used = 0;
2626 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002627 if (smux.in_reset) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302628 SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002629 smux.rx_state = SMUX_RX_IDLE;
2630 break;
2631 }
2632
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302633 SMUX_DBG("smux: %s: state %d; %d of %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002634 __func__, smux.rx_state, used, len);
2635 initial_rx_state = smux.rx_state;
2636
2637 switch (smux.rx_state) {
2638 case SMUX_RX_IDLE:
2639 smux_rx_handle_idle(data, len, &used, flag);
2640 break;
2641 case SMUX_RX_MAGIC:
2642 smux_rx_handle_magic(data, len, &used, flag);
2643 break;
2644 case SMUX_RX_HDR:
2645 smux_rx_handle_hdr(data, len, &used, flag);
2646 break;
2647 case SMUX_RX_PAYLOAD:
2648 smux_rx_handle_pkt_payload(data, len, &used, flag);
2649 break;
2650 default:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302651 SMUX_DBG("smux: %s: invalid state %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002652 __func__, smux.rx_state);
2653 smux.rx_state = SMUX_RX_IDLE;
2654 break;
2655 }
2656 } while (used < len || smux.rx_state != initial_rx_state);
2657
2658 complete(&w->work_complete);
2659}
2660
2661/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002662 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2663 * because the client was not ready (-EAGAIN).
2664 *
2665 * @work Work structure contained in smux_lch_t structure
2666 */
2667static void smux_rx_retry_worker(struct work_struct *work)
2668{
2669 struct smux_lch_t *ch;
2670 struct smux_rx_pkt_retry *retry;
2671 union notifier_metadata metadata;
2672 int tmp;
2673 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002674 int immediate_retry = 0;
2675 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002676
2677 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2678
2679 /* get next retry packet */
2680 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002681 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002682 /* port has been closed - remove all retries */
2683 while (!list_empty(&ch->rx_retry_queue)) {
2684 retry = list_first_entry(&ch->rx_retry_queue,
2685 struct smux_rx_pkt_retry,
2686 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002687 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002688 }
2689 }
2690
2691 if (list_empty(&ch->rx_retry_queue)) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302692 SMUX_DBG("smux: %s: retry list empty for channel %d\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06002693 __func__, ch->lcid);
2694 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2695 return;
2696 }
2697 retry = list_first_entry(&ch->rx_retry_queue,
2698 struct smux_rx_pkt_retry,
2699 rx_retry_list);
2700 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2701
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302702 SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002703 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002704 metadata.read.pkt_priv = 0;
2705 metadata.read.buffer = 0;
2706 tmp = ch->get_rx_buffer(ch->priv,
2707 (void **)&metadata.read.pkt_priv,
2708 (void **)&metadata.read.buffer,
2709 retry->pkt->hdr.payload_len);
2710 if (tmp == 0 && metadata.read.buffer) {
2711 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002712
Eric Holmbergb8435c82012-06-05 14:51:29 -06002713 memcpy(metadata.read.buffer, retry->pkt->payload,
2714 retry->pkt->hdr.payload_len);
2715 metadata.read.len = retry->pkt->hdr.payload_len;
2716
2717 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002718 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002719 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002720 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002721 if (tx_ready)
2722 list_channel(ch);
2723
2724 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002725 } else if (tmp == -EAGAIN ||
2726 (tmp == 0 && !metadata.read.buffer)) {
2727 /* retry again */
2728 retry->timeout_in_ms <<= 1;
2729 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2730 /* timed out */
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002731 SMUX_ERR("%s: ch %d RX retry client timeout\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002732 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002733 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002734 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002735 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002736 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2737 if (tx_ready)
2738 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002739 }
2740 } else {
2741 /* client error - drop packet */
Eric Holmberg51f46cb2012-08-21 16:43:39 -06002742 SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002743 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002744 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002745 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002746 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002747 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002748 if (tx_ready)
2749 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002750 }
2751
2752 /* schedule next retry */
2753 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2754 if (!list_empty(&ch->rx_retry_queue)) {
2755 retry = list_first_entry(&ch->rx_retry_queue,
2756 struct smux_rx_pkt_retry,
2757 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002758
2759 if (immediate_retry)
2760 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2761 else
2762 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2763 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002764 }
2765 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2766}
2767
2768/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002769 * Transmit worker handles serializing and transmitting packets onto the
2770 * underlying transport.
2771 *
2772 * @work Work structure (not used)
2773 */
2774static void smux_tx_worker(struct work_struct *work)
2775{
2776 struct smux_pkt_t *pkt;
2777 struct smux_lch_t *ch;
2778 unsigned low_wm_notif;
2779 unsigned lcid;
2780 unsigned long flags;
2781
2782
2783 /*
2784 * Transmit packets in round-robin fashion based upon ready
2785 * channels.
2786 *
2787 * To eliminate the need to hold a lock for the entire
2788 * iteration through the channel ready list, the head of the
2789 * ready-channel list is always the next channel to be
2790 * processed. To send a packet, the first valid packet in
2791 * the head channel is removed and the head channel is then
2792 * rescheduled at the end of the queue by removing it and
2793 * inserting after the tail. The locks can then be released
2794 * while the packet is processed.
2795 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002796 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002797 pkt = NULL;
2798 low_wm_notif = 0;
2799
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002800 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002801
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002802 /* handle wakeup if needed */
2803 if (smux.power_state == SMUX_PWR_OFF) {
2804 if (!list_empty(&smux.lch_tx_ready_list) ||
2805 !list_empty(&smux.power_queue)) {
2806 /* data to transmit, do wakeup */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302807 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002808 smux.power_state,
2809 SMUX_PWR_TURNING_ON);
Eric Holmberg371b4622013-05-21 18:04:50 -06002810 smux.local_initiated_wakeup_count++;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002811 smux.power_state = SMUX_PWR_TURNING_ON;
2812 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2813 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002814 queue_work(smux_tx_wq, &smux_wakeup_work);
2815 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002816 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002817 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2818 flags);
2819 }
2820 break;
2821 }
2822
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002823 /* process any pending power packets */
2824 if (!list_empty(&smux.power_queue)) {
2825 pkt = list_first_entry(&smux.power_queue,
2826 struct smux_pkt_t, list);
2827 list_del(&pkt->list);
2828 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2829
Eric Holmberga9b06472012-06-22 09:46:34 -06002830 /* Adjust power state if this is a flush command */
2831 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2832 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2833 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2834 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2835 smux.power_ctl_remote_req_received) {
2836 /*
2837 * Sending remote power-down request ACK
2838 * or sending local power-down request
2839 * and we already received a remote
2840 * power-down request.
2841 */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302842 SMUX_PWR(
2843 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002844 smux.power_state,
2845 SMUX_PWR_OFF_FLUSH);
2846 smux.power_state = SMUX_PWR_OFF_FLUSH;
2847 smux.power_ctl_remote_req_received = 0;
2848 queue_work(smux_tx_wq,
2849 &smux_inactivity_work);
2850 } else {
2851 /* sending local power-down request */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302852 SMUX_PWR(
2853 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002854 smux.power_state,
2855 SMUX_PWR_TURNING_OFF);
2856 smux.power_state = SMUX_PWR_TURNING_OFF;
2857 }
2858 }
2859 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2860
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002861 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002862 smux_uart_power_on();
2863 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002864 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002865 if (!smux_byte_loopback) {
2866 smux_tx_tty(pkt);
2867 smux_flush_tty();
2868 } else {
2869 smux_tx_loopback(pkt);
2870 }
2871
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002872 smux_free_pkt(pkt);
2873 continue;
2874 }
2875
2876 /* get the next ready channel */
2877 if (list_empty(&smux.lch_tx_ready_list)) {
2878 /* no ready channels */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302879 SMUX_DBG("smux: %s: no more ready channels, exiting\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002880 __func__);
2881 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2882 break;
2883 }
2884 smux.tx_activity_flag = 1;
2885
2886 if (smux.power_state != SMUX_PWR_ON) {
2887 /* channel not ready to transmit */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302888 SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002889 __func__,
2890 smux.power_state);
2891 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2892 break;
2893 }
2894
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002895 /* get the next packet to send and rotate channel list */
2896 ch = list_first_entry(&smux.lch_tx_ready_list,
2897 struct smux_lch_t,
2898 tx_ready_list);
2899
2900 spin_lock(&ch->state_lock_lhb1);
2901 spin_lock(&ch->tx_lock_lhb2);
2902 if (!list_empty(&ch->tx_queue)) {
2903 /*
2904 * If remote TX flow control is enabled or
2905 * the channel is not fully opened, then only
2906 * send command packets.
2907 */
2908 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2909 struct smux_pkt_t *curr;
2910 list_for_each_entry(curr, &ch->tx_queue, list) {
2911 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2912 pkt = curr;
2913 break;
2914 }
2915 }
2916 } else {
2917 /* get next cmd/data packet to send */
2918 pkt = list_first_entry(&ch->tx_queue,
2919 struct smux_pkt_t, list);
2920 }
2921 }
2922
2923 if (pkt) {
2924 list_del(&pkt->list);
2925
2926 /* update packet stats */
2927 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2928 --ch->tx_pending_data_cnt;
2929 if (ch->notify_lwm &&
2930 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002931 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002932 ch->notify_lwm = 0;
2933 low_wm_notif = 1;
2934 }
2935 }
2936
2937 /* advance to the next ready channel */
2938 list_rotate_left(&smux.lch_tx_ready_list);
2939 } else {
2940 /* no data in channel to send, remove from ready list */
2941 list_del(&ch->tx_ready_list);
2942 INIT_LIST_HEAD(&ch->tx_ready_list);
2943 }
2944 lcid = ch->lcid;
2945 spin_unlock(&ch->tx_lock_lhb2);
2946 spin_unlock(&ch->state_lock_lhb1);
2947 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2948
2949 if (low_wm_notif)
2950 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2951
2952 /* send the packet */
2953 smux_tx_pkt(ch, pkt);
2954 smux_free_pkt(pkt);
2955 }
2956}
2957
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002958/**
2959 * Update the RX flow control (sent in the TIOCM Status command).
2960 *
2961 * @ch Channel for update
2962 *
2963 * @returns 1 for updated, 0 for not updated
2964 *
2965 * Must be called with ch->state_lock_lhb1 locked.
2966 */
2967static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2968{
2969 int updated = 0;
2970 int prev_state;
2971
2972 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2973
2974 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2975 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2976 else
2977 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2978
2979 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2980 smux_send_status_cmd(ch);
2981 updated = 1;
2982 }
2983
2984 return updated;
2985}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002986
Eric Holmberg06011322012-07-06 18:17:03 -06002987/**
2988 * Flush all SMUX workqueues.
2989 *
2990 * This sets the reset bit to abort any processing loops and then
2991 * flushes the workqueues to ensure that no new pending work is
2992 * running. Do not call with any locks used by workers held as
2993 * this will result in a deadlock.
2994 */
2995static void smux_flush_workqueues(void)
2996{
2997 smux.in_reset = 1;
2998
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302999 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003000 flush_workqueue(smux_tx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303001 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003002 flush_workqueue(smux_rx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303003 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003004 flush_workqueue(smux_notify_wq);
3005}

/**********************************************************************/
/* Kernel API */
/**********************************************************************/

/**
 * Set or clear channel option using the SMUX_CH_OPTION_* channel
 * flags.
 *
 * @lcid Logical channel ID
 * @set Options to set
 * @clear Options to clear
 *
 * @returns 0 for success, < 0 for failure
 */
int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
{
	unsigned long flags;
	struct smux_lch_t *ch;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	/* Local loopback mode */
	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;

	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* Remote loopback mode */
	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* RX Flow control */
	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 1;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	/* Auto RX Flow Control */
	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
			__func__);
		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
	}

	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
			__func__);
		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->rx_flow_control_auto = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
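
/*
 * Illustrative sketch (not part of the driver): how a client might
 * enable automatic RX flow control on its channel. EXAMPLE_LCID is a
 * placeholder channel ID invented for this example; any valid logical
 * channel ID would work.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static int example_enable_auto_flow_control(void)
{
	/* set the auto RX flow control option, clear nothing */
	return msm_smux_set_ch_option(EXAMPLE_LCID,
			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
}
#endif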

/**
 * Starts the opening sequence for a logical channel.
 *
 * @lcid Logical channel ID
 * @priv Free for client usage
 * @notify Event notification function
 * @get_rx_buffer Function used to provide a receive buffer to SMUX
 *
 * @returns 0 for success, <0 otherwise
 *
 * A channel must be fully closed (either not previously opened, or
 * msm_smux_close() has been called and the SMUX_DISCONNECTED event has
 * been received).
 *
 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
 * event.
 */
int msm_smux_open(uint8_t lcid, void *priv,
	void (*notify)(void *priv, int event_type, const void *metadata),
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size))
{
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
		SMUX_ERR("%s: open lcid %d local state %x invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
		goto out;
	}

	SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			ch->local_state,
			SMUX_LCH_LOCAL_OPENING);

	ch->rx_flow_control_auto = 0;
	ch->local_state = SMUX_LCH_LOCAL_OPENING;

	ch->priv = priv;
	ch->notify = notify;
	ch->get_rx_buffer = get_rx_buffer;
	ret = 0;

	/* Send Open Command */
	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}
	pkt->hdr.magic = SMUX_MAGIC;
	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
	pkt->hdr.lcid = lcid;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);
	tx_ready = 1;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	smux_rx_flow_control_updated(ch);
	if (tx_ready)
		list_channel(ch);
	return ret;
}
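
/*
 * Illustrative sketch (not part of the driver): a minimal client of
 * msm_smux_open(). The callback bodies, the kmalloc-based receive
 * buffer, and EXAMPLE_LCID are assumptions invented for the example;
 * a real client would track SMUX_CONNECTED and the other events in
 * its own state machine.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static void example_notify(void *priv, int event_type, const void *metadata)
{
	if (event_type == SMUX_CONNECTED)
		pr_info("example: channel fully open\n");
}

static int example_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
				 int size)
{
	*pkt_priv = NULL;
	*buffer = kmalloc(size, GFP_ATOMIC);	/* may run in RX context */
	return *buffer ? 0 : -ENOMEM;		/* -ENOMEM triggers RX retry */
}

static int example_open(void)
{
	return msm_smux_open(EXAMPLE_LCID, NULL, example_notify,
				example_get_rx_buffer);
}
#endif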

/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the client
 * will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;
	ch->tx_flow_control = 0;

	/* Purge TX queue */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch, 0);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
		ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
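
/*
 * Illustrative sketch (not part of the driver): closing a channel and
 * blocking until the SMUX_DISCONNECTED notification arrives. The
 * completion and EXAMPLE_LCID are assumptions for the example; the
 * client's notify callback would call complete() when it receives
 * SMUX_DISCONNECTED.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static DECLARE_COMPLETION(example_disconnected);

static int example_close(void)
{
	int ret = msm_smux_close(EXAMPLE_LCID);

	if (!ret)
		wait_for_completion(&example_disconnected);
	return ret;
}
#endif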

/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *           SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt = NULL;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
		ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		SMUX_ERR("%s: invalid local state %d channel %d\n",
					__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		SMUX_ERR("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);
	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		SMUX_ERR("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	if (ret && pkt)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
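
/*
 * Illustrative sketch (not part of the driver): writing a buffer and
 * backing off when the high watermark returns -EAGAIN. The buffer
 * lifetime rule is real: @data must remain valid until SMUX_WRITE_DONE
 * or SMUX_WRITE_FAIL is delivered. EXAMPLE_LCID and the msleep-based
 * retry are assumptions for the example.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static int example_write(const void *buf, int len)
{
	int ret;

	do {
		ret = msm_smux_write(EXAMPLE_LCID, NULL, buf, len);
		if (ret == -EAGAIN)
			msleep(10);	/* TX queue full; wait for drain */
	} while (ret == -EAGAIN);

	return ret;
}
#endif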

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above the low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}
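
/*
 * Illustrative sketch (not part of the driver): using the watermark
 * queries for simple client-side throttling alongside the
 * SMUX_HIGH_WM_HIT notification. EXAMPLE_LCID and the
 * example_tx_stopped flag are assumptions for the example.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static bool example_tx_stopped;

static bool example_can_send(void)
{
	int full = msm_smux_is_ch_full(EXAMPLE_LCID);

	if (full < 0)
		return false;		/* invalid channel */
	if (full)
		example_tx_stopped = true;	/* at high watermark */
	else if (msm_smux_is_ch_low(EXAMPLE_LCID) == 1)
		example_tx_stopped = false;	/* drained below low watermark */
	return !example_tx_stopped;
}
#endif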

/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
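
/*
 * Illustrative sketch (not part of the driver): asserting DTR/RTS on a
 * channel and reading back the merged local/remote modem status.
 * EXAMPLE_LCID is an assumption for the example.
 */
#if 0	/* example only */
#define EXAMPLE_LCID 0	/* placeholder channel ID */

static void example_tiocm(void)
{
	long status;

	/* assert DTR and RTS; clear nothing */
	msm_smux_tiocm_set(EXAMPLE_LCID, TIOCM_DTR | TIOCM_RTS, 0);

	status = msm_smux_tiocm_get(EXAMPLE_LCID);
	if (status >= 0 && (status & TIOCM_CTS))
		pr_info("example: remote is ready to receive\n");
}
#endif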

/**********************************************************************/
/* Subsystem Restart */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR Code
 * @data Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		smux.remote_is_alive = 0;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					SMUX_ERR(
						"%s: error %d registering device %s\n",
						__func__, tmp,
						smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		SMUX_ERR("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			SMUX_ERR("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
					tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
						TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};
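
/*
 * Illustrative sketch (not part of the driver): how userspace would
 * attach this line discipline to the HS-UART port before any SMUX
 * channel can carry traffic. This is userspace code, shown only for
 * context; the device path is a placeholder, while N_SMUX and
 * TIOCSETD come from the kernel headers.
 */
#if 0	/* example only (userspace) */
#include <fcntl.h>
#include <sys/ioctl.h>

int example_attach_ldisc(void)
{
	int ldisc = N_SMUX;
	int fd = open("/dev/ttyHS0", O_RDWR);	/* placeholder path */

	if (fd < 0)
		return -1;
	return ioctl(fd, TIOCSETD, &ldisc);	/* set line discipline */
}
#endif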

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		SMUX_ERR("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		SMUX_ERR("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		SMUX_ERR("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		SMUX_ERR("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);