/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

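/*
 * Illustrative usage of the logging macros above (editor-added example,
 * not driver code; lcid, state, and pkt are assumed locals):
 *
 *	SMUX_DBG("smux: %s: lcid %d opened\n", __func__, lcid);
 *	SMUX_ERR("%s: invalid state %d\n", __func__, state);
 *	SMUX_LOG_PKT_TX(pkt);	logs the packet when MSM_SMUX_PKT is set
 */
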
/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. The structure temporarily holds the packet data while a
 * retry is scheduled.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int remote_is_alive;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
	unsigned remote_initiated_wakeup_count;
	unsigned local_initiated_wakeup_count;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_DELAY] = "DELAY",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_events))
		return smux_events[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
			__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
			__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
			__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt Packet to log
 * @is_recv 1 = RX packet; 0 = TX packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
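 *
 * Example (illustrative, not from a real trace): a 4-byte DATA packet
 * received on lcid 1, both sides opened in normal mode, would log as
 *   smux: R1 ON:ON DATA flags 0 len 4:0 de ad be ef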
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it is freed,
 * or use smux_alloc_pkt_payload() to allocate the payload so that it is freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
			__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

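/*
 * Illustrative allocate/fill/free sequence for the packet helpers above.
 * Editor-added sketch, not driver code (the lcid and payload bytes are
 * made up), so it is compiled out.
 */
#if 0
static int smux_pkt_helpers_example(void)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();			/* sets the allocated flag */
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = 1;			/* hypothetical channel */
	pkt->hdr.payload_len = 4;
	if (smux_alloc_pkt_payload(pkt)) {	/* sets free_payload flag */
		smux_free_pkt(pkt);
		return -ENOMEM;
	}
	memcpy(pkt->payload, "test", 4);

	smux_free_pkt(pkt);			/* frees payload and packet */
	return 0;
}
#endif
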
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt Packet to serialize
 * @out Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
			unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

968 * Serialize header and provide pointer to the data.
969 *
970 * @pkt Packet
971 * @out[out] Pointer to the serialized header data
972 * @out_len[out] Pointer to the serialized header length
973 */
974static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
975 unsigned int *out_len)
976{
977 *out = (char *)&pkt->hdr;
978 *out_len = sizeof(struct smux_hdr_t);
979}
980
981/**
982 * Serialize payload and provide pointer to the data.
983 *
984 * @pkt Packet
985 * @out[out] Pointer to the serialized payload data
986 * @out_len[out] Pointer to the serialized payload length
987 */
988static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
989 unsigned int *out_len)
990{
991 *out = pkt->payload;
992 *out_len = pkt->hdr.payload_len;
993}
994
995/**
996 * Serialize padding and provide pointer to the data.
997 *
998 * @pkt Packet
999 * @out[out] Pointer to the serialized padding (always NULL)
1000 * @out_len[out] Pointer to the serialized payload length
1001 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch Channel to queue packet on
 * @queue Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
			if (!(list_empty(&ch->tx_queue)))
				tx_ready = 1;
		}
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
		if (enable_powerdown)
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				if (enable_powerdown)
					ack_pkt->hdr.flags |=
						SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
1331 "%s: Remote loopack allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
1424 "%s: Remote loopack allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: Remote loopback allocation failure\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06001543 __func__);
1544 }
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
					&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			SMUX_ERR("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			SMUX_ERR("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}
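
/*
 * For reference, the get_rx_buffer() callback contract exercised above
 * (and again in smux_rx_retry_worker() below) is: return 0 with a valid
 * buffer to accept the packet, return -EAGAIN (or 0 with a NULL buffer)
 * to have the packet queued for retry, and return any other negative
 * value to drop it with a SMUX_READ_FAIL notification.  A minimal client
 * callback might look like the following sketch; my_ctx and my_try_alloc
 * are hypothetical client-side names, not part of this driver:
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		struct my_ctx *ctx = priv;
 *		void *buf = my_try_alloc(ctx, size);
 *
 *		if (!buf)
 *			return -EAGAIN;
 *		*pkt_priv = ctx;
 *		*buffer = buf;
 *		return 0;
 *	}
 */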

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		SMUX_ERR("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
	    (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disable TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			SMUX_ERR("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			SMUX_ERR("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}
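
/*
 * For reference, the power states used above and in the TX path form
 * approximately the following sequence.  This is a sketch reconstructed
 * from the transitions in this file, not a formal state chart:
 *
 *   SMUX_PWR_OFF               --(TX data queued)-->        SMUX_PWR_TURNING_ON
 *   SMUX_PWR_TURNING_ON        --(wakeup ACK received)-->   SMUX_PWR_ON
 *   SMUX_PWR_ON                --(inactivity or remote
 *                                 sleep request)-->         SMUX_PWR_TURNING_OFF_FLUSH
 *   SMUX_PWR_TURNING_OFF_FLUSH --(local power-ctl packet
 *                                 sent by the TX worker)--> SMUX_PWR_TURNING_OFF
 *                              (or directly to SMUX_PWR_OFF_FLUSH when the
 *                               packet being sent is the ACK of a remote
 *                               sleep request)
 *   SMUX_PWR_TURNING_OFF       --(sleep request ACK)-->     SMUX_PWR_OFF_FLUSH
 *   SMUX_PWR_OFF_FLUSH         --(UART flushed and
 *                                 powered down)-->          SMUX_PWR_OFF
 */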

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		SMUX_ERR("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.remote_initiated_wakeup_count++;
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else if (smux.power_state == SMUX_PWR_ON) {
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		/* stale wakeup request from previous wakeup */
		SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			SMUX_PWR("smux: RX Wakeup REQ\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			SMUX_PWR("smux: RX Wakeup ACK\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
					__func__, (unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			SMUX_ERR(
				"%s: rx parse error for char %c; *used=%d, len=%d\n",
				__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}
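
/*
 * To summarize the receive path above: bytes from the TTY are scanned for
 * single-byte wakeup commands while idle, and packets are framed as
 *
 *   SMUX_MAGIC_WORD1 | SMUX_MAGIC_WORD2 | rest of struct smux_hdr_t |
 *   payload_len payload bytes | pad_len pad bytes
 *
 * with the state machine moving SMUX_RX_IDLE -> SMUX_RX_MAGIC ->
 * SMUX_RX_HDR -> SMUX_RX_PAYLOAD and back to SMUX_RX_IDLE, at which point
 * the assembled buffer is handed to smux_deserialize().  This is a
 * descriptive sketch of the handlers above, not a separate protocol
 * specification.
 */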

/**
 * Returns true if the remote side has acknowledged a wakeup
 * request previously, so we know that the link is alive and active.
 *
 * @returns true for is alive, false for not alive
 */
bool smux_remote_is_active(void)
{
	bool is_active = false;

	mutex_lock(&smux.mutex_lha0);
	if (smux.remote_is_alive)
		is_active = true;
	mutex_unlock(&smux.mutex_lha0);

	return is_active;
}

/**
 * Sends a delay command to the remote side.
 *
 * @ms: Time in milliseconds for the remote side to delay
 *
 * This command defines the delay that the remote side will use
 * to slow the response time for DATA commands.
 */
void smux_set_loopback_data_reply_delay(uint32_t ms)
{
	struct smux_lch_t *ch = &smux_lch[SMUX_TEST_LCID];
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: unable to allocate packet\n", __func__);
		return;
	}

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_DELAY;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = sizeof(uint32_t);
	pkt->hdr.pad_len = 0;

	if (smux_alloc_pkt_payload(pkt)) {
		pr_err("%s: unable to allocate payload\n", __func__);
		smux_free_pkt(pkt);
		return;
	}
	memcpy(pkt->payload, &ms, sizeof(uint32_t));

	smux_tx_queue(pkt, ch, 1);
}

/**
 * Retrieve wakeup counts.
 *
 * @local_cnt: Pointer to local wakeup count
 * @remote_cnt: Pointer to remote wakeup count
 */
void smux_get_wakeup_counts(int *local_cnt, int *remote_cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);

	if (local_cnt)
		*local_cnt = smux.local_initiated_wakeup_count;

	if (remote_cnt)
		*remote_cnt = smux.remote_initiated_wakeup_count;

	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit packet on correct transport and then perform client
 * notification.
 *
 * @ch Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("smux: %s: PKT write done", __func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				SMUX_ERR("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}

/**
 * Flush pending TTY TX data.
 */
static void smux_flush_tty(void)
{
	mutex_lock(&smux.mutex_lha0);
	if (!smux.tty) {
		SMUX_ERR("%s: ldisc not loaded\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}

	tty_wait_until_sent(smux.tty,
			msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

	if (tty_chars_in_buffer(smux.tty) > 0)
		SMUX_ERR("%s: unable to flush UART queue\n", __func__);

	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Purge TX queue for logical channel.
 *
 * @ch Logical channel pointer
 * @is_ssr 1 = this is a subsystem restart purge
 *
 * Must be called with the following spinlocks locked:
 *  state_lock_lhb1
 *  tx_lock_lhb2
 */
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
{
	struct smux_pkt_t *pkt;
	int send_disconnect = 0;
	struct smux_pkt_t *pkt_tmp;
	int is_state_pkt;

	list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
		is_state_pkt = 0;
		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
				/* Open ACK must still be sent */
				is_state_pkt = 1;
			} else {
				/* Open never sent -- force to closed state */
				ch->local_state = SMUX_LCH_LOCAL_CLOSED;
				send_disconnect = 1;
			}
		} else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
				is_state_pkt = 1;
			if (!send_disconnect)
				is_state_pkt = 1;
		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* Notify client of failed write */
			union notifier_metadata meta_write;

			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
		}

		if (!is_state_pkt || is_ssr) {
			list_del(&pkt->list);
			smux_free_pkt(pkt);
		}
	}

	if (send_disconnect) {
		union notifier_metadata meta_disconnected;

		meta_disconnected.disconnected.is_ssr = smux.in_reset;
		schedule_notify(ch->lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
	}
}

/**
 * Power-up the UART.
 *
 * Must be called with smux.mutex_lha0 already locked.
 */
static void smux_uart_power_on_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_on_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Power down the UART.
 *
 * Must be called with mutex_lha0 locked.
 */
static void smux_uart_power_off_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_off_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_ON) {
		/* wakeup complete */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("smux: %s: wakeup complete\n", __func__);

		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
		queue_work(smux_tx_wq, &smux_tx_work);
	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* retry wakeup */
		wakeup_delay = smux.pwr_wakeup_delay_us;
		smux.pwr_wakeup_delay_us <<= 1;
		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
			smux.pwr_wakeup_delay_us =
				SMUX_WAKEUP_DELAY_MAX;

		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
			queue_work(smux_tx_wq, &smux_wakeup_work);
		} else {
			/* schedule delayed work */
			SMUX_DBG(
			"smux: %s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
		}
	} else {
		/* wakeup aborted */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
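
/*
 * Worked example of the back-off above, assuming no wakeup ACK ever
 * arrives (illustrative arithmetic only): pwr_wakeup_delay_us starts at
 * 1 and doubles per attempt, so delays below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 = 32768 us) are busy-waited as 1, 2, 4, ... 16384 us; after
 * that the retry is scheduled as delayed work at roughly 32 ms, 65 ms,
 * 131 ms, ... until the delay is capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 = 1048576 us), i.e. about one wakeup request per second in
 * steady state.
 */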

/**
 * Inactivity timeout worker. Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	unsigned long flags;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_PWR("smux: %s: Power %d->%d\n",
						__func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);
					smux.power_state =
						SMUX_PWR_TURNING_OFF_FLUSH;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
					list_add_tail(&pkt->list,
							&smux.power_queue);
					queue_work(smux_tx_wq, &smux_tx_work);
				} else {
					SMUX_ERR("%s: packet alloc failed\n",
							__func__);
				}
			}
		}
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
		/* ready to power-down the UART */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux.power_state = SMUX_PWR_OFF;

		/* if data is pending, schedule a new wakeup */
		if (!list_empty(&smux.lch_tx_ready_list) ||
			!list_empty(&smux.power_queue))
			queue_work(smux_tx_wq, &smux_tx_work);

		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

		/* flush UART output queue and power down */
		smux_flush_tty();
		smux_uart_power_off();
	} else {
		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}

/**
 * Remove RX retry packet from channel and free it.
 *
 * @ch Channel for retry packet
 * @retry Retry packet to remove
 *
 * @returns 1 if flow control updated; 0 otherwise
 *
 * Must be called with state_lock_lhb1 locked.
 */
int smux_remove_rx_retry(struct smux_lch_t *ch,
		struct smux_rx_pkt_retry *retry)
{
	int tx_ready = 0;

	list_del(&retry->rx_retry_list);
	--ch->rx_retry_queue_cnt;
	smux_free_pkt(retry->pkt);
	kfree(retry);

	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
			ch->rx_flow_control_auto) {
		ch->rx_flow_control_auto = 0;
		smux_rx_flow_control_updated(ch);
		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
		tx_ready = 1;
	}
	return tx_ready;
}

/**
 * RX worker handles all receive operations.
 *
 * @work Work structure contained in struct smux_rx_worker_data
 */
static void smux_rx_worker(struct work_struct *work)
{
	unsigned long flags;
	int used;
	int initial_rx_state;
	struct smux_rx_worker_data *w;
	const unsigned char *data;
	int len;
	int flag;

	w = container_of(work, struct smux_rx_worker_data, work);
	data = w->data;
	len = w->len;
	flag = w->flag;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	smux.rx_activity_flag = 1;
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
	used = 0;
	do {
		if (smux.in_reset) {
			SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}

		SMUX_DBG("smux: %s: state %d; %d of %d\n",
				__func__, smux.rx_state, used, len);
		initial_rx_state = smux.rx_state;

		switch (smux.rx_state) {
		case SMUX_RX_IDLE:
			smux_rx_handle_idle(data, len, &used, flag);
			break;
		case SMUX_RX_MAGIC:
			smux_rx_handle_magic(data, len, &used, flag);
			break;
		case SMUX_RX_HDR:
			smux_rx_handle_hdr(data, len, &used, flag);
			break;
		case SMUX_RX_PAYLOAD:
			smux_rx_handle_pkt_payload(data, len, &used, flag);
			break;
		default:
			SMUX_DBG("smux: %s: invalid state %d\n",
					__func__, smux.rx_state);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}
	} while (used < len || smux.rx_state != initial_rx_state);

	complete(&w->work_complete);
}

/**
 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
 * because the client was not ready (-EAGAIN).
 *
 * @work Work structure contained in smux_lch_t structure
 */
static void smux_rx_retry_worker(struct work_struct *work)
{
	struct smux_lch_t *ch;
	struct smux_rx_pkt_retry *retry;
	union notifier_metadata metadata;
	int tmp;
	unsigned long flags;
	int immediate_retry = 0;
	int tx_ready = 0;

	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);

	/* get next retry packet */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
		/* port has been closed - remove all retries */
		while (!list_empty(&ch->rx_retry_queue)) {
			retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);
			(void)smux_remove_rx_retry(ch, retry);
		}
	}

	if (list_empty(&ch->rx_retry_queue)) {
		SMUX_DBG("smux: %s: retry list empty for channel %d\n",
				__func__, ch->lcid);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		return;
	}
	retry = list_first_entry(&ch->rx_retry_queue,
			struct smux_rx_pkt_retry,
			rx_retry_list);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
			__func__, ch->lcid, retry);
	metadata.read.pkt_priv = 0;
	metadata.read.buffer = 0;
	tmp = ch->get_rx_buffer(ch->priv,
			(void **)&metadata.read.pkt_priv,
			(void **)&metadata.read.buffer,
			retry->pkt->hdr.payload_len);
	if (tmp == 0 && metadata.read.buffer) {
		/* have valid RX buffer */

		memcpy(metadata.read.buffer, retry->pkt->payload,
				retry->pkt->hdr.payload_len);
		metadata.read.len = retry->pkt->hdr.payload_len;

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
		if (tx_ready)
			list_channel(ch);

		immediate_retry = 1;
	} else if (tmp == -EAGAIN ||
			(tmp == 0 && !metadata.read.buffer)) {
		/* retry again */
		retry->timeout_in_ms <<= 1;
		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
			/* timed out */
			SMUX_ERR("%s: ch %d RX retry client timeout\n",
					__func__, ch->lcid);
			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
			tx_ready = smux_remove_rx_retry(ch, retry);
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
			if (tx_ready)
				list_channel(ch);
		}
	} else {
		/* client error - drop packet */
		SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
				__func__, ch->lcid, tmp);
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
		if (tx_ready)
			list_channel(ch);
	}

	/* schedule next retry */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (!list_empty(&ch->rx_retry_queue)) {
		retry = list_first_entry(&ch->rx_retry_queue,
				struct smux_rx_pkt_retry,
				rx_retry_list);

		if (immediate_retry)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
		else
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}

/**
 * Transmit worker handles serializing and transmitting packets onto the
 * underlying transport.
 *
 * @work Work structure (not used)
 */
static void smux_tx_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	struct smux_lch_t *ch;
	unsigned low_wm_notif;
	unsigned lcid;
	unsigned long flags;

	/*
	 * Transmit packets in round-robin fashion based upon ready
	 * channels.
	 *
	 * To eliminate the need to hold a lock for the entire
	 * iteration through the channel ready list, the head of the
	 * ready-channel list is always the next channel to be
	 * processed. To send a packet, the first valid packet in
	 * the head channel is removed and the head channel is then
	 * rescheduled at the end of the queue by removing it and
	 * inserting after the tail. The locks can then be released
	 * while the packet is processed.
	 */
	while (!smux.in_reset) {
		pkt = NULL;
		low_wm_notif = 0;

		spin_lock_irqsave(&smux.tx_lock_lha2, flags);

		/* handle wakeup if needed */
		if (smux.power_state == SMUX_PWR_OFF) {
			if (!list_empty(&smux.lch_tx_ready_list) ||
			    !list_empty(&smux.power_queue)) {
				/* data to transmit, do wakeup */
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_ON);
				smux.local_initiated_wakeup_count++;
				smux.power_state = SMUX_PWR_TURNING_ON;
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
				queue_work(smux_tx_wq, &smux_wakeup_work);
			} else {
				/* no activity -- stay asleep */
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
			}
			break;
		}

		/* process any pending power packets */
		if (!list_empty(&smux.power_queue)) {
			pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t, list);
			list_del(&pkt->list);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* Adjust power state if this is a flush command */
			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
					smux.power_ctl_remote_req_received) {
					/*
					 * Sending remote power-down request ACK
					 * or sending local power-down request
					 * and we already received a remote
					 * power-down request.
					 */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_OFF_FLUSH);
					smux.power_state = SMUX_PWR_OFF_FLUSH;
					smux.power_ctl_remote_req_received = 0;
					queue_work(smux_tx_wq,
							&smux_inactivity_work);
				} else {
					/* sending local power-down request */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;
				}
			}
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* send the packet */
			smux_uart_power_on();
			smux.tx_activity_flag = 1;
			SMUX_PWR_PKT_TX(pkt);
			if (!smux_byte_loopback) {
				smux_tx_tty(pkt);
				smux_flush_tty();
			} else {
				smux_tx_loopback(pkt);
			}

			smux_free_pkt(pkt);
			continue;
		}

		/* get the next ready channel */
		if (list_empty(&smux.lch_tx_ready_list)) {
			/* no ready channels */
			SMUX_DBG("smux: %s: no more ready channels, exiting\n",
					__func__);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}
		smux.tx_activity_flag = 1;

		if (smux.power_state != SMUX_PWR_ON) {
			/* channel not ready to transmit */
			SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
					__func__,
					smux.power_state);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}

		/* get the next packet to send and rotate channel list */
		ch = list_first_entry(&smux.lch_tx_ready_list,
				struct smux_lch_t,
				tx_ready_list);

		spin_lock(&ch->state_lock_lhb1);
		spin_lock(&ch->tx_lock_lhb2);
		if (!list_empty(&ch->tx_queue)) {
			/*
			 * If remote TX flow control is enabled or
			 * the channel is not fully opened, then only
			 * send command packets.
			 */
			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
				struct smux_pkt_t *curr;
				list_for_each_entry(curr, &ch->tx_queue, list) {
					if (curr->hdr.cmd != SMUX_CMD_DATA) {
						pkt = curr;
						break;
					}
				}
			} else {
				/* get next cmd/data packet to send */
				pkt = list_first_entry(&ch->tx_queue,
						struct smux_pkt_t, list);
			}
		}

		if (pkt) {
			list_del(&pkt->list);

			/* update packet stats */
			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
				--ch->tx_pending_data_cnt;
				if (ch->notify_lwm &&
					ch->tx_pending_data_cnt
						<= SMUX_TX_WM_LOW) {
					ch->notify_lwm = 0;
					low_wm_notif = 1;
				}
			}

			/* advance to the next ready channel */
			list_rotate_left(&smux.lch_tx_ready_list);
		} else {
			/* no data in channel to send, remove from ready list */
			list_del(&ch->tx_ready_list);
			INIT_LIST_HEAD(&ch->tx_ready_list);
		}
		lcid = ch->lcid;
		spin_unlock(&ch->tx_lock_lhb2);
		spin_unlock(&ch->state_lock_lhb1);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (low_wm_notif)
			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);

		/* send the packet */
		smux_tx_pkt(ch, pkt);
		smux_free_pkt(pkt);
	}
}

/**
 * Update the RX flow control (sent in the TIOCM Status command).
 *
 * @ch Channel for update
 *
 * @returns 1 for updated, 0 for not updated
 *
 * Must be called with ch->state_lock_lhb1 locked.
 */
static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
{
	int updated = 0;
	int prev_state;

	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;

	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
	else
		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;

	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
		smux_send_status_cmd(ch);
		updated = 1;
	}

	return updated;
}

/**
 * Flush all SMUX workqueues.
 *
 * This sets the reset bit to abort any processing loops and then
 * flushes the workqueues to ensure that no new pending work is
 * running. Do not call with any locks used by workers held as
 * this will result in a deadlock.
 */
static void smux_flush_workqueues(void)
{
	smux.in_reset = 1;

	SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
	SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
	flush_workqueue(smux_notify_wq);
}

/**********************************************************************/
/* Kernel API                                                         */
/**********************************************************************/

/**
 * Set or clear channel option using the SMUX_CH_OPTION_* channel
 * flags.
 *
 * @lcid Logical channel ID
 * @set Options to set
 * @clear Options to clear
 *
 * @returns 0 for success, < 0 for failure
 */
int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
{
	unsigned long flags;
	struct smux_lch_t *ch;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	/* Local loopback mode */
	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;

	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* Remote loopback mode */
	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* RX Flow control */
	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 1;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	/* Auto RX Flow Control */
	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
				__func__);
		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
	}

	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
				__func__);
		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->rx_flow_control_auto = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
3085
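/*
 * Illustrative usage sketch (editorial, not part of the driver): a
 * client that wants SMUX to throttle the remote transmitter when its
 * own RX queue backs up can enable the automatic flow-control option.
 * The channel ID (5) and error handling are hypothetical.
 *
 *        ret = msm_smux_set_ch_option(5,
 *                        SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
 *        if (ret < 0)
 *                pr_err("set_ch_option failed: %d\n", ret);
 */
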
/**
 * Starts the opening sequence for a logical channel.
 *
 * @lcid Logical channel ID
 * @priv Free for client usage
 * @notify Event notification function
 * @get_rx_buffer Function used to provide a receive buffer to SMUX
 *
 * @returns 0 for success, <0 otherwise
 *
 * The channel must be fully closed (either never opened, or
 * msm_smux_close() has been called and the SMUX_DISCONNECTED event has
 * been received).
 *
 * Once the remote side is opened, the client will receive a
 * SMUX_CONNECTED event.
 */
int msm_smux_open(uint8_t lcid, void *priv,
        void (*notify)(void *priv, int event_type, const void *metadata),
        int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
                                                                int size))
{
        int ret;
        struct smux_lch_t *ch;
        struct smux_pkt_t *pkt;
        int tx_ready = 0;
        unsigned long flags;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);

        if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
                ret = -EAGAIN;
                goto out;
        }

        if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
                SMUX_ERR("%s: open lcid %d local state %x invalid\n",
                                __func__, lcid, ch->local_state);
                ret = -EINVAL;
                goto out;
        }

        SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
                        ch->local_state,
                        SMUX_LCH_LOCAL_OPENING);

        ch->rx_flow_control_auto = 0;
        ch->local_state = SMUX_LCH_LOCAL_OPENING;

        ch->priv = priv;
        ch->notify = notify;
        ch->get_rx_buffer = get_rx_buffer;
        ret = 0;

        /* Send Open Command */
        pkt = smux_alloc_pkt();
        if (!pkt) {
                ret = -ENOMEM;
                goto out;
        }
        pkt->hdr.magic = SMUX_MAGIC;
        pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
        pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
        if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
                pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
        pkt->hdr.lcid = lcid;
        pkt->hdr.payload_len = 0;
        pkt->hdr.pad_len = 0;
        smux_tx_queue(pkt, ch, 0);
        tx_ready = 1;

out:
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
        smux_rx_flow_control_updated(ch);
        if (tx_ready)
                list_channel(ch);
        return ret;
}

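/*
 * Illustrative open sketch (editorial, not part of the driver).  The
 * names my_notify, my_get_rx_buffer, my_ctx, and channel 5 are
 * hypothetical; the callback signatures match msm_smux_open() above.
 *
 *        static void my_notify(void *priv, int event, const void *meta)
 *        {
 *                if (event == SMUX_CONNECTED)
 *                        pr_info("smux channel fully open\n");
 *        }
 *
 *        static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *                        void **buffer, int size)
 *        {
 *                *pkt_priv = NULL;
 *                *buffer = kmalloc(size, GFP_ATOMIC);
 *                return *buffer ? 0 : -ENOMEM;
 *        }
 *
 *        ret = msm_smux_open(5, my_ctx, my_notify, my_get_rx_buffer);
 */
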
/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the
 * client will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
        int ret = 0;
        struct smux_lch_t *ch;
        struct smux_pkt_t *pkt;
        int tx_ready = 0;
        unsigned long flags;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);
        ch->local_tiocm = 0x0;
        ch->remote_tiocm = 0x0;
        ch->tx_pending_data_cnt = 0;
        ch->notify_lwm = 0;
        ch->tx_flow_control = 0;

        /* Purge TX queue */
        spin_lock(&ch->tx_lock_lhb2);
        smux_purge_ch_tx_queue(ch, 0);
        spin_unlock(&ch->tx_lock_lhb2);

        /* Send Close Command */
        if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
            ch->local_state == SMUX_LCH_LOCAL_OPENING) {
                SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
                                ch->local_state,
                                SMUX_LCH_LOCAL_CLOSING);

                ch->local_state = SMUX_LCH_LOCAL_CLOSING;
                pkt = smux_alloc_pkt();
                if (pkt) {
                        pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
                        pkt->hdr.flags = 0;
                        pkt->hdr.lcid = lcid;
                        pkt->hdr.payload_len = 0;
                        pkt->hdr.pad_len = 0;
                        smux_tx_queue(pkt, ch, 0);
                        tx_ready = 1;
                } else {
                        SMUX_ERR("%s: pkt allocation failed\n", __func__);
                        ret = -ENOMEM;
                }

                /* Purge RX retry queue */
                if (ch->rx_retry_queue_cnt)
                        queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
        }
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        if (tx_ready)
                list_channel(ch);

        return ret;
}

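/*
 * Illustrative close sketch (editorial, not part of the driver): the
 * completion my_disconnected and channel 5 are hypothetical; the
 * completion would be signaled from the client's notify callback on
 * SMUX_DISCONNECTED.
 *
 *        ret = msm_smux_close(5);
 *        if (ret == 0)
 *                wait_for_completion(&my_disconnected);
 */
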
/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *           SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
        struct smux_lch_t *ch;
        struct smux_pkt_t *pkt = NULL;
        int tx_ready = 0;
        unsigned long flags;
        int ret;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);

        if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
            ch->local_state != SMUX_LCH_LOCAL_OPENING) {
                SMUX_ERR("%s: invalid local state %d channel %d\n",
                                        __func__, ch->local_state, lcid);
                ret = -EINVAL;
                goto out;
        }

        if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
                SMUX_ERR("%s: payload %d too large\n",
                                __func__, len);
                ret = -E2BIG;
                goto out;
        }

        pkt = smux_alloc_pkt();
        if (!pkt) {
                ret = -ENOMEM;
                goto out;
        }

        pkt->hdr.cmd = SMUX_CMD_DATA;
        pkt->hdr.lcid = lcid;
        pkt->hdr.flags = 0;
        pkt->hdr.payload_len = len;
        pkt->payload = (void *)data;
        pkt->priv = pkt_priv;
        pkt->hdr.pad_len = 0;

        spin_lock(&ch->tx_lock_lhb2);
        /* verify high watermark */
        SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

        if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
                SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
                                __func__, lcid, SMUX_TX_WM_HIGH,
                                ch->tx_pending_data_cnt);
                ret = -EAGAIN;
                goto out_inner;
        }

        /* queue packet for transmit */
        if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
                ch->notify_lwm = 1;
                SMUX_ERR("%s: high watermark hit\n", __func__);
                schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
        }
        list_add_tail(&pkt->list, &ch->tx_queue);

        /* add to ready list */
        if (IS_FULLY_OPENED(ch))
                tx_ready = 1;

        ret = 0;

out_inner:
        spin_unlock(&ch->tx_lock_lhb2);

out:
        /* pkt is NULL on error paths taken before allocation */
        if (ret && pkt)
                smux_free_pkt(pkt);
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        if (tx_ready)
                list_channel(ch);

        return ret;
}

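/*
 * Illustrative write sketch (editorial, not part of the driver): buf,
 * buf_len, my_ctx, and channel 5 are hypothetical.  Note that buf must
 * remain valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL is delivered,
 * since the driver queues the caller's pointer rather than copying.
 *
 *        ret = msm_smux_write(5, my_ctx, buf, buf_len);
 *        if (ret == -EAGAIN) {
 *                ... queue full: wait for SMUX_LOW_WM_HIT, then retry ...
 *        }
 */
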
/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
        struct smux_lch_t *ch;
        unsigned long flags;
        int is_full = 0;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];

        spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
        if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
                is_full = 1;
        spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

        return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
        struct smux_lch_t *ch;
        unsigned long flags;
        int is_low = 0;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];

        spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
        if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
                is_low = 1;
        spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

        return is_low;
}

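/*
 * Illustrative pacing sketch (editorial, not part of the driver):
 * combine the two watermark queries above to decide when to queue
 * more data.  Channel 5, buf, and len are hypothetical.
 *
 *        if (msm_smux_is_ch_full(5) == 0)
 *                ret = msm_smux_write(5, NULL, buf, len);
 *
 * A client that stopped writing on SMUX_HIGH_WM_HIT can poll
 * msm_smux_is_ch_low() (or wait for SMUX_LOW_WM_HIT) before resuming.
 */
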
/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
        struct smux_pkt_t *pkt;

        if (!ch)
                return -EINVAL;

        pkt = smux_alloc_pkt();
        if (!pkt)
                return -ENOMEM;

        pkt->hdr.lcid = ch->lcid;
        pkt->hdr.cmd = SMUX_CMD_STATUS;
        pkt->hdr.flags = ch->local_tiocm;
        pkt->hdr.payload_len = 0;
        pkt->hdr.pad_len = 0;
        smux_tx_queue(pkt, ch, 0);

        return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
        long status = 0x0;

        status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
        status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
        status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
        status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

        status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
        status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

        return status;
}

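/*
 * Note on the mapping above (editorial): the remote end's status bits
 * surface as the "input" modem signals (RTC -> DSR, RTR -> CTS, RI,
 * DCD), while the locally asserted bits are reported back as the
 * "output" signals (RTC -> DTR, RTR -> RTS), mirroring a conventional
 * serial port.
 */
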
/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0 Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
        struct smux_lch_t *ch;
        unsigned long flags;
        long status = 0x0;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);
        status = msm_smux_tiocm_get_atomic(ch);
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
        struct smux_lch_t *ch;
        unsigned long flags;
        uint8_t old_status;
        uint8_t status_set = 0x0;
        uint8_t status_clear = 0x0;
        int tx_ready = 0;
        int ret = 0;

        if (smux_assert_lch_id(lcid))
                return -ENXIO;

        ch = &smux_lch[lcid];
        spin_lock_irqsave(&ch->state_lock_lhb1, flags);

        status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
        status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
        status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
        status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

        status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
        status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
        status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
        status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

        old_status = ch->local_tiocm;
        ch->local_tiocm |= status_set;
        ch->local_tiocm &= ~status_clear;

        if (ch->local_tiocm != old_status) {
                ret = smux_send_status_cmd(ch);
                tx_ready = 1;
        }
        spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

        if (tx_ready)
                list_channel(ch);

        return ret;
}

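/*
 * Illustrative TIOCM sketch (editorial, not part of the driver):
 * assert DTR and RTS on a hypothetical channel 5, then check whether
 * the remote side reports clear-to-send.
 *
 *        ret = msm_smux_tiocm_set(5, TIOCM_DTR | TIOCM_RTS, 0);
 *        status = msm_smux_tiocm_get(5);
 *        if (status >= 0 && (status & TIOCM_CTS))
 *                pr_info("remote ready to receive\n");
 */
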
/**********************************************************************/
/* Subsystem Restart */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
        .notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR Code
 * @data Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
                                unsigned long code,
                                void *data)
{
        unsigned long flags;
        int i;
        int tmp;
        int power_off_uart = 0;

        if (code == SUBSYS_BEFORE_SHUTDOWN) {
                SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
                mutex_lock(&smux.mutex_lha0);
                smux.in_reset = 1;
                smux.remote_is_alive = 0;
                mutex_unlock(&smux.mutex_lha0);
                return NOTIFY_DONE;
        } else if (code == SUBSYS_AFTER_POWERUP) {
                /* re-register platform devices */
                SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
                mutex_lock(&smux.mutex_lha0);
                if (smux.ld_open_count > 0
                                && !smux.platform_devs_registered) {
                        for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
                                SMUX_DBG("smux: %s: register pdev '%s'\n",
                                        __func__, smux_devs[i].name);
                                smux_devs[i].dev.release = smux_pdev_release;
                                tmp = platform_device_register(&smux_devs[i]);
                                if (tmp)
                                        SMUX_ERR(
                                                "%s: error %d registering device %s\n",
                                                __func__, tmp, smux_devs[i].name);
                        }
                        smux.platform_devs_registered = 1;
                }
                mutex_unlock(&smux.mutex_lha0);
                return NOTIFY_DONE;
        } else if (code != SUBSYS_AFTER_SHUTDOWN) {
                return NOTIFY_DONE;
        }
        SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

        /* Cleanup channels */
        smux_flush_workqueues();
        mutex_lock(&smux.mutex_lha0);
        if (smux.ld_open_count > 0) {
                smux_lch_purge();
                if (smux.tty)
                        tty_driver_flush_buffer(smux.tty);

                /* Unregister platform devices */
                if (smux.platform_devs_registered) {
                        for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
                                SMUX_DBG("smux: %s: unregister pdev '%s'\n",
                                                __func__, smux_devs[i].name);
                                platform_device_unregister(&smux_devs[i]);
                        }
                        smux.platform_devs_registered = 0;
                }

                /* Power-down UART */
                spin_lock_irqsave(&smux.tx_lock_lha2, flags);
                if (smux.power_state != SMUX_PWR_OFF) {
                        SMUX_PWR("smux: %s: SSR - turning off UART\n",
                                        __func__);
                        smux.power_state = SMUX_PWR_OFF;
                        power_off_uart = 1;
                }
                smux.powerdown_enabled = 0;
                spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

                if (power_off_uart)
                        smux_uart_power_off_atomic();
        }
        smux.tx_activity_flag = 0;
        smux.rx_activity_flag = 0;
        smux.rx_state = SMUX_RX_IDLE;
        smux.in_reset = 0;
        smux.remote_is_alive = 0;
        mutex_unlock(&smux.mutex_lha0);

        return NOTIFY_DONE;
}

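/*
 * SSR lifecycle summary (editorial note): SUBSYS_BEFORE_SHUTDOWN only
 * marks the mux as in-reset; SUBSYS_AFTER_SHUTDOWN purges channels,
 * unregisters the platform devices, and forces the UART off; and
 * SUBSYS_AFTER_POWERUP re-registers the platform devices so that
 * clients can reconnect.
 */
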
/**********************************************************************/
/* Line Discipline Interface */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
        struct platform_device *pdev;

        pdev = container_of(dev, struct platform_device, dev);
        SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
                        __func__, pdev, pdev->name);
        memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
        int i;
        int tmp;
        unsigned long flags;

        if (!smux.is_initialized)
                return -ENODEV;

        mutex_lock(&smux.mutex_lha0);
        if (smux.ld_open_count) {
                SMUX_ERR("%s: %p multiple instances not supported\n",
                        __func__, tty);
                mutex_unlock(&smux.mutex_lha0);
                return -EEXIST;
        }

        if (tty->ops->write == NULL) {
                SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
                mutex_unlock(&smux.mutex_lha0);
                return -EINVAL;
        }

        /* connect to TTY */
        ++smux.ld_open_count;
        smux.in_reset = 0;
        smux.tty = tty;
        tty->disc_data = &smux;
        tty->receive_room = TTY_RECEIVE_ROOM;
        tty_driver_flush_buffer(tty);

        /* power-down the UART if we are idle */
        spin_lock_irqsave(&smux.tx_lock_lha2, flags);
        if (smux.power_state == SMUX_PWR_OFF) {
                SMUX_PWR("smux: %s: powering off uart\n", __func__);
                smux.power_state = SMUX_PWR_OFF_FLUSH;
                spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
                queue_work(smux_tx_wq, &smux_inactivity_work);
        } else {
                spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
        }

        /* register platform devices */
        for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
                SMUX_DBG("smux: %s: register pdev '%s'\n",
                        __func__, smux_devs[i].name);
                smux_devs[i].dev.release = smux_pdev_release;
                tmp = platform_device_register(&smux_devs[i]);
                if (tmp)
                        SMUX_ERR("%s: error %d registering device %s\n",
                                        __func__, tmp, smux_devs[i].name);
        }
        smux.platform_devs_registered = 1;
        mutex_unlock(&smux.mutex_lha0);
        return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
        unsigned long flags;
        int power_up_uart = 0;
        int i;

        SMUX_DBG("smux: %s: ldisc unload\n", __func__);
        smux_flush_workqueues();

        mutex_lock(&smux.mutex_lha0);
        if (smux.ld_open_count <= 0) {
                SMUX_ERR("%s: invalid ld count %d\n", __func__,
                        smux.ld_open_count);
                mutex_unlock(&smux.mutex_lha0);
                return;
        }
        --smux.ld_open_count;

        /* Cleanup channels */
        smux_lch_purge();

        /* Unregister platform devices */
        if (smux.platform_devs_registered) {
                for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
                        SMUX_DBG("smux: %s: unregister pdev '%s'\n",
                                        __func__, smux_devs[i].name);
                        platform_device_unregister(&smux_devs[i]);
                }
                smux.platform_devs_registered = 0;
        }

        /* Schedule UART power-up if it's down */
        spin_lock_irqsave(&smux.tx_lock_lha2, flags);
        if (smux.power_state == SMUX_PWR_OFF)
                power_up_uart = 1;
        smux.power_state = SMUX_PWR_OFF;
        smux.powerdown_enabled = 0;
        smux.tx_activity_flag = 0;
        smux.rx_activity_flag = 0;
        spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

        if (power_up_uart)
                smux_uart_power_on_atomic();

        smux.rx_state = SMUX_RX_IDLE;

        /* Disconnect from TTY */
        smux.tty = NULL;
        smux.remote_is_alive = 0;
        mutex_unlock(&smux.mutex_lha0);
        SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
                        char *fp, int count)
{
        int i;
        int last_idx = 0;
        const char *tty_name = NULL;
        char *f;

        /* verify error flags */
        for (i = 0, f = fp; i < count; ++i, ++f) {
                if (*f != TTY_NORMAL) {
                        if (tty)
                                tty_name = tty->name;
                        SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
                                        tty_name, *f, tty_flag_to_str(*f));

                        /* feed all previous valid data to the parser */
                        smux_rx_state_machine(cp + last_idx, i - last_idx,
                                        TTY_NORMAL);

                        /* feed bad data to parser */
                        smux_rx_state_machine(cp + i, 1, *f);
                        last_idx = i + 1;
                }
        }

        /* feed data to RX state machine */
        smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

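/*
 * Worked example for the splitting logic above (editorial note): for
 * count = 5 with an error flag on byte 2, the loop first feeds bytes
 * 0-1 as TTY_NORMAL, then feeds byte 2 alone with its error flag, and
 * the final call feeds bytes 3-4 as TTY_NORMAL, so every byte reaches
 * the parser exactly once and in order.
 */
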
static void smuxld_flush_buffer(struct tty_struct *tty)
{
        SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
        SMUX_ERR("%s: not supported\n", __func__);
        return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
                unsigned char __user *buf, size_t nr)
{
        SMUX_ERR("%s: not supported\n", __func__);
        return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
                const unsigned char *buf, size_t nr)
{
        SMUX_ERR("%s: not supported\n", __func__);
        return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
                unsigned int cmd, unsigned long arg)
{
        SMUX_ERR("%s: not supported\n", __func__);
        return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
                struct poll_table_struct *tbl)
{
        SMUX_ERR("%s: not supported\n", __func__);
        return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
        SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
        .owner = THIS_MODULE,
        .magic = TTY_LDISC_MAGIC,
        .name = "n_smux",
        .open = smuxld_open,
        .close = smuxld_close,
        .flush_buffer = smuxld_flush_buffer,
        .chars_in_buffer = smuxld_chars_in_buffer,
        .read = smuxld_read,
        .write = smuxld_write,
        .ioctl = smuxld_ioctl,
        .poll = smuxld_poll,
        .receive_buf = smuxld_receive_buf,
        .write_wakeup = smuxld_write_wakeup
};

static int __init smux_init(void)
{
        int ret;

        mutex_init(&smux.mutex_lha0);

        spin_lock_init(&smux.rx_lock_lha1);
        smux.rx_state = SMUX_RX_IDLE;
        smux.power_state = SMUX_PWR_OFF;
        smux.pwr_wakeup_delay_us = 1;
        smux.powerdown_enabled = 0;
        smux.power_ctl_remote_req_received = 0;
        INIT_LIST_HEAD(&smux.power_queue);
        smux.rx_activity_flag = 0;
        smux.tx_activity_flag = 0;
        smux.recv_len = 0;
        smux.tty = NULL;
        smux.ld_open_count = 0;
        smux.in_reset = 0;
        smux.remote_is_alive = 0;
        smux.is_initialized = 1;
        smux.platform_devs_registered = 0;
        smux_byte_loopback = 0;

        spin_lock_init(&smux.tx_lock_lha2);
        INIT_LIST_HEAD(&smux.lch_tx_ready_list);

        ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
        if (ret != 0) {
                SMUX_ERR("%s: error %d registering line discipline\n",
                                __func__, ret);
                return ret;
        }

        subsys_notif_register_notifier("external_modem", &ssr_notifier);

        ret = lch_init();
        if (ret != 0) {
                SMUX_ERR("%s: lch_init failed\n", __func__);
                return ret;
        }

        log_ctx = ipc_log_context_create(1, "smux");
        if (!log_ctx) {
                SMUX_ERR("%s: unable to create log context\n", __func__);
                disable_ipc_logging = 1;
        }

        return 0;
}

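/*
 * Editorial note: after the module loads, userspace must attach this
 * line discipline to the HSUART tty before any mux traffic can flow.
 * A hypothetical sketch using the standard TIOCSETD ioctl (device
 * name assumed):
 *
 *        int fd = open("/dev/ttyHS0", O_RDWR);
 *        int ldisc = N_SMUX;
 *        ioctl(fd, TIOCSETD, &ldisc);
 */
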
static void __exit smux_exit(void)
{
        int ret;

        ret = tty_unregister_ldisc(N_SMUX);
        if (ret != 0) {
                SMUX_ERR("%s: error %d unregistering line discipline\n",
                                __func__, ret);
                return;
        }
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);