/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
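
/*
 * Illustrative use (mirrors the status-command handler later in this file):
 * client notifications are normally gated on the channel being fully open:
 *
 *	if (IS_FULLY_OPENED(ch))
 *		schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
 */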

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
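
/*
 * Nominal sequence (a sketch inferred from the RX power handler below and
 * the comments above; the authoritative transitions live in the TX worker):
 *
 *	SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 *	SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF
 *		-> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */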

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};
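
/*
 * Example client callback shape (illustrative only; my_ch_notify() and
 * consume() are hypothetical client code, not part of this driver):
 *
 *	static void my_ch_notify(void *priv, int event, const void *metadata)
 *	{
 *		if (event == SMUX_READ_DONE) {
 *			const struct smux_meta_read *m = metadata;
 *
 *			consume(m->buffer, m->len);
 *		}
 *	}
 */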

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. This temporary structure holds the data until the retry
 * is performed.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
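
/*
 * Timing note (an assumption from the constants above, since the retry
 * worker is defined elsewhere in this file): timeout_in_ms starts at
 * SMUX_RX_RETRY_MIN_MS and is presumably scaled up toward
 * SMUX_RX_RETRY_MAX_MS (both powers of two) while the client keeps
 * failing to provide a buffer.
 */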

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of the line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state:  Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state:  Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode:  Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event   SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
	if (event < ARRAY_SIZE(smux_events))
		return smux_events[event];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}
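
/*
 * Example log line produced by the format above (illustrative values):
 *
 *	smux: R3 ON:ON DATA flags 0 len 64:0 de ad be ef ...
 *
 * i.e. a received DATA packet on lcid 3, local OPENED/NORMAL, remote
 * OPENED/NORMAL, 64 payload bytes and no padding.
 */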

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %zu\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
					notify_handle->event_type,
					metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
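
/*
 * Typical allocation pattern (a sketch using the helpers above; lcid,
 * data, and len are placeholders, not driver variables):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, data, len);
 *
 * Once free_payload is set by smux_alloc_pkt_payload(), a single
 * smux_free_pkt() releases both the payload and the packet.
 */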

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %zu\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %zu)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}
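
/*
 * Delivery note: handles are queued by pointer through smux_notify_fifo
 * and drained by smux_notify_local_fn() on the single-threaded
 * smux_notify_wq, so client callbacks run serialized and in process
 * context even when schedule_notify() is called under a spinlock.
 */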

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}
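
/*
 * Worked example: a packet with payload_len = 10 and pad_len = 2
 * serializes to sizeof(struct smux_hdr_t) + 10 + 2 bytes, laid out as
 * [header][payload][padding] by smux_serialize() below.
 */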

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized payload length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}
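
/*
 * Illustrative caller (based on the wakeup macros earlier in this file):
 * the power-state machinery queues single-byte handshake packets with,
 * e.g.,
 *
 *	smux_send_byte(SMUX_WAKEUP_ACK);
 *
 * which lands on smux.power_queue and is drained by the TX worker.
 */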

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
							rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}
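
/*
 * get_rx_buffer() contract as exercised above (summary, not a new API):
 * returning 0 with a non-NULL buffer copies the payload and schedules
 * SMUX_READ_DONE; -EAGAIN (or 0 with a NULL buffer) queues the packet on
 * rx_retry_queue for a later attempt; any other negative return drops
 * the packet and schedules SMUX_READ_FAIL.
 */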

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
1783 smux.power_state = SMUX_PWR_OFF_FLUSH;
1784 queue_work(smux_tx_wq, &smux_inactivity_work);
1785 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001786 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001787
1788 return 0;
1789}
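
/*
 * Editorial summary of the power handshake handled above, derived from
 * the code in this file (not part of the original source):
 *
 *	local-initiated:  PWR_ON -> TURNING_OFF_FLUSH (request queued)
 *			  -> TURNING_OFF (request sent by TX worker)
 *			  -> OFF_FLUSH (remote ACK received) -> OFF
 *	remote-initiated: PWR_ON -> TURNING_OFF_FLUSH (ACK queued)
 *			  -> OFF_FLUSH (ACK sent) -> OFF
 *
 * Crossed sleep requests are collapsed by the two "shortcut - no ack"
 * cases above.
 */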
1790
1791/**
1792 * Handle dispatching a completed packet for receive processing.
1793 *
1794 * @pkt Packet to process
1795 *
1796 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001797 */
1798static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1799{
Eric Holmbergf9622662012-06-13 15:55:45 -06001800 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001801
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001802 switch (pkt->hdr.cmd) {
1803 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001804 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001805 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1806 pr_err("%s: invalid channel id %d\n",
1807 __func__, pkt->hdr.lcid);
1808 break;
1809 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001810 ret = smux_handle_rx_open_cmd(pkt);
1811 break;
1812
1813 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001814 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001815 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1816 pr_err("%s: invalid channel id %d\n",
1817 __func__, pkt->hdr.lcid);
1818 break;
1819 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001820 ret = smux_handle_rx_data_cmd(pkt);
1821 break;
1822
1823 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001824 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001825 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1826 pr_err("%s: invalid channel id %d\n",
1827 __func__, pkt->hdr.lcid);
1828 break;
1829 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001830 ret = smux_handle_rx_close_cmd(pkt);
1831 break;
1832
1833 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001834 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001835 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1836 pr_err("%s: invalid channel id %d\n",
1837 __func__, pkt->hdr.lcid);
1838 break;
1839 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001840 ret = smux_handle_rx_status_cmd(pkt);
1841 break;
1842
1843 case SMUX_CMD_PWR_CTL:
1844 ret = smux_handle_rx_power_cmd(pkt);
1845 break;
1846
1847 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001848 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 ret = smux_handle_rx_byte_cmd(pkt);
1850 break;
1851
1852 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001853 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001854 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1855 ret = -EINVAL;
1856 }
1857 return ret;
1858}
1859
1860/**
1861 * Deserializes a packet and dispatches it to the packet receive logic.
1862 *
1863 * @data Raw data for one packet
1864 * @len Length of the data
1865 *
1866 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001867 */
1868static int smux_deserialize(unsigned char *data, int len)
1869{
1870 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001871
1872 smux_init_pkt(&recv);
1873
1874 /*
1875 * It may be possible to optimize this to not use the
1876 * temporary buffer.
1877 */
1878 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1879
1880 if (recv.hdr.magic != SMUX_MAGIC) {
1881 pr_err("%s: invalid header magic\n", __func__);
1882 return -EINVAL;
1883 }
1884
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001885 if (recv.hdr.payload_len)
1886 recv.payload = data + sizeof(struct smux_hdr_t);
1887
1888 return smux_dispatch_rx_pkt(&recv);
1889}
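
/*
 * Editorial sketch of the framing consumed above and by the RX state
 * machine below. The exact field layout of struct smux_hdr_t lives in
 * smux_private.h and is assumed here:
 *
 *	[SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2][remainder of smux_hdr_t]
 *	[payload_len bytes of payload][pad_len bytes of padding]
 */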
1890
1891/**
1892 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001893 */
1894static void smux_handle_wakeup_req(void)
1895{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001896 unsigned long flags;
1897
1898 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001899 if (smux.power_state == SMUX_PWR_OFF
1900 || smux.power_state == SMUX_PWR_TURNING_ON) {
1901 /* wakeup system */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301902 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001903 smux.power_state, SMUX_PWR_ON);
1904 smux.power_state = SMUX_PWR_ON;
1905 queue_work(smux_tx_wq, &smux_wakeup_work);
1906 queue_work(smux_tx_wq, &smux_tx_work);
1907 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1908 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1909 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001910 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001911 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001912 } else {
1913 /* stale wakeup request from previous wakeup */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301914 SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001915 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001916 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001917 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001918}
1919
1920/**
1921 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001922 */
1923static void smux_handle_wakeup_ack(void)
1924{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001925 unsigned long flags;
1926
1927 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001928 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1929 /* received response to wakeup request */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301930 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001931 smux.power_state, SMUX_PWR_ON);
1932 smux.power_state = SMUX_PWR_ON;
1933 queue_work(smux_tx_wq, &smux_tx_work);
1934 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1935 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1936
1937 } else if (smux.power_state != SMUX_PWR_ON) {
1938 /* invalid message */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301939 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001940 __func__, smux.power_state);
1941 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001942 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001943}
1944
1945/**
1946 * RX State machine - IDLE state processing.
1947 *
1948 * @data New RX data to process
1949 * @len Length of the data
1950 * @used Return value of length processed
1951 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001952 */
1953static void smux_rx_handle_idle(const unsigned char *data,
1954 int len, int *used, int flag)
1955{
1956 int i;
1957
1958 if (flag) {
1959 if (smux_byte_loopback)
1960 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1961 smux_byte_loopback);
1962 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1963 ++*used;
1964 return;
1965 }
1966
1967 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1968 switch (data[i]) {
1969 case SMUX_MAGIC_WORD1:
1970 smux.rx_state = SMUX_RX_MAGIC;
1971 break;
1972 case SMUX_WAKEUP_REQ:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301973			SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001974 smux_handle_wakeup_req();
1975 break;
1976 case SMUX_WAKEUP_ACK:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05301977			SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001978 smux_handle_wakeup_ack();
1979 break;
1980 default:
1981 /* unexpected character */
1982 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1983 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1984 smux_byte_loopback);
1985 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1986 (unsigned)data[i]);
1987 break;
1988 }
1989 }
1990
1991 *used = i;
1992}
1993
1994/**
1995 * RX State machine - Header Magic state processing.
1996 *
1997 * @data New RX data to process
1998 * @len Length of the data
1999 * @used Return value of length processed
2000 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002001 */
2002static void smux_rx_handle_magic(const unsigned char *data,
2003 int len, int *used, int flag)
2004{
2005 int i;
2006
2007 if (flag) {
2008 pr_err("%s: TTY RX error %d\n", __func__, flag);
2009 smux_enter_reset();
2010 smux.rx_state = SMUX_RX_FAILURE;
2011 ++*used;
2012 return;
2013 }
2014
2015 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2016 /* wait for completion of the magic */
2017 if (data[i] == SMUX_MAGIC_WORD2) {
2018 smux.recv_len = 0;
2019 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2020 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2021 smux.rx_state = SMUX_RX_HDR;
2022 } else {
2023 /* unexpected / trash character */
2024			pr_err("%s: rx parse error for char 0x%02x; *used=%d, len=%d\n",
2025				__func__, (unsigned)data[i], *used, len);
2026 smux.rx_state = SMUX_RX_IDLE;
2027 }
2028 }
2029
2030 *used = i;
2031}
2032
2033/**
2034 * RX State machine - Packet Header state processing.
2035 *
2036 * @data New RX data to process
2037 * @len Length of the data
2038 * @used Return value of length processed
2039 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002040 */
2041static void smux_rx_handle_hdr(const unsigned char *data,
2042 int len, int *used, int flag)
2043{
2044 int i;
2045 struct smux_hdr_t *hdr;
2046
2047 if (flag) {
2048 pr_err("%s: TTY RX error %d\n", __func__, flag);
2049 smux_enter_reset();
2050 smux.rx_state = SMUX_RX_FAILURE;
2051 ++*used;
2052 return;
2053 }
2054
2055 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2056 smux.recv_buf[smux.recv_len++] = data[i];
2057
2058 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2059 /* complete header received */
2060 hdr = (struct smux_hdr_t *)smux.recv_buf;
2061 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2062 smux.rx_state = SMUX_RX_PAYLOAD;
2063 }
2064 }
2065 *used = i;
2066}
2067
2068/**
2069 * RX State machine - Packet Payload state processing.
2070 *
2071 * @data New RX data to process
2072 * @len Length of the data
2073 * @used Return value of length processed
2074 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002075 */
2076static void smux_rx_handle_pkt_payload(const unsigned char *data,
2077 int len, int *used, int flag)
2078{
2079 int remaining;
2080
2081 if (flag) {
2082 pr_err("%s: TTY RX error %d\n", __func__, flag);
2083 smux_enter_reset();
2084 smux.rx_state = SMUX_RX_FAILURE;
2085 ++*used;
2086 return;
2087 }
2088
2089 /* copy data into rx buffer */
2090 if (smux.pkt_remain < (len - *used))
2091 remaining = smux.pkt_remain;
2092 else
2093 remaining = len - *used;
2094
2095 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2096 smux.recv_len += remaining;
2097 smux.pkt_remain -= remaining;
2098 *used += remaining;
2099
2100 if (smux.pkt_remain == 0) {
2101 /* complete packet received */
2102 smux_deserialize(smux.recv_buf, smux.recv_len);
2103 smux.rx_state = SMUX_RX_IDLE;
2104 }
2105}
2106
2107/**
2108 * Feed data to the receive state machine.
2109 *
2110 * @data Pointer to data block
2111 * @len Length of data
2112 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002113 */
2114void smux_rx_state_machine(const unsigned char *data,
2115 int len, int flag)
2116{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002117 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002118
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002119 work.data = data;
2120 work.len = len;
2121 work.flag = flag;
2122 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2123 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002124
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002125 queue_work(smux_rx_wq, &work.work);
2126 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002127}
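
/*
 * Editorial note: the on-stack work item plus completion above funnels
 * all parsing through the single-threaded smux_rx_wq while keeping the
 * caller's buffer valid -- the caller blocks in wait_for_completion()
 * until smux_rx_worker() has consumed the data.
 */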
2128
2129/**
2130 * Add channel to transmit-ready list and trigger transmit worker.
2131 *
2132 * @ch Channel to add
2133 */
2134static void list_channel(struct smux_lch_t *ch)
2135{
2136 unsigned long flags;
2137
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302138 SMUX_DBG("smux: %s: listing channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002139 __func__, ch->lcid);
2140
2141 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2142 spin_lock(&ch->tx_lock_lhb2);
2143 smux.tx_activity_flag = 1;
2144 if (list_empty(&ch->tx_ready_list))
2145 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2146 spin_unlock(&ch->tx_lock_lhb2);
2147 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2148
2149 queue_work(smux_tx_wq, &smux_tx_work);
2150}
2151
2152/**
2153 * Transmit packet on correct transport and then perform client
2154 * notification.
2155 *
2156 * @ch Channel to transmit on
2157 * @pkt Packet to transmit
2158 */
2159static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2160{
2161 union notifier_metadata meta_write;
2162 int ret;
2163
2164 if (ch && pkt) {
2165 SMUX_LOG_PKT_TX(pkt);
2166 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2167 ret = smux_tx_loopback(pkt);
2168 else
2169 ret = smux_tx_tty(pkt);
2170
2171 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2172 /* notify write-done */
2173 meta_write.write.pkt_priv = pkt->priv;
2174 meta_write.write.buffer = pkt->payload;
2175 meta_write.write.len = pkt->hdr.payload_len;
2176 if (ret >= 0) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302177				SMUX_DBG("smux: %s: PKT write done\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002178 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2179 &meta_write);
2180 } else {
2181 pr_err("%s: failed to write pkt %d\n",
2182 __func__, ret);
2183 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2184 &meta_write);
2185 }
2186 }
2187 }
2188}
2189
2190/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002191 * Flush pending TTY TX data.
2192 */
2193static void smux_flush_tty(void)
2194{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002195 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002196 if (!smux.tty) {
2197 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002198 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002199 return;
2200 }
2201
2202 tty_wait_until_sent(smux.tty,
2203 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2204
2205 if (tty_chars_in_buffer(smux.tty) > 0)
2206 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002207
2208 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002209}
2210
2211/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002212 * Purge TX queue for logical channel.
2213 *
2214 * @ch Logical channel pointer
Eric Holmberg0e914082012-07-11 11:46:28 -06002215 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002216 *
2217 * Must be called with the following spinlocks locked:
2218 * state_lock_lhb1
2219 * tx_lock_lhb2
2220 */
Eric Holmberg0e914082012-07-11 11:46:28 -06002221static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002222{
2223 struct smux_pkt_t *pkt;
2224 int send_disconnect = 0;
Eric Holmberg0e914082012-07-11 11:46:28 -06002225 struct smux_pkt_t *pkt_tmp;
2226 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002227
Eric Holmberg0e914082012-07-11 11:46:28 -06002228 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2229 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002230 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg0e914082012-07-11 11:46:28 -06002231 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2232 /* Open ACK must still be sent */
2233 is_state_pkt = 1;
2234 } else {
2235 /* Open never sent -- force to closed state */
2236 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2237 send_disconnect = 1;
2238 }
2239 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2240 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2241 is_state_pkt = 1;
2242 if (!send_disconnect)
2243 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002244 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2245 /* Notify client of failed write */
2246 union notifier_metadata meta_write;
2247
2248 meta_write.write.pkt_priv = pkt->priv;
2249 meta_write.write.buffer = pkt->payload;
2250 meta_write.write.len = pkt->hdr.payload_len;
2251 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2252 }
Eric Holmberg0e914082012-07-11 11:46:28 -06002253
2254 if (!is_state_pkt || is_ssr) {
2255 list_del(&pkt->list);
2256 smux_free_pkt(pkt);
2257 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002258 }
2259
2260 if (send_disconnect) {
2261 union notifier_metadata meta_disconnected;
2262
2263 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2264 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2265 &meta_disconnected);
2266 }
2267}
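
/*
 * Editorial note: on a normal (non-SSR) purge the OPEN-ACK/CLOSE-ACK
 * "state" packets are deliberately left queued so the remote state
 * machine still sees the handshake complete; an SSR purge drops
 * everything.
 */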
2268
2269/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002270 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002271 *
2272 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002273 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002274static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002275{
2276 struct uart_state *state;
2277
2278 if (!smux.tty || !smux.tty->driver_data) {
2279 pr_err("%s: unable to find UART port for tty %p\n",
2280 __func__, smux.tty);
2281 return;
2282 }
2283 state = smux.tty->driver_data;
2284 msm_hs_request_clock_on(state->uart_port);
2285}
2286
2287/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002288 * Power-up the UART.
2289 */
2290static void smux_uart_power_on(void)
2291{
2292 mutex_lock(&smux.mutex_lha0);
2293 smux_uart_power_on_atomic();
2294 mutex_unlock(&smux.mutex_lha0);
2295}
2296
2297/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002298 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002299 *
2300 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002301 */
Eric Holmberg06011322012-07-06 18:17:03 -06002302static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002303{
2304 struct uart_state *state;
2305
2306 if (!smux.tty || !smux.tty->driver_data) {
2307 pr_err("%s: unable to find UART port for tty %p\n",
2308 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002310 return;
2311 }
2312 state = smux.tty->driver_data;
2313 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002314}
2315
2316/**
2317 * Power down the UART.
2318 */
2319static void smux_uart_power_off(void)
2320{
2321 mutex_lock(&smux.mutex_lha0);
2322 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002323 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002324}
2325
2326/**
2327 * TX Wakeup Worker
2328 *
2329 * @work Not used
2330 *
2331 * Do an exponential back-off wakeup sequence with a maximum period
2332 * of approximately 1 second (1 << 20 microseconds).
2333 */
2334static void smux_wakeup_worker(struct work_struct *work)
2335{
2336 unsigned long flags;
2337 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002338
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002339 if (smux.in_reset)
2340 return;
2341
2342 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2343 if (smux.power_state == SMUX_PWR_ON) {
2344 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002345 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002346 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302347 SMUX_DBG("smux: %s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002348
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002349 /*
2350 * Cancel any pending retry. This avoids a race condition with
2351 * a new power-up request because:
2352 * 1) this worker doesn't modify the state
2353 * 2) this worker is processed on the same single-threaded
2354 * workqueue as new TX wakeup requests
2355 */
2356 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002357 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002358 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002359 /* retry wakeup */
2360 wakeup_delay = smux.pwr_wakeup_delay_us;
2361 smux.pwr_wakeup_delay_us <<= 1;
2362 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2363 smux.pwr_wakeup_delay_us =
2364 SMUX_WAKEUP_DELAY_MAX;
2365
2366 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302367 SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002368 smux_send_byte(SMUX_WAKEUP_REQ);
2369
2370 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302371 SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002372 wakeup_delay);
2373 usleep_range(wakeup_delay, 2*wakeup_delay);
2374 queue_work(smux_tx_wq, &smux_wakeup_work);
2375 } else {
2376 /* schedule delayed work */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302377 SMUX_DBG(
2378 "smux: %s: scheduling delayed wakeup in %u ms\n",
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002379 __func__, wakeup_delay / 1000);
2380 queue_delayed_work(smux_tx_wq,
2381 &smux_wakeup_delayed_work,
2382 msecs_to_jiffies(wakeup_delay / 1000));
2383 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002384 } else {
2385 /* wakeup aborted */
2386 smux.pwr_wakeup_delay_us = 1;
2387 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302388 SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
Eric Holmberga9b06472012-06-22 09:46:34 -06002389 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002390 }
2391}
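
/*
 * Editorial example of the back-off above: the wakeup-request delay runs
 * 1, 2, 4, ... microseconds per retry. Below SMUX_WAKEUP_DELAY_MIN
 * (32768 us) the retry sleeps inline via usleep_range(); past that it is
 * rescheduled as delayed work, saturating at SMUX_WAKEUP_DELAY_MAX
 * (~1 s).
 */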
2392
2393
2394/**
2395 * Inactivity timeout worker. Periodically scheduled when link is active.
2396 * When it detects inactivity, it will power-down the UART link.
2397 *
2398 * @work Work structure (not used)
2399 */
2400static void smux_inactivity_worker(struct work_struct *work)
2401{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002402 struct smux_pkt_t *pkt;
2403 unsigned long flags;
2404
Eric Holmberg06011322012-07-06 18:17:03 -06002405 if (smux.in_reset)
2406 return;
2407
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002408 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2409 spin_lock(&smux.tx_lock_lha2);
2410
2411 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2412 /* no activity */
2413 if (smux.powerdown_enabled) {
2414 if (smux.power_state == SMUX_PWR_ON) {
2415 /* start power-down sequence */
2416 pkt = smux_alloc_pkt();
2417 if (pkt) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302418 SMUX_PWR(
2419 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002420 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002421 SMUX_PWR_TURNING_OFF_FLUSH);
2422 smux.power_state =
2423 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002424
2425 /* send power-down request */
2426 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2427 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002428 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2429 list_add_tail(&pkt->list,
2430 &smux.power_queue);
2431 queue_work(smux_tx_wq, &smux_tx_work);
2432 } else {
2433 pr_err("%s: packet alloc failed\n",
2434 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002435 }
2436 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002437 }
2438 }
2439 smux.tx_activity_flag = 0;
2440 smux.rx_activity_flag = 0;
2441
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002442 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002443 /* ready to power-down the UART */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302444 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002445 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002446 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002447
2448 /* if data is pending, schedule a new wakeup */
2449 if (!list_empty(&smux.lch_tx_ready_list) ||
2450 !list_empty(&smux.power_queue))
2451 queue_work(smux_tx_wq, &smux_tx_work);
2452
2453 spin_unlock(&smux.tx_lock_lha2);
2454 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2455
2456 /* flush UART output queue and power down */
2457 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002458 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002459 } else {
2460 spin_unlock(&smux.tx_lock_lha2);
2461 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002462 }
2463
2464 /* reschedule inactivity worker */
2465 if (smux.power_state != SMUX_PWR_OFF)
2466 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2467 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2468}
2469
2470/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002471 * Remove RX retry packet from channel and free it.
2472 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002473 * @ch Channel for retry packet
2474 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002475 *
2476 * @returns 1 if flow control updated; 0 otherwise
2477 *
2478 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002479 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002480int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002481 struct smux_rx_pkt_retry *retry)
2482{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002483 int tx_ready = 0;
2484
Eric Holmbergb8435c82012-06-05 14:51:29 -06002485 list_del(&retry->rx_retry_list);
2486 --ch->rx_retry_queue_cnt;
2487 smux_free_pkt(retry->pkt);
2488 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002489
2490 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2491 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2492 ch->rx_flow_control_auto) {
2493 ch->rx_flow_control_auto = 0;
2494 smux_rx_flow_control_updated(ch);
2495 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2496 tx_ready = 1;
2497 }
2498 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002499}
2500
2501/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002502 * RX worker handles all receive operations.
2503 *
2504 * @work Work structure contained in struct smux_rx_worker_data
2505 */
2506static void smux_rx_worker(struct work_struct *work)
2507{
2508 unsigned long flags;
2509 int used;
2510 int initial_rx_state;
2511 struct smux_rx_worker_data *w;
2512 const unsigned char *data;
2513 int len;
2514 int flag;
2515
2516 w = container_of(work, struct smux_rx_worker_data, work);
2517 data = w->data;
2518 len = w->len;
2519 flag = w->flag;
2520
2521 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2522 smux.rx_activity_flag = 1;
2523 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2524
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302525 SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002526 used = 0;
2527 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002528 if (smux.in_reset) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302529 SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002530 smux.rx_state = SMUX_RX_IDLE;
2531 break;
2532 }
2533
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302534 SMUX_DBG("smux: %s: state %d; %d of %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002535 __func__, smux.rx_state, used, len);
2536 initial_rx_state = smux.rx_state;
2537
2538 switch (smux.rx_state) {
2539 case SMUX_RX_IDLE:
2540 smux_rx_handle_idle(data, len, &used, flag);
2541 break;
2542 case SMUX_RX_MAGIC:
2543 smux_rx_handle_magic(data, len, &used, flag);
2544 break;
2545 case SMUX_RX_HDR:
2546 smux_rx_handle_hdr(data, len, &used, flag);
2547 break;
2548 case SMUX_RX_PAYLOAD:
2549 smux_rx_handle_pkt_payload(data, len, &used, flag);
2550 break;
2551 default:
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302552 SMUX_DBG("smux: %s: invalid state %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002553 __func__, smux.rx_state);
2554 smux.rx_state = SMUX_RX_IDLE;
2555 break;
2556 }
2557 } while (used < len || smux.rx_state != initial_rx_state);
2558
2559 complete(&w->work_complete);
2560}
2561
2562/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002563 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2564 * because the client was not ready (-EAGAIN).
2565 *
2566 * @work Work structure contained in smux_lch_t structure
2567 */
2568static void smux_rx_retry_worker(struct work_struct *work)
2569{
2570 struct smux_lch_t *ch;
2571 struct smux_rx_pkt_retry *retry;
2572 union notifier_metadata metadata;
2573 int tmp;
2574 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002575 int immediate_retry = 0;
2576 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002577
2578 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2579
2580 /* get next retry packet */
2581 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002582 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002583 /* port has been closed - remove all retries */
2584 while (!list_empty(&ch->rx_retry_queue)) {
2585 retry = list_first_entry(&ch->rx_retry_queue,
2586 struct smux_rx_pkt_retry,
2587 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002588 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002589 }
2590 }
2591
2592 if (list_empty(&ch->rx_retry_queue)) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302593 SMUX_DBG("smux: %s: retry list empty for channel %d\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06002594 __func__, ch->lcid);
2595 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2596 return;
2597 }
2598 retry = list_first_entry(&ch->rx_retry_queue,
2599 struct smux_rx_pkt_retry,
2600 rx_retry_list);
2601 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2602
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302603 SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002604 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002605 metadata.read.pkt_priv = 0;
2606 metadata.read.buffer = 0;
2607 tmp = ch->get_rx_buffer(ch->priv,
2608 (void **)&metadata.read.pkt_priv,
2609 (void **)&metadata.read.buffer,
2610 retry->pkt->hdr.payload_len);
2611 if (tmp == 0 && metadata.read.buffer) {
2612 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002613
Eric Holmbergb8435c82012-06-05 14:51:29 -06002614 memcpy(metadata.read.buffer, retry->pkt->payload,
2615 retry->pkt->hdr.payload_len);
2616 metadata.read.len = retry->pkt->hdr.payload_len;
2617
2618 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002619 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002620 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002621 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002622 if (tx_ready)
2623 list_channel(ch);
2624
2625 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002626 } else if (tmp == -EAGAIN ||
2627 (tmp == 0 && !metadata.read.buffer)) {
2628 /* retry again */
2629 retry->timeout_in_ms <<= 1;
2630 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2631 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002632 pr_err("%s: ch %d RX retry client timeout\n",
2633 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002634 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002635 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002636 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002637 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2638 if (tx_ready)
2639 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002640 }
2641 } else {
2642 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002643 pr_err("%s: ch %d RX retry client failed (%d)\n",
2644 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002645 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002646 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002647 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002648 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002649 if (tx_ready)
2650 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002651 }
2652
2653 /* schedule next retry */
2654 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2655 if (!list_empty(&ch->rx_retry_queue)) {
2656 retry = list_first_entry(&ch->rx_retry_queue,
2657 struct smux_rx_pkt_retry,
2658 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002659
2660 if (immediate_retry)
2661 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2662 else
2663 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2664 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002665 }
2666 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2667}
2668
2669/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002670 * Transmit worker handles serializing and transmitting packets onto the
2671 * underlying transport.
2672 *
2673 * @work Work structure (not used)
2674 */
2675static void smux_tx_worker(struct work_struct *work)
2676{
2677 struct smux_pkt_t *pkt;
2678 struct smux_lch_t *ch;
2679 unsigned low_wm_notif;
2680 unsigned lcid;
2681 unsigned long flags;
2682
2683
2684 /*
2685 * Transmit packets in round-robin fashion based upon ready
2686 * channels.
2687 *
2688 * To eliminate the need to hold a lock for the entire
2689 * iteration through the channel ready list, the head of the
2690 * ready-channel list is always the next channel to be
2691 * processed. To send a packet, the first valid packet in
2692 * the head channel is removed and the head channel is then
2693 * rescheduled at the end of the queue by removing it and
2694 * inserting after the tail. The locks can then be released
2695 * while the packet is processed.
2696 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002697 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002698 pkt = NULL;
2699 low_wm_notif = 0;
2700
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002701 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002702
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002703 /* handle wakeup if needed */
2704 if (smux.power_state == SMUX_PWR_OFF) {
2705 if (!list_empty(&smux.lch_tx_ready_list) ||
2706 !list_empty(&smux.power_queue)) {
2707 /* data to transmit, do wakeup */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302708 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002709 smux.power_state,
2710 SMUX_PWR_TURNING_ON);
2711 smux.power_state = SMUX_PWR_TURNING_ON;
2712 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2713 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002714 queue_work(smux_tx_wq, &smux_wakeup_work);
2715 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002716 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002717 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2718 flags);
2719 }
2720 break;
2721 }
2722
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002723 /* process any pending power packets */
2724 if (!list_empty(&smux.power_queue)) {
2725 pkt = list_first_entry(&smux.power_queue,
2726 struct smux_pkt_t, list);
2727 list_del(&pkt->list);
2728 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2729
Eric Holmberga9b06472012-06-22 09:46:34 -06002730 /* Adjust power state if this is a flush command */
2731 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2732 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2733 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2734 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2735 smux.power_ctl_remote_req_received) {
2736 /*
2737 * Sending remote power-down request ACK
2738 * or sending local power-down request
2739 * and we already received a remote
2740 * power-down request.
2741 */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302742 SMUX_PWR(
2743 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002744 smux.power_state,
2745 SMUX_PWR_OFF_FLUSH);
2746 smux.power_state = SMUX_PWR_OFF_FLUSH;
2747 smux.power_ctl_remote_req_received = 0;
2748 queue_work(smux_tx_wq,
2749 &smux_inactivity_work);
2750 } else {
2751 /* sending local power-down request */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302752 SMUX_PWR(
2753 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002754 smux.power_state,
2755 SMUX_PWR_TURNING_OFF);
2756 smux.power_state = SMUX_PWR_TURNING_OFF;
2757 }
2758 }
2759 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2760
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002761 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002762 smux_uart_power_on();
2763 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002764 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002765 if (!smux_byte_loopback) {
2766 smux_tx_tty(pkt);
2767 smux_flush_tty();
2768 } else {
2769 smux_tx_loopback(pkt);
2770 }
2771
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002772 smux_free_pkt(pkt);
2773 continue;
2774 }
2775
2776 /* get the next ready channel */
2777 if (list_empty(&smux.lch_tx_ready_list)) {
2778 /* no ready channels */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302779 SMUX_DBG("smux: %s: no more ready channels, exiting\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002780 __func__);
2781 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2782 break;
2783 }
2784 smux.tx_activity_flag = 1;
2785
2786 if (smux.power_state != SMUX_PWR_ON) {
2787 /* channel not ready to transmit */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302788 SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002789 __func__,
2790 smux.power_state);
2791 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2792 break;
2793 }
2794
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002795 /* get the next packet to send and rotate channel list */
2796 ch = list_first_entry(&smux.lch_tx_ready_list,
2797 struct smux_lch_t,
2798 tx_ready_list);
2799
2800 spin_lock(&ch->state_lock_lhb1);
2801 spin_lock(&ch->tx_lock_lhb2);
2802 if (!list_empty(&ch->tx_queue)) {
2803 /*
2804 * If remote TX flow control is enabled or
2805 * the channel is not fully opened, then only
2806 * send command packets.
2807 */
2808 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2809 struct smux_pkt_t *curr;
2810 list_for_each_entry(curr, &ch->tx_queue, list) {
2811 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2812 pkt = curr;
2813 break;
2814 }
2815 }
2816 } else {
2817 /* get next cmd/data packet to send */
2818 pkt = list_first_entry(&ch->tx_queue,
2819 struct smux_pkt_t, list);
2820 }
2821 }
2822
2823 if (pkt) {
2824 list_del(&pkt->list);
2825
2826 /* update packet stats */
2827 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2828 --ch->tx_pending_data_cnt;
2829 if (ch->notify_lwm &&
2830 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002831 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002832 ch->notify_lwm = 0;
2833 low_wm_notif = 1;
2834 }
2835 }
2836
2837 /* advance to the next ready channel */
2838 list_rotate_left(&smux.lch_tx_ready_list);
2839 } else {
2840 /* no data in channel to send, remove from ready list */
2841 list_del(&ch->tx_ready_list);
2842 INIT_LIST_HEAD(&ch->tx_ready_list);
2843 }
2844 lcid = ch->lcid;
2845 spin_unlock(&ch->tx_lock_lhb2);
2846 spin_unlock(&ch->state_lock_lhb1);
2847 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2848
2849 if (low_wm_notif)
2850 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2851
2852 /* send the packet */
2853 smux_tx_pkt(ch, pkt);
2854 smux_free_pkt(pkt);
2855 }
2856}
2857
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002858/**
2859 * Update the RX flow control (sent in the TIOCM Status command).
2860 *
2861 * @ch Channel for update
2862 *
2863 * @returns 1 for updated, 0 for not updated
2864 *
2865 * Must be called with ch->state_lock_lhb1 locked.
2866 */
2867static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2868{
2869 int updated = 0;
2870 int prev_state;
2871
2872 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2873
2874 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2875 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2876 else
2877 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2878
2879 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2880 smux_send_status_cmd(ch);
2881 updated = 1;
2882 }
2883
2884 return updated;
2885}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002886
Eric Holmberg06011322012-07-06 18:17:03 -06002887/**
2888 * Flush all SMUX workqueues.
2889 *
2890 * This sets the reset bit to abort any processing loops and then
2891 * flushes the workqueues to ensure that no new pending work is
2892 * running. Do not call with any locks used by workers held as
2893 * this will result in a deadlock.
2894 */
2895static void smux_flush_workqueues(void)
2896{
2897 smux.in_reset = 1;
2898
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302899 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002900 flush_workqueue(smux_tx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302901 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002902 flush_workqueue(smux_rx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302903 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002904 flush_workqueue(smux_notify_wq);
2905}
2906
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002907/**********************************************************************/
2908/* Kernel API */
2909/**********************************************************************/
2910
2911/**
2912 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2913 * flags.
2914 *
2915 * @lcid Logical channel ID
2916 * @set Options to set
2917 * @clear Options to clear
2918 *
2919 * @returns 0 for success, < 0 for failure
2920 */
2921int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2922{
2923 unsigned long flags;
2924 struct smux_lch_t *ch;
2925 int tx_ready = 0;
2926 int ret = 0;
2927
2928 if (smux_assert_lch_id(lcid))
2929 return -ENXIO;
2930
2931 ch = &smux_lch[lcid];
2932 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2933
2934 /* Local loopback mode */
2935 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2936 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2937
2938 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2939 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2940
2941 /* Remote loopback mode */
2942 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2943 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2944
2945 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2946 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2947
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002948 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002949 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002950 ch->rx_flow_control_client = 1;
2951 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002952 }
2953
2954 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002955 ch->rx_flow_control_client = 0;
2956 tx_ready |= smux_rx_flow_control_updated(ch);
2957 }
2958
2959 /* Auto RX Flow Control */
2960 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302961 SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002962 __func__);
2963 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2964 }
2965
2966 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05302967 SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002968 __func__);
2969 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2970 ch->rx_flow_control_auto = 0;
2971 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002972 }
2973
2974 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2975
2976 if (tx_ready)
2977 list_channel(ch);
2978
2979 return ret;
2980}
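
/*
 * Illustrative call (editor's addition; lcid is a hypothetical open
 * channel). This enables automatic RX flow control and clears any
 * client-forced flow stop:
 *
 *	rc = msm_smux_set_ch_option(lcid,
 *				    SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *				    SMUX_CH_OPTION_REMOTE_TX_STOP);
 */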
2981
2982/**
2983 * Starts the opening sequence for a logical channel.
2984 *
2985 * @lcid Logical channel ID
2986 * @priv Free for client usage
2987 * @notify Event notification function
2988 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2989 *
2990 * @returns 0 for success, <0 otherwise
2991 *
2992 * A channel must be fully closed (either not previously opened, or
2993 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2994 * has been received).
2995 *
2996 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2997 * event.
2998 */
2999int msm_smux_open(uint8_t lcid, void *priv,
3000 void (*notify)(void *priv, int event_type, const void *metadata),
3001 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
3002 int size))
3003{
3004 int ret;
3005 struct smux_lch_t *ch;
3006 struct smux_pkt_t *pkt;
3007 int tx_ready = 0;
3008 unsigned long flags;
3009
3010 if (smux_assert_lch_id(lcid))
3011 return -ENXIO;
3012
3013 ch = &smux_lch[lcid];
3014 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3015
3016 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
3017 ret = -EAGAIN;
3018 goto out;
3019 }
3020
3021 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
3022 pr_err("%s: open lcid %d local state %x invalid\n",
3023 __func__, lcid, ch->local_state);
3024 ret = -EINVAL;
3025 goto out;
3026 }
3027
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303028 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003029 ch->local_state,
3030 SMUX_LCH_LOCAL_OPENING);
3031
Eric Holmberg06011322012-07-06 18:17:03 -06003032 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003033 ch->local_state = SMUX_LCH_LOCAL_OPENING;
3034
3035 ch->priv = priv;
3036 ch->notify = notify;
3037 ch->get_rx_buffer = get_rx_buffer;
3038 ret = 0;
3039
3040 /* Send Open Command */
3041 pkt = smux_alloc_pkt();
3042 if (!pkt) {
3043 ret = -ENOMEM;
3044 goto out;
3045 }
3046 pkt->hdr.magic = SMUX_MAGIC;
3047 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
3048 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
3049 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
3050 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
3051 pkt->hdr.lcid = lcid;
3052 pkt->hdr.payload_len = 0;
3053 pkt->hdr.pad_len = 0;
3054 smux_tx_queue(pkt, ch, 0);
3055 tx_ready = 1;
3056
3057out:
3058 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06003059 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003060 if (tx_ready)
3061 list_channel(ch);
3062 return ret;
3063}
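
/*
 * Minimal open sequence (editor's sketch; the demo_* client callbacks
 * are hypothetical). get_rx_buffer() may be retried after -EAGAIN, so
 * an atomic allocation keeps the sketch safe in any calling context:
 *
 *	static void demo_notify(void *priv, int event, const void *meta)
 *	{
 *		if (event == SMUX_CONNECTED)
 *			pr_info("demo: channel fully open\n");
 *	}
 *
 *	static int demo_get_rx_buffer(void *priv, void **pkt_priv,
 *				      void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;
 *	}
 *
 *	rc = msm_smux_open(lcid, NULL, demo_notify, demo_get_rx_buffer);
 */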
3064
3065/**
3066 * Starts the closing sequence for a logical channel.
3067 *
3068 * @lcid Logical channel ID
3069 *
3070 * @returns 0 for success, <0 otherwise
3071 *
3072 * Once the close event has been acknowledged by the remote side, the client
3073 * will receive a SMUX_DISCONNECTED notification.
3074 */
3075int msm_smux_close(uint8_t lcid)
3076{
3077 int ret = 0;
3078 struct smux_lch_t *ch;
3079 struct smux_pkt_t *pkt;
3080 int tx_ready = 0;
3081 unsigned long flags;
3082
3083 if (smux_assert_lch_id(lcid))
3084 return -ENXIO;
3085
3086 ch = &smux_lch[lcid];
3087 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3088 ch->local_tiocm = 0x0;
3089 ch->remote_tiocm = 0x0;
3090 ch->tx_pending_data_cnt = 0;
3091 ch->notify_lwm = 0;
Eric Holmbergeee5d5a2012-08-13 14:45:27 -06003092 ch->tx_flow_control = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003093
3094 /* Purge TX queue */
3095 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberg0e914082012-07-11 11:46:28 -06003096 smux_purge_ch_tx_queue(ch, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003097 spin_unlock(&ch->tx_lock_lhb2);
3098
3099 /* Send Close Command */
3100 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3101 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303102 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003103 ch->local_state,
3104 SMUX_LCH_LOCAL_CLOSING);
3105
3106 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3107 pkt = smux_alloc_pkt();
3108 if (pkt) {
3109 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3110 pkt->hdr.flags = 0;
3111 pkt->hdr.lcid = lcid;
3112 pkt->hdr.payload_len = 0;
3113 pkt->hdr.pad_len = 0;
3114 smux_tx_queue(pkt, ch, 0);
3115 tx_ready = 1;
3116 } else {
3117 pr_err("%s: pkt allocation failed\n", __func__);
3118 ret = -ENOMEM;
3119 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003120
3121 /* Purge RX retry queue */
3122 if (ch->rx_retry_queue_cnt)
3123 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003124 }
3125 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3126
3127 if (tx_ready)
3128 list_channel(ch);
3129
3130 return ret;
3131}
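
/*
 * Editorial note: teardown mirrors the open sequence. The channel may
 * be reopened only after the SMUX_DISCONNECTED notification arrives:
 *
 *	rc = msm_smux_close(lcid);
 *	(wait for notify(priv, SMUX_DISCONNECTED, meta))
 */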
3132
3133/**
3134 * Write data to a logical channel.
3135 *
3136 * @lcid Logical channel ID
3137 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3138 * SMUX_WRITE_FAIL notification.
3139 * @data Data to write
3140 * @len Length of @data
3141 *
3142 * @returns 0 for success, <0 otherwise
3143 *
3144 * Data may be written immediately after msm_smux_open() is called,
3145 * but the data will wait in the transmit queue until the channel has
3146 * been fully opened.
3147 *
3148 * Once the data has been written, the client will receive either a completion
3149 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3150 */
3151int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3152{
3153 struct smux_lch_t *ch;
3154 struct smux_pkt_t *pkt;
3155 int tx_ready = 0;
3156 unsigned long flags;
3157 int ret;
3158
3159 if (smux_assert_lch_id(lcid))
3160 return -ENXIO;
3161
3162 ch = &smux_lch[lcid];
3163 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3164
3165 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3166 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3167		pr_err("%s: invalid local state %d channel %d\n",
3168 __func__, ch->local_state, lcid);
3169 ret = -EINVAL;
3170 goto out;
3171 }
3172
3173 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3174 pr_err("%s: payload %d too large\n",
3175 __func__, len);
3176 ret = -E2BIG;
3177 goto out;
3178 }
3179
3180 pkt = smux_alloc_pkt();
3181 if (!pkt) {
3182 ret = -ENOMEM;
3183 goto out;
3184 }
3185
3186 pkt->hdr.cmd = SMUX_CMD_DATA;
3187 pkt->hdr.lcid = lcid;
3188 pkt->hdr.flags = 0;
3189 pkt->hdr.payload_len = len;
3190 pkt->payload = (void *)data;
3191 pkt->priv = pkt_priv;
3192 pkt->hdr.pad_len = 0;
3193
3194 spin_lock(&ch->tx_lock_lhb2);
3195 /* verify high watermark */
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303196	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003197
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003198 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003199 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003200 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003201 ch->tx_pending_data_cnt);
3202 ret = -EAGAIN;
3203 goto out_inner;
3204 }
3205
3206 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003207 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003208 ch->notify_lwm = 1;
3209 pr_err("%s: high watermark hit\n", __func__);
3210 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3211 }
3212 list_add_tail(&pkt->list, &ch->tx_queue);
3213
3214 /* add to ready list */
3215 if (IS_FULLY_OPENED(ch))
3216 tx_ready = 1;
3217
3218 ret = 0;
3219
3220out_inner:
3221 spin_unlock(&ch->tx_lock_lhb2);
3222
3223out:
3224 if (ret)
3225 smux_free_pkt(pkt);
3226 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3227
3228 if (tx_ready)
3229 list_channel(ch);
3230
3231 return ret;
3232}
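
/*
 * Example (illustrative sketch only): watermark-aware use of
 * msm_smux_write().  SMUX_HIGH_WM_HIT is taken from the code above; the
 * complementary low-watermark event (assumed to be SMUX_LOW_WM_HIT from
 * smux.h) tells the client it may resume.  @data must remain valid until
 * SMUX_WRITE_DONE/SMUX_WRITE_FAIL arrives, since the packet references
 * the buffer rather than copying it.
 *
 *	ret = msm_smux_write(lcid, my_req, my_buf, my_len);
 *	if (ret == -EAGAIN) {
 *		// TX queue reached SMUX_TX_WM_HIGH; hold further writes
 *		// until the low-watermark notification arrives.
 *	}
 */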

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid      Logical channel ID
 * @returns   0 if channel is not full
 *            1 if it is full
 *            < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid      Logical channel ID
 * @returns   0 if channel is above low watermark
 *            1 if it's at or below the low watermark
 *            < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}
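
/*
 * Example (illustrative sketch only): polling-style flow control built on
 * the two queries above; push_more_data() is a hypothetical client helper.
 *
 *	if (msm_smux_is_ch_full(lcid) > 0)
 *		return;				// wait for the queue to drain
 *	if (msm_smux_is_ch_low(lcid) > 0)
 *		push_more_data(lcid);		// safe to queue more packets
 */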

/**
 * Send TIOCM status update.
 *
 * @ch        Channel for update
 *
 * @returns   0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch        Channel pointer
 *
 * @returns   TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid      Logical channel ID
 *
 * @returns   >= 0 TIOCM status bits
 *            < 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid      Logical channel ID
 * @set       Bits to set
 * @clear     Bits to clear
 *
 * @returns   0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
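
/*
 * Example (illustrative sketch only): asserting DTR/RTS and reading the
 * combined local/remote state back.  Note that a bit present in both
 * @set and @clear is cleared, per the rule documented above.
 *
 *	long bits;
 *
 *	msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	bits = msm_smux_tiocm_get(lcid);
 *	if (bits >= 0 && (bits & TIOCM_CTS))
 *		;	// remote side signals it is ready to receive
 */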

/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this      Pointer to ssr_notifier
 * @code      SSR Code
 * @data      Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
						__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					pr_err("%s: error %d registering device %s\n",
					       __func__, tmp,
					       smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}
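
/*
 * Summary of the SSR handling above (descriptive only):
 *
 *	SUBSYS_BEFORE_SHUTDOWN	set smux.in_reset so in-flight work bails out
 *	SUBSYS_AFTER_SHUTDOWN	purge channels, unregister platform devices,
 *				force the UART off, reset RX/TX state
 *	SUBSYS_AFTER_POWERUP	re-register platform devices so clients can
 *				re-probe against the restarted modem
 */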

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty       TTY structure
 * @cp        Character data
 * @fp        Flag data
 * @count     Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
			       tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					      TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
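
/*
 * Example (illustrative sketch only): how the error-flag handling above
 * splits a buffer.  For cp = {A, B, X, C} where fp marks X with
 * TTY_FRAME, the RX state machine sees three calls:
 *
 *	smux_rx_state_machine(cp, 2, TTY_NORMAL);	// A, B
 *	smux_rx_state_machine(cp + 2, 1, TTY_FRAME);	// the bad byte X
 *	smux_rx_state_machine(cp + 3, 1, TTY_NORMAL);	// C
 */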

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* don't leave the line discipline registered on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		pr_err("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);