/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)
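
/*
 * Note (illustrative arithmetic; microsecond units are an assumption based
 * on pwr_wakeup_delay_us below): 1 << 15 is 32768 (~33 ms) and 1 << 20 is
 * 1048576 (~1.05 s), so wakeup retries back off from roughly 33 ms up to
 * about one second between attempts.
 */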

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS (1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS (1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
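
/*
 * Example (illustrative; the exact sysfs path depends on the name this
 * module is built under): the mask is writable at runtime, so packet
 * logging can be enabled in addition to the defaults with e.g.
 *
 *	echo 0xf > /sys/module/n_smux/parameters/debug_mask
 */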

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes its locking level.  If multiple
 * locks are required, they may only be acquired in order of increasing
 * hierarchy number, which prevents deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since that would invert the hierarchy and could deadlock.
 * (A sketch of this ordering follows the structure below.)
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};
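
/*
 * Sketch of the lock ordering described above (illustrative only; not a
 * function used by this driver).  Lower hierarchy numbers must be taken
 * first, and the line-discipline locks (*_lha) before any of these:
 *
 *	static void example_lock_order(struct smux_lch_t *ch)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&ch->state_lock_lhb1, flags); // lhb1 first
 *		spin_lock(&ch->tx_lock_lhb2);                   // then lhb2
 *		// ... manipulate channel state and TX queue ...
 *		spin_unlock(&ch->tx_lock_lhb2);
 *		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *	}
 */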

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};
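
/*
 * Sketch of a client notify callback as it is invoked through
 * smux_notify_handle (illustrative only; example_notify and its handling
 * of the metadata are hypothetical, not part of this driver):
 *
 *	static void example_notify(void *priv, int event_type,
 *				   const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:
 *			// both sides OPENED; TX may begin
 *			break;
 *		case SMUX_READ_DONE:
 *			// metadata points to a struct smux_meta_read
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */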

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the packet data until the
 * client can accept it, at which point delivery is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
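
/*
 * Illustrative retry timing (assuming the exponential backoff suggested by
 * the power-of-two bounds SMUX_RX_RETRY_MIN_MS/MAX_MS above): a packet
 * whose client buffer is not yet available is re-attempted after 1 ms,
 * then 2, 4, 8, ... ms, capped at 1024 ms, until the client supplies a
 * buffer.
 */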

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance exists, since multiple instances of the line
 * discipline are not allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static void *log_ctx;

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event   SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_events))
		return smux_events[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and clean up all SMUX logical channels for subsystem restart or
 * line discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}
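
/*
 * Example of a log line produced above (illustrative values): an RX DATA
 * packet on lcid 8, both sides OPENED, normal modes, 4 payload bytes and
 * no padding:
 *
 *	smux: R8 ON:ON DATA flags 0 len 4:0 de ad be ef
 */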

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				notify_handle->event_type,
				metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly (and ensure that it is
 * freed) or use smux_alloc_pkt_payload() to allocate it; in the latter
 * case the payload is freed automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed
 * as well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
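
/*
 * Typical allocation/free sequence with the helpers above (illustrative
 * sketch; error handling abbreviated):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt) == 0)
 *		memcpy(pkt->payload, data, len);
 *	// ... after TX completes, smux_free_pkt(pkt) releases the packet
 *	// and, because free_payload was set, the payload as well
 */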

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
							GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}
/**
 * Serialize packet @pkt into output buffer @out.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}
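
/*
 * Illustrative wire layout produced by smux_serialize() for a packet with
 * a 3-byte payload and one byte of padding (the header size comes from
 * struct smux_hdr_t in smux_private.h):
 *
 *	[ struct smux_hdr_t | payload (3 bytes) | pad (0x00) ]
 *
 * *out_len is then sizeof(struct smux_hdr_t) + 3 + 1.
 */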

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
							rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;	/* initialized; only assigned again if fully opened */
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}
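
/*
 * Worked example of the flow-control change detection above: if the old
 * remote status had SMUX_CMD_STATUS_FLOW_CNTL clear and the new flags have
 * it set, then (0 ^ FLOW_CNTL) is non-zero and TX is throttled; if both
 * old and new have it set, the XOR is zero and no transition occurs.
 */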

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}
1774
1775/**
1776 * Handle dispatching a completed packet for receive processing.
1777 *
1778 * @pkt Packet to process
1779 *
1780 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001781 */
1782static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1783{
Eric Holmbergf9622662012-06-13 15:55:45 -06001784 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001785
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001786 switch (pkt->hdr.cmd) {
1787 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001788 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001789 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1790 pr_err("%s: invalid channel id %d\n",
1791 __func__, pkt->hdr.lcid);
1792 break;
1793 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001794 ret = smux_handle_rx_open_cmd(pkt);
1795 break;
1796
1797 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001798 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001799 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1800 pr_err("%s: invalid channel id %d\n",
1801 __func__, pkt->hdr.lcid);
1802 break;
1803 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001804 ret = smux_handle_rx_data_cmd(pkt);
1805 break;
1806
1807 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001808 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001809 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1810 pr_err("%s: invalid channel id %d\n",
1811 __func__, pkt->hdr.lcid);
1812 break;
1813 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001814 ret = smux_handle_rx_close_cmd(pkt);
1815 break;
1816
1817 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001818 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001819 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1820 pr_err("%s: invalid channel id %d\n",
1821 __func__, pkt->hdr.lcid);
1822 break;
1823 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001824 ret = smux_handle_rx_status_cmd(pkt);
1825 break;
1826
1827 case SMUX_CMD_PWR_CTL:
1828 ret = smux_handle_rx_power_cmd(pkt);
1829 break;
1830
1831 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001832 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001833 ret = smux_handle_rx_byte_cmd(pkt);
1834 break;
1835
1836 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001837 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001838 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1839 ret = -EINVAL;
1840 }
1841 return ret;
1842}
1843
1844/**
1845 * Deserializes a packet and dispatches it to the packet receive logic.
1846 *
1847 * @data Raw data for one packet
1848 * @len Length of the data
1849 *
1850 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001851 */
1852static int smux_deserialize(unsigned char *data, int len)
1853{
1854 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001855
1856 smux_init_pkt(&recv);
1857
1858 /*
1859 * It may be possible to optimize this to not use the
1860 * temporary buffer.
1861 */
1862 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1863
1864 if (recv.hdr.magic != SMUX_MAGIC) {
1865 pr_err("%s: invalid header magic\n", __func__);
1866 return -EINVAL;
1867 }
1868
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001869 if (recv.hdr.payload_len)
1870 recv.payload = data + sizeof(struct smux_hdr_t);
1871
1872 return smux_dispatch_rx_pkt(&recv);
1873}
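
/*
 * Editor's note: a sketch of the on-the-wire framing implied by
 * smux_deserialize() and the RX state machine below (illustrative only;
 * exact field widths live in the struct smux_hdr_t definition):
 *
 *	[SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2]   start of smux_hdr_t (magic)
 *	[rest of struct smux_hdr_t:
 *	    cmd, flags, lcid, payload_len, pad_len]
 *	[payload_len bytes of payload]
 *	[pad_len bytes of padding, discarded]
 *
 * smux_rx_handle_hdr() accumulates sizeof(struct smux_hdr_t) bytes and
 * then expects payload_len + pad_len more bytes before dispatching the
 * completed packet.
 */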

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else if (smux.power_state == SMUX_PWR_ON) {
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		/* stale wakeup request from previous wakeup */
		SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			SMUX_PWR("smux: RX Wakeup REQ\n");
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			SMUX_PWR("smux: RX Wakeup ACK\n");
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
					(unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
					__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}

/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit packet on correct transport and then perform client
 * notification.
 *
 * @ch Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("smux: %s: PKT write done\n",
						__func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				pr_err("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}

/**
 * Flush pending TTY TX data.
 */
static void smux_flush_tty(void)
{
	mutex_lock(&smux.mutex_lha0);
	if (!smux.tty) {
		pr_err("%s: ldisc not loaded\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}

	tty_wait_until_sent(smux.tty,
			msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

	if (tty_chars_in_buffer(smux.tty) > 0)
		pr_err("%s: unable to flush UART queue\n", __func__);

	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Purge TX queue for logical channel.
 *
 * @ch Logical channel pointer
 *
 * Must be called with the following spinlocks locked:
 *  state_lock_lhb1
 *  tx_lock_lhb2
 */
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;
	int send_disconnect = 0;

	while (!list_empty(&ch->tx_queue)) {
		pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
							list);
		list_del(&pkt->list);

		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
			/* Open was never sent, just force to closed state */
			ch->local_state = SMUX_LCH_LOCAL_CLOSED;
			send_disconnect = 1;
		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* Notify client of failed write */
			union notifier_metadata meta_write;

			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
		}
		smux_free_pkt(pkt);
	}

	if (send_disconnect) {
		union notifier_metadata meta_disconnected;

		meta_disconnected.disconnected.is_ssr = smux.in_reset;
		schedule_notify(ch->lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
	}
}

/**
 * Power-up the UART.
 *
 * Must be called with smux.mutex_lha0 already locked.
 */
static void smux_uart_power_on_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_on_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Power down the UART.
 *
 * Must be called with mutex_lha0 locked; the caller owns the mutex, so
 * the error path must not unlock it here.
 */
static void smux_uart_power_off_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_off_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_ON) {
		/* wakeup complete */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("smux: %s: wakeup complete\n", __func__);

		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
		queue_work(smux_tx_wq, &smux_tx_work);
	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* retry wakeup */
		wakeup_delay = smux.pwr_wakeup_delay_us;
		smux.pwr_wakeup_delay_us <<= 1;
		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
			smux.pwr_wakeup_delay_us =
				SMUX_WAKEUP_DELAY_MAX;

		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
			queue_work(smux_tx_wq, &smux_wakeup_work);
		} else {
			/* schedule delayed work */
			SMUX_DBG("smux: %s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
		}
	} else {
		/* wakeup aborted */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
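
/*
 * Editor's illustration of the back-off above, assuming no wakeup ACK
 * ever arrives: pwr_wakeup_delay_us doubles on each pass (1, 2, 4, ...
 * microseconds).  While the delay is below SMUX_WAKEUP_DELAY_MIN the
 * worker busy-waits with usleep_range() and immediately requeues
 * itself; at or above that threshold it switches to queue_delayed_work()
 * with millisecond granularity, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second per retry).
 */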

/**
 * Inactivity timeout worker. Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	unsigned long flags;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_PWR("smux: %s: Power %d->%d\n",
						__func__, smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);
					smux.power_state =
						SMUX_PWR_TURNING_OFF_FLUSH;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
					list_add_tail(&pkt->list,
							&smux.power_queue);
					queue_work(smux_tx_wq, &smux_tx_work);
				} else {
					pr_err("%s: packet alloc failed\n",
							__func__);
				}
			}
		}
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
		/* ready to power-down the UART */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux.power_state = SMUX_PWR_OFF;

		/* if data is pending, schedule a new wakeup */
		if (!list_empty(&smux.lch_tx_ready_list) ||
		   !list_empty(&smux.power_queue))
			queue_work(smux_tx_wq, &smux_tx_work);

		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

		/* flush UART output queue and power down */
		smux_flush_tty();
		smux_uart_power_off();
	} else {
		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}
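
/*
 * Editor's summary of the locally-initiated power-down handshake as
 * implemented by this worker, smux_tx_worker(), and
 * smux_handle_rx_power_cmd():
 *
 *	SMUX_PWR_ON
 *	  -> SMUX_PWR_TURNING_OFF_FLUSH  (inactivity detected; PWR_CTL
 *	                                  request queued here)
 *	  -> SMUX_PWR_TURNING_OFF        (TX worker sent the request)
 *	  -> SMUX_PWR_OFF_FLUSH          (remote ACK received)
 *	  -> SMUX_PWR_OFF                (TTY flushed, UART clocked off)
 */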

/**
 * Remove RX retry packet from channel and free it.
 *
 * @ch Channel for retry packet
 * @retry Retry packet to remove
 *
 * @returns 1 if flow control updated; 0 otherwise
 *
 * Must be called with state_lock_lhb1 locked.
 */
int smux_remove_rx_retry(struct smux_lch_t *ch,
		struct smux_rx_pkt_retry *retry)
{
	int tx_ready = 0;

	list_del(&retry->rx_retry_list);
	--ch->rx_retry_queue_cnt;
	smux_free_pkt(retry->pkt);
	kfree(retry);

	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
			ch->rx_flow_control_auto) {
		ch->rx_flow_control_auto = 0;
		smux_rx_flow_control_updated(ch);
		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
		tx_ready = 1;
	}
	return tx_ready;
}

/**
 * RX worker handles all receive operations.
 *
 * @work Work structure contained in the smux_rx_worker_data structure
 */
static void smux_rx_worker(struct work_struct *work)
{
	unsigned long flags;
	int used;
	int initial_rx_state;
	struct smux_rx_worker_data *w;
	const unsigned char *data;
	int len;
	int flag;

	w = container_of(work, struct smux_rx_worker_data, work);
	data = w->data;
	len = w->len;
	flag = w->flag;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	smux.rx_activity_flag = 1;
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
	used = 0;
	do {
		if (smux.in_reset) {
			SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}

		SMUX_DBG("smux: %s: state %d; %d of %d\n",
				__func__, smux.rx_state, used, len);
		initial_rx_state = smux.rx_state;

		switch (smux.rx_state) {
		case SMUX_RX_IDLE:
			smux_rx_handle_idle(data, len, &used, flag);
			break;
		case SMUX_RX_MAGIC:
			smux_rx_handle_magic(data, len, &used, flag);
			break;
		case SMUX_RX_HDR:
			smux_rx_handle_hdr(data, len, &used, flag);
			break;
		case SMUX_RX_PAYLOAD:
			smux_rx_handle_pkt_payload(data, len, &used, flag);
			break;
		default:
			SMUX_DBG("smux: %s: invalid state %d\n",
					__func__, smux.rx_state);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}
	} while (used < len || smux.rx_state != initial_rx_state);

	complete(&w->work_complete);
}

/**
 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
 * because the client was not ready (-EAGAIN).
 *
 * @work Work structure contained in smux_lch_t structure
 */
static void smux_rx_retry_worker(struct work_struct *work)
{
	struct smux_lch_t *ch;
	struct smux_rx_pkt_retry *retry;
	union notifier_metadata metadata;
	int tmp;
	unsigned long flags;
	int immediate_retry = 0;
	int tx_ready = 0;

	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);

	/* get next retry packet */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
		/* port has been closed - remove all retries */
		while (!list_empty(&ch->rx_retry_queue)) {
			retry = list_first_entry(&ch->rx_retry_queue,
						struct smux_rx_pkt_retry,
						rx_retry_list);
			(void)smux_remove_rx_retry(ch, retry);
		}
	}

	if (list_empty(&ch->rx_retry_queue)) {
		SMUX_DBG("smux: %s: retry list empty for channel %d\n",
				__func__, ch->lcid);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		return;
	}
	retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
			__func__, ch->lcid, retry);
	metadata.read.pkt_priv = 0;
	metadata.read.buffer = 0;
	tmp = ch->get_rx_buffer(ch->priv,
			(void **)&metadata.read.pkt_priv,
			(void **)&metadata.read.buffer,
			retry->pkt->hdr.payload_len);
	if (tmp == 0 && metadata.read.buffer) {
		/* have valid RX buffer */
		memcpy(metadata.read.buffer, retry->pkt->payload,
						retry->pkt->hdr.payload_len);
		metadata.read.len = retry->pkt->hdr.payload_len;

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
		if (tx_ready)
			list_channel(ch);

		immediate_retry = 1;
	} else if (tmp == -EAGAIN ||
			(tmp == 0 && !metadata.read.buffer)) {
		/* retry again */
		retry->timeout_in_ms <<= 1;
		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
			/* timed out */
			pr_err("%s: ch %d RX retry client timeout\n",
					__func__, ch->lcid);
			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
			tx_ready = smux_remove_rx_retry(ch, retry);
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
			if (tx_ready)
				list_channel(ch);
		}
	} else {
		/* client error - drop packet */
		pr_err("%s: ch %d RX retry client failed (%d)\n",
				__func__, ch->lcid, tmp);
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
		if (tx_ready)
			list_channel(ch);
	}

	/* schedule next retry */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (!list_empty(&ch->rx_retry_queue)) {
		retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);

		if (immediate_retry)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
		else
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}

/**
 * Transmit worker handles serializing and transmitting packets onto the
 * underlying transport.
 *
 * @work Work structure (not used)
 */
static void smux_tx_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	struct smux_lch_t *ch;
	unsigned low_wm_notif;
	unsigned lcid;
	unsigned long flags;

	/*
	 * Transmit packets in round-robin fashion based upon ready
	 * channels.
	 *
	 * To eliminate the need to hold a lock for the entire
	 * iteration through the channel ready list, the head of the
	 * ready-channel list is always the next channel to be
	 * processed. To send a packet, the first valid packet in
	 * the head channel is removed and the head channel is then
	 * rescheduled at the end of the queue by removing it and
	 * inserting after the tail. The locks can then be released
	 * while the packet is processed.
	 */
	while (!smux.in_reset) {
		pkt = NULL;
		low_wm_notif = 0;

		spin_lock_irqsave(&smux.tx_lock_lha2, flags);

		/* handle wakeup if needed */
		if (smux.power_state == SMUX_PWR_OFF) {
			if (!list_empty(&smux.lch_tx_ready_list) ||
			   !list_empty(&smux.power_queue)) {
				/* data to transmit, do wakeup */
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_ON);
				smux.power_state = SMUX_PWR_TURNING_ON;
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
				queue_work(smux_tx_wq, &smux_wakeup_work);
			} else {
				/* no activity -- stay asleep */
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
			}
			break;
		}

		/* process any pending power packets */
		if (!list_empty(&smux.power_queue)) {
			pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t, list);
			list_del(&pkt->list);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* Adjust power state if this is a flush command */
			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
					smux.power_ctl_remote_req_received) {
					/*
					 * Sending remote power-down request ACK
					 * or sending local power-down request
					 * and we already received a remote
					 * power-down request.
					 */
					SMUX_PWR("smux: %s: Power %d->%d\n",
							__func__,
							smux.power_state,
							SMUX_PWR_OFF_FLUSH);
					smux.power_state = SMUX_PWR_OFF_FLUSH;
					smux.power_ctl_remote_req_received = 0;
					queue_work(smux_tx_wq,
							&smux_inactivity_work);
				} else {
					/* sending local power-down request */
					SMUX_PWR("smux: %s: Power %d->%d\n",
							__func__,
							smux.power_state,
							SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;
				}
			}
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* send the packet */
			smux_uart_power_on();
			smux.tx_activity_flag = 1;
			SMUX_PWR_PKT_TX(pkt);
			if (!smux_byte_loopback) {
				smux_tx_tty(pkt);
				smux_flush_tty();
			} else {
				smux_tx_loopback(pkt);
			}

			smux_free_pkt(pkt);
			continue;
		}

		/* get the next ready channel */
		if (list_empty(&smux.lch_tx_ready_list)) {
			/* no ready channels */
			SMUX_DBG("smux: %s: no more ready channels, exiting\n",
					__func__);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}
		smux.tx_activity_flag = 1;

		if (smux.power_state != SMUX_PWR_ON) {
			/* channel not ready to transmit */
			SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
					__func__,
					smux.power_state);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}

		/* get the next packet to send and rotate channel list */
		ch = list_first_entry(&smux.lch_tx_ready_list,
					struct smux_lch_t,
					tx_ready_list);

		spin_lock(&ch->state_lock_lhb1);
		spin_lock(&ch->tx_lock_lhb2);
		if (!list_empty(&ch->tx_queue)) {
			/*
			 * If remote TX flow control is enabled or
			 * the channel is not fully opened, then only
			 * send command packets.
			 */
			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
				struct smux_pkt_t *curr;
				list_for_each_entry(curr, &ch->tx_queue, list) {
					if (curr->hdr.cmd != SMUX_CMD_DATA) {
						pkt = curr;
						break;
					}
				}
			} else {
				/* get next cmd/data packet to send */
				pkt = list_first_entry(&ch->tx_queue,
						struct smux_pkt_t, list);
			}
		}

		if (pkt) {
			list_del(&pkt->list);

			/* update packet stats */
			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
				--ch->tx_pending_data_cnt;
				if (ch->notify_lwm &&
					ch->tx_pending_data_cnt
						<= SMUX_TX_WM_LOW) {
					ch->notify_lwm = 0;
					low_wm_notif = 1;
				}
			}

			/* advance to the next ready channel */
			list_rotate_left(&smux.lch_tx_ready_list);
		} else {
			/* no data in channel to send, remove from ready list */
			list_del(&ch->tx_ready_list);
			INIT_LIST_HEAD(&ch->tx_ready_list);
		}
		lcid = ch->lcid;
		spin_unlock(&ch->tx_lock_lhb2);
		spin_unlock(&ch->state_lock_lhb1);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (low_wm_notif)
			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);

		/* send the packet */
		smux_tx_pkt(ch, pkt);
		smux_free_pkt(pkt);
	}
}

/**
 * Update the RX flow control (sent in the TIOCM Status command).
 *
 * @ch Channel for update
 *
 * @returns 1 for updated, 0 for not updated
 *
 * Must be called with ch->state_lock_lhb1 locked.
 */
static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
{
	int updated = 0;
	int prev_state;

	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;

	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
	else
		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;

	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
		smux_send_status_cmd(ch);
		updated = 1;
	}

	return updated;
}

/**
 * Flush all SMUX workqueues.
 *
 * This sets the reset bit to abort any processing loops and then
 * flushes the workqueues to ensure that no new pending work is
 * running. Do not call with any locks used by workers held as
 * this will result in a deadlock.
 */
static void smux_flush_workqueues(void)
{
	smux.in_reset = 1;

	SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
	SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
	flush_workqueue(smux_notify_wq);
}

/**********************************************************************/
/* Kernel API                                                         */
/**********************************************************************/

/**
 * Set or clear channel option using the SMUX_CH_OPTION_* channel
 * flags.
 *
 * @lcid Logical channel ID
 * @set Options to set
 * @clear Options to clear
 *
 * @returns 0 for success, < 0 for failure
 */
int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
{
	unsigned long flags;
	struct smux_lch_t *ch;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	/* Local loopback mode */
	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;

	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* Remote loopback mode */
	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* RX Flow control */
	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 1;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	/* Auto RX Flow Control */
	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
				__func__);
		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
	}

	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
				__func__);
		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->rx_flow_control_auto = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
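
/*
 * Example (editor's illustration; the channel id is hypothetical):
 * putting a channel into local loopback for unit testing, then
 * restoring normal mode:
 *
 *	msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */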

/**
 * Starts the opening sequence for a logical channel.
 *
 * @lcid Logical channel ID
 * @priv Free for client usage
 * @notify Event notification function
 * @get_rx_buffer Function used to provide a receive buffer to SMUX
 *
 * @returns 0 for success, <0 otherwise
 *
 * A channel must be fully closed (either not previously opened, or
 * msm_smux_close() has been called and the SMUX_DISCONNECTED event has
 * been received).
 *
 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
 * event.
 */
int msm_smux_open(uint8_t lcid, void *priv,
	void (*notify)(void *priv, int event_type, const void *metadata),
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size))
{
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
		pr_err("%s: open lcid %d local state %x invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
		goto out;
	}

	SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			ch->local_state,
			SMUX_LCH_LOCAL_OPENING);

	ch->rx_flow_control_auto = 0;
	ch->local_state = SMUX_LCH_LOCAL_OPENING;

	ch->priv = priv;
	ch->notify = notify;
	ch->get_rx_buffer = get_rx_buffer;
	ret = 0;

	/* Send Open Command */
	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}
	pkt->hdr.magic = SMUX_MAGIC;
	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
	pkt->hdr.lcid = lcid;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);
	tx_ready = 1;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	smux_rx_flow_control_updated(ch);
	if (tx_ready)
		list_channel(ch);
	return ret;
}
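
/*
 * Editor's sketch of minimal client usage (illustrative only; the
 * callbacks, channel id MY_LCID, buffer sizing, and error handling are
 * hypothetical and not part of this driver):
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;	(-EAGAIN queues an RX retry)
 *	}
 *
 *	static void my_notify(void *priv, int event_type,
 *			      const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:	(channel fully open; data may flow)
 *		case SMUX_READ_DONE:	(metadata read union holds the buffer)
 *		case SMUX_WRITE_DONE:	(write buffer may now be reclaimed)
 *		default:
 *			break;
 *		}
 *	}
 *
 *	ret = msm_smux_open(MY_LCID, NULL, my_notify, my_get_rx_buffer);
 *	if (!ret)
 *		ret = msm_smux_write(MY_LCID, NULL, data, len);
 */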

/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the client
 * will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;

	/* Purge TX queue */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
		ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *           SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt = NULL;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
		ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		pr_err("%s: invalid local state %d channel %d\n",
			__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		pr_err("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);
	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		pr_err("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		pr_err("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	/* pkt is NULL on the early error paths, so only free it if set */
	if (ret && pkt)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}
3229
3230/**
3231 * Returns true if the TX queue has space for more packets it is at or
3232 * below the low water mark).
3233 *
3234 * @lcid Logical channel ID
3235 * @returns 0 if channel is above low watermark
3236 * 1 if it's at or below the low watermark
3237 * < 0 for error
3238 */
3239int msm_smux_is_ch_low(uint8_t lcid)
3240{
3241 struct smux_lch_t *ch;
3242 unsigned long flags;
3243 int is_low = 0;
3244
3245 if (smux_assert_lch_id(lcid))
3246 return -ENXIO;
3247
3248 ch = &smux_lch[lcid];
3249
3250 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003251 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003252 is_low = 1;
3253 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3254
3255 return is_low;
3256}
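
/*
 * Illustrative sketch (not part of the driver): non-blocking flow
 * control built on the two watermark queries above.  The caller backs
 * off on -EAGAIN and retries once msm_smux_is_ch_low() reports space.
 * client_try_send() is a hypothetical helper for this example.
 */
#if 0
static int client_try_send(uint8_t lcid, void *priv, const void *data,
				int len)
{
	int full = msm_smux_is_ch_full(lcid);

	if (full < 0)
		return full;		/* invalid channel ID */
	if (full)
		return -EAGAIN;		/* poll msm_smux_is_ch_low() later */
	return msm_smux_write(lcid, priv, data, len);
}
#endif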

/**
 * Send TIOCM status update.
 *
 * @ch        Channel for update
 *
 * @returns   0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch        Channel pointer
 *
 * @returns   TIOCM status
 */
static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid      Logical channel ID
 *
 * @returns   >= 0 TIOCM status bits
 *            < 0 Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid      Logical channel ID
 * @set       Bits to set
 * @clear     Bits to clear
 *
 * @returns   0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
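
/*
 * Illustrative sketch (not part of the driver): driving the
 * modem-control lines through the two TIOCM calls above.  Assumes an
 * opened logical channel; the mapping of local RTC/RTR to DTR/RTS and
 * remote RTC/RTR to DSR/CTS follows msm_smux_tiocm_get_atomic().
 */
#if 0
static void client_toggle_lines(uint8_t lcid)
{
	long bits;

	/* assert DTR and RTS (a bit set in both masks would be cleared) */
	msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);

	bits = msm_smux_tiocm_get(lcid);
	if (bits >= 0 && (bits & TIOCM_CTS))
		pr_info("remote side is ready to receive\n");

	/* drop RTS only */
	msm_smux_tiocm_set(lcid, 0, TIOCM_RTS);
}
#endif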

/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this      Pointer to ssr_notifier
 * @code      SSR Code
 * @data      Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					pr_err("%s: error %d registering device %s\n",
						__func__, tmp,
						smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}
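
/*
 * Illustrative sketch (not part of the driver): a smux client can
 * follow the same three-phase SSR pattern against the "external_modem"
 * subsystem that smux_init() registers for below.  Only the SUBSYS_*
 * codes handled above are used; the client_* names are examples.
 */
#if 0
static int client_ssr_cb(struct notifier_block *this, unsigned long code,
				void *data)
{
	switch (code) {
	case SUBSYS_BEFORE_SHUTDOWN:
		/* stop queuing new TX traffic */
		break;
	case SUBSYS_AFTER_SHUTDOWN:
		/* discard in-flight state; smux channels have been purged */
		break;
	case SUBSYS_AFTER_POWERUP:
		/* reopen channels and resume traffic */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block client_ssr_nb = {
	.notifier_call = client_ssr_cb,
};
/* registered with:
 * subsys_notif_register_notifier("external_modem", &client_ssr_nb);
 */
#endif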

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
			__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty       TTY structure
 * @cp        Character data
 * @fp        Flag data
 * @count     Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
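
/*
 * Worked example of the error split above: for count == 5 with
 * fp[2] == TTY_FRAME and all other flags TTY_NORMAL, the loop issues
 *
 *	smux_rx_state_machine(cp + 0, 2, TTY_NORMAL);	bytes 0..1
 *	smux_rx_state_machine(cp + 2, 1, TTY_FRAME);	the flagged byte
 *	smux_rx_state_machine(cp + 3, 2, TTY_NORMAL);	bytes 3..4
 *
 * so the parser always sees valid data and error bytes separately.
 */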

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};
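
/*
 * Illustrative sketch (userspace, not part of this driver): attaching
 * the N_SMUX line discipline to the HS-UART tty with TIOCSETD, in the
 * style of ldattach.  The device node name and the availability of
 * N_SMUX to userspace are assumptions for this example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int attach_smux(void)
{
	int ldisc = N_SMUX;	/* ldisc number from the smux header */
	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep fd open; closing it detaches the ldisc */
}
#endif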

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
			__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* undo the line-discipline registration on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		pr_err("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);