/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

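/*
 * Retry pacing note (a hedged reading of the code in this file, not original
 * driver text): smux_handle_rx_data_cmd() seeds each retry entry with
 * SMUX_RX_RETRY_MIN_MS; the delay is then presumably backed off by
 * smux_rx_retry_worker() up to the SMUX_RX_RETRY_MAX_MS cap above.
 */
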
enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

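/*
 * Illustrative usage of the logging macros above (an added example, not
 * original driver text). Callers pass printk-style arguments; output goes to
 * the IPC log context, and SMUX_ERR additionally mirrors it to the kernel
 * log via pr_err():
 *
 *	SMUX_DBG("smux: %s: lcid %d state 0x%x -> 0x%x\n",
 *			__func__, lcid, old_state, new_state);
 *	SMUX_ERR("%s: TTY write returned error %d\n", __func__, ret);
 */
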
/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

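/*
 * Sketch of the expected power sequencing, inferred from the state names
 * above (the transitions here are an editorial assumption; the authoritative
 * state machine lives in the TX/power workers, not in this comment):
 *
 *	SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 *	SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF
 *		-> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */
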
union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. This temporary structure holds the data while a retry is
 * performed.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

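/*
 * Lifecycle note (added summary of the code in this file):
 * smux_handle_rx_data_cmd() allocates one of these entries whenever the
 * client's get_rx_buffer() callback cannot supply a buffer, copies the
 * packet into it, and schedules ch->rx_retry_work on smux_rx_wq after
 * timeout_in_ms; the delayed worker (smux_rx_retry_worker) then presumably
 * re-offers the buffered data to the client.
 */
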
/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int remote_is_alive;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
	unsigned remote_initiated_wakeup_count;
	unsigned local_initiated_wakeup_count;
};

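/*
 * Locking note (inferred from the names; not documented in the original):
 * the _lhXN suffixes (mutex_lha0, rx_lock_lha1, tx_lock_lha2,
 * state_lock_lhb1, tx_lock_lhb2, notify_lock_lhc1) appear to encode a lock
 * hierarchy; e.g. smux_lch_purge() nests tx_lock_lhb2 inside
 * state_lock_lhb1, consistent with acquiring locks in suffix order.
 */
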
/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_DELAY] = "DELAY",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
	[SMUX_LOCAL_CLOSED] = "LOCAL_CLOSED",
	[SMUX_REMOTE_CLOSED] = "REMOTE_CLOSED",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag  TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd  SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event  SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
	if (event < ARRAY_SIZE(smux_events))
		return smux_events[event];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		union notifier_metadata meta;
		int send_disconnect = 0;

		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		meta.disconnected.is_ssr = smux.in_reset;
		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
		    ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			schedule_notify(ch->lcid, SMUX_LOCAL_CLOSED, &meta);
			send_disconnect = 1;
		}
		if (ch->remote_state != SMUX_LCH_REMOTE_CLOSED) {
			schedule_notify(ch->lcid, SMUX_REMOTE_CLOSED, &meta);
			send_disconnect = 1;
		}
		if (send_disconnect)
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt      Packet to log
 * @is_recv  1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;
		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
					notify_handle->event_type,
					metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt  Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt  Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

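/*
 * Typical packet lifecycle, as an illustrative sketch of the helpers above
 * (added example; lcid, ch, buf and len are placeholders, and the TX path is
 * assumed to release the packet with smux_free_pkt() once it has been sent):
 *
 *	struct smux_pkt_t *pkt;
 *
 *	pkt = smux_alloc_pkt();
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, buf, len);
 *	smux_tx_queue(pkt, ch, 1);
 */
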
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	if (!ch->notify) {
		SMUX_DBG("smux: %s: lcid %d notify fn is NULL\n",
				__func__, lcid);
		return ret;
	}
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
					GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
				__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

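/*
 * Notification pipeline, summarizing the code above: schedule_notify()
 * snapshots the event and a copy of the metadata into a smux_notify_handle,
 * pushes the handle pointer through smux_notify_fifo under notify_lock_lhc1,
 * and queues smux_notify_local on smux_notify_wq. smux_notify_local_fn()
 * then drains the FIFO and invokes each client's notify() callback with no
 * spinlocks held, freeing the handle and the metadata copy afterwards.
 */
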
/**
 * Returns the serialized size of a packet.
 *
 * @pkt  Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @out.
 *
 * @pkt      Packet to serialize
 * @out      Destination buffer pointer
 * @out_len  Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
		unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

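/*
 * Serialized wire format, as produced by smux_serialize() above:
 *
 *	+-------------------+---------------------+---------------------+
 *	| struct smux_hdr_t | payload             | pad                 |
 *	| (fixed size)      | (hdr.payload_len)   | (hdr.pad_len bytes) |
 *	+-------------------+---------------------+---------------------+
 *
 * smux_serialize() zero-fills the pad bytes; the piecewise TX path in
 * smux_tx_tty() below writes zeros for the padding as well.
 */
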
/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized header data
 * @out_len[out]  Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized payload data
 * @out_len[out]  Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized padding (always NULL)
 * @out_len[out]  Pointer to the serialized payload length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data  Data to write
 * @len   Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

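/*
 * Note on write_to_tty() above: a short write is not treated as an error;
 * the loop waits up to TTY_BUFFER_FULL_WAIT_MS for the TTY to drain and then
 * retries with the remaining bytes, bailing out only on a negative return
 * code or when a reset (smux.in_reset) is in progress.
 */
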
/**
 * Write packet to TTY.
 *
 * @pkt  packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;

		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch  Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch    Character to receive
 * @lcid  Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr  Packet to queue
 * @ch       Channel to queue packet on
 * @queue    Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

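/*
 * TX scheduling note (added summary): smux_tx_queue() only appends the
 * packet to the channel's tx_queue; when @queue is set, list_channel()
 * presumably moves the channel onto smux.lch_tx_ready_list, and the actual
 * TTY writes happen later in smux_tx_worker() on smux_tx_wq (see the
 * forward declarations above).
 */
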
/**
 * Handle receive OPEN ACK command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
			if (!(list_empty(&ch->tx_queue)))
				tx_ready = 1;
		}
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		schedule_notify(lcid, SMUX_LOCAL_CLOSED, &meta_disconnected);
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
		if (enable_powerdown)
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				if (enable_powerdown)
					ack_pkt->hdr.flags |=
						SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		}

		schedule_notify(lcid, SMUX_REMOTE_CLOSED, &meta_disconnected);
		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

1461/*
1462 * Handle receive DATA command.
1463 *
1464 * @pkt Received packet
1465 *
1466 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001467 */
1468static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1469{
1470 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001471 int ret = 0;
1472 int do_retry = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001473 int tx_ready = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001474 int tmp;
1475 int rx_len;
1476 struct smux_lch_t *ch;
1477 union notifier_metadata metadata;
1478 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001479 struct smux_pkt_t *ack_pkt;
1480 unsigned long flags;
1481
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001482 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1483 ret = -ENXIO;
1484 goto out;
1485 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001486
Eric Holmbergb8435c82012-06-05 14:51:29 -06001487 rx_len = pkt->hdr.payload_len;
1488 if (rx_len == 0) {
1489 ret = -EINVAL;
1490 goto out;
1491 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001492
1493 lcid = pkt->hdr.lcid;
1494 ch = &smux_lch[lcid];
1495 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1496 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1497
1498 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1499 && !remote_loopback) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001500 SMUX_ERR("smux: ch %d error data on local state 0x%x",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001501 lcid, ch->local_state);
1502 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001503 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001504 goto out;
1505 }
1506
1507 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001508 SMUX_ERR("smux: ch %d error data on remote state 0x%x",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001509 lcid, ch->remote_state);
1510 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001511 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001512 goto out;
1513 }
1514
Eric Holmbergb8435c82012-06-05 14:51:29 -06001515 if (!list_empty(&ch->rx_retry_queue)) {
1516 do_retry = 1;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001517
1518 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
1519 !ch->rx_flow_control_auto &&
1520 ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
1521 /* need to flow control RX */
1522 ch->rx_flow_control_auto = 1;
1523 tx_ready |= smux_rx_flow_control_updated(ch);
1524 schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
1525 NULL);
1526 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06001527 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1528 /* retry queue full */
Eric Holmbergd7339a42012-08-21 16:28:12 -06001529 SMUX_ERR(
1530 "%s: ch %d RX retry queue full; rx flow=%d\n",
1531 __func__, lcid, ch->rx_flow_control_auto);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001532 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1533 ret = -ENOMEM;
1534 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1535 goto out;
1536 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001537 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001538 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001539
Eric Holmbergb8435c82012-06-05 14:51:29 -06001540 if (remote_loopback) {
1541 /* Echo the data back to the remote client. */
1542 ack_pkt = smux_alloc_pkt();
1543 if (ack_pkt) {
1544 ack_pkt->hdr.lcid = lcid;
1545 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1546 ack_pkt->hdr.flags = 0;
1547 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1548 if (ack_pkt->hdr.payload_len) {
1549 smux_alloc_pkt_payload(ack_pkt);
1550 memcpy(ack_pkt->payload, pkt->payload,
1551 ack_pkt->hdr.payload_len);
1552 }
1553 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1554 smux_tx_queue(ack_pkt, ch, 0);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001555 tx_ready = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001556 } else {
Eric Holmberg51f46cb2012-08-21 16:43:39 -06001557 SMUX_ERR("%s: Remote loopack allocation failure\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06001558 __func__);
1559 }
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
					&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			SMUX_ERR("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			SMUX_ERR("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		SMUX_ERR("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x\n",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;	/* success even if channel is not fully opened */
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			SMUX_ERR("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			SMUX_ERR("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}
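
/*
 * For reference, a sketch of the power-down handshake implied by the
 * handler above together with smux_tx_worker() (states are illustrative
 * of the code paths, not a normative protocol description):
 *
 *	local				remote
 *	-----				------
 *	PWR_CTL (sleep req)  ------>
 *	  [SMUX_PWR_TURNING_OFF_FLUSH
 *	   -> SMUX_PWR_TURNING_OFF]
 *			     <------	PWR_CTL | PWR_CTL_ACK
 *	  [SMUX_PWR_OFF_FLUSH]
 *	  flush TTY, UART clock off
 *	  [SMUX_PWR_OFF]
 *
 * If both sides request sleep at the same time, the shortcut paths above
 * treat the remote request as the ACK and no explicit ACK is sent.
 */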

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			SMUX_ERR("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len  Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		SMUX_ERR("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}
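
/*
 * Frame layout notes (derived from the RX state machine below): each
 * packet on the wire is a struct smux_hdr_t -- beginning with the magic
 * bytes SMUX_MAGIC_WORD1/SMUX_MAGIC_WORD2 -- followed by payload_len
 * bytes of payload and pad_len bytes of padding.  smux_deserialize() is
 * always handed exactly one such reassembled frame in a contiguous
 * buffer (smux.recv_buf), so it can validate the magic and alias the
 * payload in place rather than copying it.
 */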

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.remote_initiated_wakeup_count++;
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
				msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else if (smux.power_state == SMUX_PWR_ON) {
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		/* stale wakeup request from previous wakeup */
		SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
				msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			SMUX_PWR("smux: RX Wakeup REQ\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			SMUX_PWR("smux: RX Wakeup ACK\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
					__func__, (unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			SMUX_ERR(
				"%s: rx parse error for char %c; *used=%d, len=%d\n",
				__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len  Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}
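
/*
 * Illustrative only (compiled out): a minimal sketch of how a
 * line-discipline receive hook could feed raw TTY bytes into
 * smux_rx_state_machine().  The function name and the per-byte flag
 * handling are hypothetical; the real ldisc callback in this driver
 * may differ.
 */
#if 0
static void example_ldisc_receive(struct tty_struct *tty,
		const unsigned char *cp, char *fp, int count)
{
	/*
	 * fp, when non-NULL, holds one TTY error flag per data byte;
	 * TTY_NORMAL (0) means the block arrived without error.
	 */
	smux_rx_state_machine(cp, count, fp ? fp[0] : TTY_NORMAL);
}
#endif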

/**
 * Returns true if the remote side has acknowledged a wakeup
 * request previously, so we know that the link is alive and active.
 *
 * @returns true for is alive, false for not alive
 */
bool smux_remote_is_active(void)
{
	bool is_active = false;

	mutex_lock(&smux.mutex_lha0);
	if (smux.remote_is_alive)
		is_active = true;
	mutex_unlock(&smux.mutex_lha0);

	return is_active;
}

/**
 * Sends a delay command to the remote side.
 *
 * @ms: Time in milliseconds for the remote side to delay
 *
 * This command defines the delay that the remote side will use
 * to slow the response time for DATA commands.
 */
void smux_set_loopback_data_reply_delay(uint32_t ms)
{
	struct smux_lch_t *ch = &smux_lch[SMUX_TEST_LCID];
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: unable to allocate packet\n", __func__);
		return;
	}

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_DELAY;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = sizeof(uint32_t);
	pkt->hdr.pad_len = 0;

	if (smux_alloc_pkt_payload(pkt)) {
		pr_err("%s: unable to allocate payload\n", __func__);
		smux_free_pkt(pkt);
		return;
	}
	memcpy(pkt->payload, &ms, sizeof(uint32_t));

	smux_tx_queue(pkt, ch, 1);
}

/**
 * Retrieve wakeup counts.
 *
 * @local_cnt: Pointer to local wakeup count
 * @remote_cnt: Pointer to remote wakeup count
 */
void smux_get_wakeup_counts(int *local_cnt, int *remote_cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);

	if (local_cnt)
		*local_cnt = smux.local_initiated_wakeup_count;

	if (remote_cnt)
		*remote_cnt = smux.remote_initiated_wakeup_count;

	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}
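
/*
 * Example (hypothetical caller, e.g. a debugfs dump); either pointer
 * may be NULL if only one count is of interest:
 *
 *	int local, remote;
 *
 *	smux_get_wakeup_counts(&local, &remote);
 *	pr_info("smux wakeups: local=%d remote=%d\n", local, remote);
 */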

/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit packet on correct transport and then perform client
 * notification.
 *
 * @ch  Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("smux: %s: PKT write done\n",
						__func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				SMUX_ERR("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}
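
/*
 * Note on the write-completion contract implied above: for DATA packets
 * the client is notified exactly once per write -- SMUX_WRITE_DONE on
 * success or SMUX_WRITE_FAIL on a transport error -- and the metadata
 * echoes back the original pkt_priv and payload buffer so the client
 * can reclaim the buffer in its notify callback.
 */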

/**
 * Flush pending TTY TX data.
 */
static void smux_flush_tty(void)
{
	mutex_lock(&smux.mutex_lha0);
	if (!smux.tty) {
		SMUX_ERR("%s: ldisc not loaded\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}

	tty_wait_until_sent(smux.tty,
			msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

	if (tty_chars_in_buffer(smux.tty) > 0)
		SMUX_ERR("%s: unable to flush UART queue\n", __func__);

	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Purge TX queue for logical channel.
 *
 * @ch     Logical channel pointer
 * @is_ssr 1 = this is a subsystem restart purge
 *
 * Must be called with the following spinlocks locked:
 *  state_lock_lhb1
 *  tx_lock_lhb2
 */
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
{
	struct smux_pkt_t *pkt;
	int send_disconnect = 0;
	struct smux_pkt_t *pkt_tmp;
	int is_state_pkt;

	list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
		is_state_pkt = 0;
		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
				/* Open ACK must still be sent */
				is_state_pkt = 1;
			} else {
				/* Open never sent -- force to closed state */
				ch->local_state = SMUX_LCH_LOCAL_CLOSED;
				send_disconnect = 1;
			}
		} else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
				is_state_pkt = 1;
			if (!send_disconnect)
				is_state_pkt = 1;
		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* Notify client of failed write */
			union notifier_metadata meta_write;

			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
		}

		if (!is_state_pkt || is_ssr) {
			list_del(&pkt->list);
			smux_free_pkt(pkt);
		}
	}

	if (send_disconnect) {
		union notifier_metadata meta_disconnected;

		meta_disconnected.disconnected.is_ssr = smux.in_reset;
		schedule_notify(ch->lcid, SMUX_LOCAL_CLOSED,
				&meta_disconnected);
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(ch->lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
	}
}
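
/*
 * Purge policy for reference: state packets (pending OPEN/CLOSE
 * acknowledgements, and CLOSE requests when no disconnect is being
 * forced) normally survive the purge so the remote side's view of the
 * channel stays consistent; only a subsystem-restart purge (is_ssr)
 * discards them as well.  Queued DATA always fails back to the client
 * via SMUX_WRITE_FAIL.
 */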

/**
 * Power-up the UART.
 *
 * Must be called with smux.mutex_lha0 already locked.
 */
static void smux_uart_power_on_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_on_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * Power down the UART.
 *
 * Must be called with mutex_lha0 locked.
 */
static void smux_uart_power_off_atomic(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		SMUX_ERR("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		/* caller owns mutex_lha0 and will release it */
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_off_atomic();
	mutex_unlock(&smux.mutex_lha0);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_ON) {
		/* wakeup complete */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("smux: %s: wakeup complete\n", __func__);

		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
		queue_work(smux_tx_wq, &smux_tx_work);
	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* retry wakeup */
		wakeup_delay = smux.pwr_wakeup_delay_us;
		smux.pwr_wakeup_delay_us <<= 1;
		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
			smux.pwr_wakeup_delay_us =
				SMUX_WAKEUP_DELAY_MAX;

		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
			queue_work(smux_tx_wq, &smux_wakeup_work);
		} else {
			/* schedule delayed work */
			SMUX_DBG(
			"smux: %s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
		}
	} else {
		/* wakeup aborted */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
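
/*
 * Back-off progression for reference: the retry delay starts at 1 us
 * and doubles on every attempt, capping at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second).  Delays below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 us, about 33 ms) are taken inline with usleep_range() and
 * the worker is requeued immediately; longer delays are handed to
 * delayed work so the workqueue thread is not blocked.
 */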

/**
 * Inactivity timeout worker. Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	unsigned long flags;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);
					smux.power_state =
						SMUX_PWR_TURNING_OFF_FLUSH;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
					list_add_tail(&pkt->list,
							&smux.power_queue);
					queue_work(smux_tx_wq, &smux_tx_work);
				} else {
					SMUX_ERR("%s: packet alloc failed\n",
							__func__);
				}
			}
		}
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
		/* ready to power-down the UART */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux.power_state = SMUX_PWR_OFF;

		/* if data is pending, schedule a new wakeup */
		if (!list_empty(&smux.lch_tx_ready_list) ||
			!list_empty(&smux.power_queue))
			queue_work(smux_tx_wq, &smux_tx_work);

		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

		/* flush UART output queue and power down */
		smux_flush_tty();
		smux_uart_power_off();
	} else {
		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}

/**
 * Remove RX retry packet from channel and free it.
 *
 * @ch    Channel for retry packet
 * @retry Retry packet to remove
 *
 * @returns 1 if flow control updated; 0 otherwise
 *
 * Must be called with state_lock_lhb1 locked.
 */
int smux_remove_rx_retry(struct smux_lch_t *ch,
		struct smux_rx_pkt_retry *retry)
{
	int tx_ready = 0;

	list_del(&retry->rx_retry_list);
	--ch->rx_retry_queue_cnt;
	smux_free_pkt(retry->pkt);
	kfree(retry);

	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
			ch->rx_flow_control_auto) {
		ch->rx_flow_control_auto = 0;
		smux_rx_flow_control_updated(ch);
		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
		tx_ready = 1;
	}
	return tx_ready;
}

/**
 * RX worker handles all receive operations.
 *
 * @work Work structure contained in TBD structure
 */
static void smux_rx_worker(struct work_struct *work)
{
	unsigned long flags;
	int used;
	int initial_rx_state;
	struct smux_rx_worker_data *w;
	const unsigned char *data;
	int len;
	int flag;

	w = container_of(work, struct smux_rx_worker_data, work);
	data = w->data;
	len = w->len;
	flag = w->flag;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	smux.rx_activity_flag = 1;
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
	used = 0;
	do {
		if (smux.in_reset) {
			SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}

		SMUX_DBG("smux: %s: state %d; %d of %d\n",
				__func__, smux.rx_state, used, len);
		initial_rx_state = smux.rx_state;

		switch (smux.rx_state) {
		case SMUX_RX_IDLE:
			smux_rx_handle_idle(data, len, &used, flag);
			break;
		case SMUX_RX_MAGIC:
			smux_rx_handle_magic(data, len, &used, flag);
			break;
		case SMUX_RX_HDR:
			smux_rx_handle_hdr(data, len, &used, flag);
			break;
		case SMUX_RX_PAYLOAD:
			smux_rx_handle_pkt_payload(data, len, &used, flag);
			break;
		default:
			SMUX_DBG("smux: %s: invalid state %d\n",
					__func__, smux.rx_state);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}
	} while (used < len || smux.rx_state != initial_rx_state);

	complete(&w->work_complete);
}

/**
 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
 * because the client was not ready (-EAGAIN).
 *
 * @work Work structure contained in smux_lch_t structure
 */
static void smux_rx_retry_worker(struct work_struct *work)
{
	struct smux_lch_t *ch;
	struct smux_rx_pkt_retry *retry;
	union notifier_metadata metadata;
	int tmp;
	unsigned long flags;
	int immediate_retry = 0;
	int tx_ready = 0;

	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);

	/* get next retry packet */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
		/* port has been closed - remove all retries */
		while (!list_empty(&ch->rx_retry_queue)) {
			retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);
			(void)smux_remove_rx_retry(ch, retry);
		}
	}

	if (list_empty(&ch->rx_retry_queue)) {
		SMUX_DBG("smux: %s: retry list empty for channel %d\n",
				__func__, ch->lcid);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		return;
	}
	retry = list_first_entry(&ch->rx_retry_queue,
			struct smux_rx_pkt_retry,
			rx_retry_list);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
			__func__, ch->lcid, retry);
	metadata.read.pkt_priv = 0;
	metadata.read.buffer = 0;
	tmp = ch->get_rx_buffer(ch->priv,
			(void **)&metadata.read.pkt_priv,
			(void **)&metadata.read.buffer,
			retry->pkt->hdr.payload_len);
	if (tmp == 0 && metadata.read.buffer) {
		/* have valid RX buffer */

		memcpy(metadata.read.buffer, retry->pkt->payload,
				retry->pkt->hdr.payload_len);
		metadata.read.len = retry->pkt->hdr.payload_len;

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
		if (tx_ready)
			list_channel(ch);

		immediate_retry = 1;
	} else if (tmp == -EAGAIN ||
			(tmp == 0 && !metadata.read.buffer)) {
		/* retry again */
		retry->timeout_in_ms <<= 1;
		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
			/* timed out */
			SMUX_ERR("%s: ch %d RX retry client timeout\n",
					__func__, ch->lcid);
			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
			tx_ready = smux_remove_rx_retry(ch, retry);
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
			if (tx_ready)
				list_channel(ch);
		}
	} else {
		/* client error - drop packet */
		SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
				__func__, ch->lcid, tmp);
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
		if (tx_ready)
			list_channel(ch);
	}

	/* schedule next retry */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (!list_empty(&ch->rx_retry_queue)) {
		retry = list_first_entry(&ch->rx_retry_queue,
				struct smux_rx_pkt_retry,
				rx_retry_list);

		if (immediate_retry)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
		else
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
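
/*
 * Illustrative only (compiled out): a minimal sketch of a client
 * get_rx_buffer callback and the return-value contract the data and
 * retry paths above rely on.  The function name is hypothetical.
 * Returning 0 with a valid buffer lets smux copy the payload and signal
 * SMUX_READ_DONE; returning -EAGAIN (or 0 with a NULL buffer) queues
 * the packet for retry with exponential back-off; any other negative
 * value drops the packet and signals SMUX_READ_FAIL.
 */
#if 0
static int example_get_rx_buffer(void *priv, void **pkt_priv,
		void **buffer, int size)
{
	void *buf = kmalloc(size, GFP_KERNEL);

	if (!buf)
		return -EAGAIN;	/* not ready; smux will retry later */

	*pkt_priv = NULL;	/* echoed back in the read notification */
	*buffer = buf;
	return 0;
}
#endif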

/**
 * Transmit worker handles serializing and transmitting packets onto the
 * underlying transport.
 *
 * @work Work structure (not used)
 */
static void smux_tx_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	struct smux_lch_t *ch;
	unsigned low_wm_notif;
	unsigned lcid;
	unsigned long flags;

	/*
	 * Transmit packets in round-robin fashion based upon ready
	 * channels.
	 *
	 * To eliminate the need to hold a lock for the entire
	 * iteration through the channel ready list, the head of the
	 * ready-channel list is always the next channel to be
	 * processed. To send a packet, the first valid packet in
	 * the head channel is removed and the head channel is then
	 * rescheduled at the end of the queue by removing it and
	 * inserting after the tail. The locks can then be released
	 * while the packet is processed.
	 */
	while (!smux.in_reset) {
		pkt = NULL;
		low_wm_notif = 0;

		spin_lock_irqsave(&smux.tx_lock_lha2, flags);

		/* handle wakeup if needed */
		if (smux.power_state == SMUX_PWR_OFF) {
			if (!list_empty(&smux.lch_tx_ready_list) ||
				!list_empty(&smux.power_queue)) {
				/* data to transmit, do wakeup */
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_ON);
				smux.local_initiated_wakeup_count++;
				smux.power_state = SMUX_PWR_TURNING_ON;
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
				queue_work(smux_tx_wq, &smux_wakeup_work);
			} else {
				/* no activity -- stay asleep */
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
			}
			break;
		}

		/* process any pending power packets */
		if (!list_empty(&smux.power_queue)) {
			pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t, list);
			list_del(&pkt->list);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* Adjust power state if this is a flush command */
			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
					smux.power_ctl_remote_req_received) {
					/*
					 * Sending remote power-down request ACK
					 * or sending local power-down request
					 * and we already received a remote
					 * power-down request.
					 */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_OFF_FLUSH);
					smux.power_state = SMUX_PWR_OFF_FLUSH;
					smux.power_ctl_remote_req_received = 0;
					queue_work(smux_tx_wq,
							&smux_inactivity_work);
				} else {
					/* sending local power-down request */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;
				}
			}
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* send the packet */
			smux_uart_power_on();
			smux.tx_activity_flag = 1;
			SMUX_PWR_PKT_TX(pkt);
			if (!smux_byte_loopback) {
				smux_tx_tty(pkt);
				smux_flush_tty();
			} else {
				smux_tx_loopback(pkt);
			}

			smux_free_pkt(pkt);
			continue;
		}

		/* get the next ready channel */
		if (list_empty(&smux.lch_tx_ready_list)) {
			/* no ready channels */
			SMUX_DBG("smux: %s: no more ready channels, exiting\n",
					__func__);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}
		smux.tx_activity_flag = 1;

		if (smux.power_state != SMUX_PWR_ON) {
			/* channel not ready to transmit */
			SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
					__func__,
					smux.power_state);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}

		/* get the next packet to send and rotate channel list */
		ch = list_first_entry(&smux.lch_tx_ready_list,
					struct smux_lch_t,
					tx_ready_list);

		spin_lock(&ch->state_lock_lhb1);
		spin_lock(&ch->tx_lock_lhb2);
		if (!list_empty(&ch->tx_queue)) {
			/*
			 * If remote TX flow control is enabled or
			 * the channel is not fully opened, then only
			 * send command packets.
			 */
			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
				struct smux_pkt_t *curr;
				list_for_each_entry(curr, &ch->tx_queue, list) {
					if (curr->hdr.cmd != SMUX_CMD_DATA) {
						pkt = curr;
						break;
					}
				}
			} else {
				/* get next cmd/data packet to send */
				pkt = list_first_entry(&ch->tx_queue,
						struct smux_pkt_t, list);
			}
		}

		if (pkt) {
			list_del(&pkt->list);

			/* update packet stats */
			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
				--ch->tx_pending_data_cnt;
				if (ch->notify_lwm &&
					ch->tx_pending_data_cnt
						<= SMUX_TX_WM_LOW) {
					ch->notify_lwm = 0;
					low_wm_notif = 1;
				}
			}

			/* advance to the next ready channel */
			list_rotate_left(&smux.lch_tx_ready_list);
		} else {
			/* no data in channel to send, remove from ready list */
			list_del(&ch->tx_ready_list);
			INIT_LIST_HEAD(&ch->tx_ready_list);
		}
		lcid = ch->lcid;
		spin_unlock(&ch->tx_lock_lhb2);
		spin_unlock(&ch->state_lock_lhb1);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (low_wm_notif)
			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);

		/* send the packet */
		smux_tx_pkt(ch, pkt);
		smux_free_pkt(pkt);
	}
}

/**
 * Update the RX flow control (sent in the TIOCM Status command).
 *
 * @ch Channel for update
 *
 * @returns 1 for updated, 0 for not updated
 *
 * Must be called with ch->state_lock_lhb1 locked.
 */
static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
{
	int updated = 0;
	int prev_state;

	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;

	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
	else
		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;

	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
		smux_send_status_cmd(ch);
		updated = 1;
	}

	return updated;
}
Eric Holmberg06011322012-07-06 18:17:03 -06003009/**
3010 * Flush all SMUX workqueues.
3011 *
3012 * This sets the reset bit to abort any processing loops and then
3013 * flushes the workqueues to ensure that no new pending work is
3014 * running. Do not call with any locks used by workers held as
3015 * this will result in a deadlock.
3016 */
3017static void smux_flush_workqueues(void)
3018{
3019 smux.in_reset = 1;
3020
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303021 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003022 flush_workqueue(smux_tx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303023 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003024 flush_workqueue(smux_rx_wq);
Angshuman Sarkarc2df7392012-07-24 14:50:42 +05303025 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003026 flush_workqueue(smux_notify_wq);
3027}
3028
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003029/**********************************************************************/
3030/* Kernel API */
3031/**********************************************************************/
3032
/**
 * Set or clear channel option using the SMUX_CH_OPTION_* channel
 * flags.
 *
 * @lcid Logical channel ID
 * @set Options to set
 * @clear Options to clear
 *
 * @returns 0 for success, < 0 for failure
 */
int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
{
	unsigned long flags;
	struct smux_lch_t *ch;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	/* Local loopback mode */
	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;

	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* Remote loopback mode */
	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
		ch->local_mode = SMUX_LCH_MODE_NORMAL;

	/* RX Flow control */
	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 1;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
		ch->rx_flow_control_client = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	/* Auto RX Flow Control */
	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
			__func__);
		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
	}

	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
		SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
			__func__);
		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->rx_flow_control_auto = 0;
		tx_ready |= smux_rx_flow_control_updated(ch);
	}

	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

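/*
 * Illustrative usage sketch (editorial, not part of the driver): enabling
 * automatic RX flow control on a channel before opening it. The channel ID
 * used here is an arbitrary example value.
 *
 *	ret = msm_smux_set_ch_option(0, SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
 *	if (ret < 0)
 *		pr_err("failed to set channel option: %d\n", ret);
 */
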
/**
 * Starts the opening sequence for a logical channel.
 *
 * @lcid Logical channel ID
 * @priv Free for client usage
 * @notify Event notification function
 * @get_rx_buffer Function used to provide a receive buffer to SMUX
 *
 * @returns 0 for success, <0 otherwise
 *
 * The local channel state must be closed (either not previously
 * opened or msm_smux_close() has been called and the SMUX_LOCAL_CLOSED
 * notification has been received).
 *
 * If open is called before the SMUX_LOCAL_CLOSED notification has been
 * received, then the function will return -EAGAIN and the client will
 * need to retry the open later.
 *
 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
 * event.
 */
int msm_smux_open(uint8_t lcid, void *priv,
	void (*notify)(void *priv, int event_type, const void *metadata),
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size))
{
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
		SMUX_ERR("%s: open lcid %d local state %x invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
		goto out;
	}

	SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			ch->local_state,
			SMUX_LCH_LOCAL_OPENING);

	ch->rx_flow_control_auto = 0;
	ch->local_state = SMUX_LCH_LOCAL_OPENING;

	ch->priv = priv;
	ch->notify = notify;
	ch->get_rx_buffer = get_rx_buffer;
	ret = 0;

	/* Send Open Command */
	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}
	pkt->hdr.magic = SMUX_MAGIC;
	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
	pkt->hdr.lcid = lcid;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);
	tx_ready = 1;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	smux_rx_flow_control_updated(ch);
	if (tx_ready)
		list_channel(ch);
	return ret;
}

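/*
 * Illustrative client sketch (editorial; my_notify and my_get_rx_buffer are
 * hypothetical client callbacks, not part of this driver): a minimal
 * notify/get_rx_buffer pair and the matching open call.
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -ENOMEM;
 *	}
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			pr_info("channel fully opened\n");
 *	}
 *
 *	ret = msm_smux_open(0, NULL, my_notify, my_get_rx_buffer);
 *	if (ret == -EAGAIN)
 *		;	// channel still closing; retry after SMUX_LOCAL_CLOSED
 */
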
/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the client
 * will receive an SMUX_LOCAL_CLOSED notification. If the remote side is also
 * closed, then an SMUX_DISCONNECTED notification will also be sent.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;
	ch->tx_flow_control = 0;

	/* Purge TX queue */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch, 0);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
	    ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

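/*
 * Illustrative close/reopen sketch (editorial, not part of the driver): per
 * the comments above, a reopen attempted before SMUX_LOCAL_CLOSED arrives
 * fails with -EAGAIN, so a client typically waits for the notification
 * (signaled here via a hypothetical completion) before opening again.
 *
 *	msm_smux_close(0);
 *	wait_for_completion(&my_local_closed);	// completed from notify cb
 *	ret = msm_smux_open(0, NULL, my_notify, my_get_rx_buffer);
 */
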
/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *           SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
		ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		SMUX_ERR("%s: invalid local state %d channel %d\n",
			__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		SMUX_ERR("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);
	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		SMUX_ERR("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	if (ret)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

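/*
 * Illustrative write sketch (editorial; my_ctx, my_buf, and my_len are
 * hypothetical client names): the buffer must stay valid until
 * SMUX_WRITE_DONE/SMUX_WRITE_FAIL is delivered, since the packet only
 * borrows the pointer. An -EAGAIN return means the high watermark was hit
 * and the client should back off until SMUX_LOW_WM_HIT.
 *
 *	ret = msm_smux_write(0, my_ctx, my_buf, my_len);
 *	if (ret == -EAGAIN)
 *		;	// TX queue full; wait for SMUX_LOW_WM_HIT, then retry
 */
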
/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}

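/*
 * Illustrative throttling sketch (editorial; more_data_pending() and
 * send_next_chunk() are hypothetical client helpers): a client can poll the
 * watermark helpers to pace its writes instead of reacting only to the
 * SMUX_HIGH_WM_HIT/SMUX_LOW_WM_HIT events.
 *
 *	while (more_data_pending()) {
 *		if (msm_smux_is_ch_full(0) > 0)
 *			break;			// resume on SMUX_LOW_WM_HIT
 *		send_next_chunk();		// calls msm_smux_write()
 *	}
 */
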
/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}

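/*
 * Illustrative TIOCM sketch (editorial, not part of the driver): asserting
 * DTR/RTS on a channel and reading back the combined local/remote status.
 *
 *	msm_smux_tiocm_set(0, TIOCM_DTR | TIOCM_RTS, 0);
 *	if (msm_smux_tiocm_get(0) & TIOCM_CTS)
 *		pr_info("remote is ready to receive\n");
 */
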
/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR Code
 * @data Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		smux.remote_is_alive = 0;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					SMUX_ERR(
						"%s: error %d registering device %s\n",
						__func__, tmp, smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		SMUX_ERR("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			SMUX_ERR("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
				   tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner		 = THIS_MODULE,
	.magic		 = TTY_LDISC_MAGIC,
	.name		 = "n_smux",
	.open		 = smuxld_open,
	.close		 = smuxld_close,
	.flush_buffer	 = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read		 = smuxld_read,
	.write		 = smuxld_write,
	.ioctl		 = smuxld_ioctl,
	.poll		 = smuxld_poll,
	.receive_buf	 = smuxld_receive_buf,
	.write_wakeup	 = smuxld_write_wakeup
};

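/*
 * Illustrative line-discipline attach sketch (editorial; userspace, not part
 * of this driver): a daemon opens the serial port and switches it to N_SMUX
 * with the standard TIOCSETD ioctl, after which all traffic on the port is
 * owned by this ldisc. The device node name is an example only.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR);
 *	int ldisc = N_SMUX;
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 */
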
static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		SMUX_ERR("%s: error %d registering line discipline\n",
			__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		SMUX_ERR("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		SMUX_ERR("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		SMUX_ERR("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);