 1/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
 28#include <mach/subsystem_notif.h>
 29#include <mach/subsystem_restart.h>
 30#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
 36#define SMUX_WM_LOW 2
37#define SMUX_WM_HIGH 4
38#define SMUX_PKT_LOG_SIZE 80
39
40/* Maximum size we can accept in a single RX buffer */
41#define TTY_RECEIVE_ROOM 65536
42#define TTY_BUFFER_FULL_WAIT_MS 50
43
44/* maximum sleep time between wakeup attempts */
45#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
46
47/* minimum delay for scheduling delayed work */
48#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
49
50/* inactivity timeout for no rx/tx activity */
51#define SMUX_INACTIVITY_TIMEOUT_MS 1000
52
 53/* RX get_rx_buffer retry timeout values */
54#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
55#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
56
 57enum {
58 MSM_SMUX_DEBUG = 1U << 0,
59 MSM_SMUX_INFO = 1U << 1,
60 MSM_SMUX_POWER_INFO = 1U << 2,
61 MSM_SMUX_PKT = 1U << 3,
62};
63
64static int smux_debug_mask;
65module_param_named(debug_mask, smux_debug_mask,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67
68/* Simulated wakeup used for testing */
69int smux_byte_loopback;
70module_param_named(byte_loopback, smux_byte_loopback,
71 int, S_IRUGO | S_IWUSR | S_IWGRP);
72int smux_simulate_wakeup_delay = 1;
73module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
74 int, S_IRUGO | S_IWUSR | S_IWGRP);
75
76#define SMUX_DBG(x...) do { \
77 if (smux_debug_mask & MSM_SMUX_DEBUG) \
78 pr_info(x); \
79} while (0)
80
 81#define SMUX_PWR(x...) do { \
82 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
83 pr_info(x); \
84} while (0)
85
 86#define SMUX_LOG_PKT_RX(pkt) do { \
87 if (smux_debug_mask & MSM_SMUX_PKT) \
88 smux_log_pkt(pkt, 1); \
89} while (0)
90
91#define SMUX_LOG_PKT_TX(pkt) do { \
92 if (smux_debug_mask & MSM_SMUX_PKT) \
93 smux_log_pkt(pkt, 0); \
94} while (0)
95
96/**
97 * Return true if channel is fully opened (both
98 * local and remote sides are in the OPENED state).
99 */
100#define IS_FULLY_OPENED(ch) \
101 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
102 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
103
104static struct platform_device smux_devs[] = {
105 {.name = "SMUX_CTL", .id = -1},
106 {.name = "SMUX_RMNET", .id = -1},
107 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
108 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
109 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
110 {.name = "SMUX_DIAG", .id = -1},
111};
112
113enum {
114 SMUX_CMD_STATUS_RTC = 1 << 0,
115 SMUX_CMD_STATUS_RTR = 1 << 1,
116 SMUX_CMD_STATUS_RI = 1 << 2,
117 SMUX_CMD_STATUS_DCD = 1 << 3,
118 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
119};
120
121/* Channel mode */
122enum {
123 SMUX_LCH_MODE_NORMAL,
124 SMUX_LCH_MODE_LOCAL_LOOPBACK,
125 SMUX_LCH_MODE_REMOTE_LOOPBACK,
126};
127
128enum {
129 SMUX_RX_IDLE,
130 SMUX_RX_MAGIC,
131 SMUX_RX_HDR,
132 SMUX_RX_PAYLOAD,
133 SMUX_RX_FAILURE,
134};
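/*
 * Illustrative sketch (not part of the driver logic): the SMUX_RX_* states
 * above are traversed roughly as follows on an error-free byte stream:
 *
 *   SMUX_RX_IDLE    --SMUX_MAGIC_WORD1-->   SMUX_RX_MAGIC
 *   SMUX_RX_MAGIC   --SMUX_MAGIC_WORD2-->   SMUX_RX_HDR
 *   SMUX_RX_HDR     --full smux_hdr_t-->    SMUX_RX_PAYLOAD
 *   SMUX_RX_PAYLOAD --payload + pad done--> SMUX_RX_IDLE (packet dispatched)
 *
 * Wakeup request/ack bytes are handled directly in SMUX_RX_IDLE; a TTY error
 * flag in any later state moves the machine to SMUX_RX_FAILURE.
 */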
135
136/**
137 * Power states.
138 *
139 * The _FLUSH states are internal transitional states and are not part of the
140 * official state machine.
141 */
142enum {
143 SMUX_PWR_OFF,
144 SMUX_PWR_TURNING_ON,
145 SMUX_PWR_ON,
146 SMUX_PWR_TURNING_OFF_FLUSH,
147 SMUX_PWR_TURNING_OFF,
148 SMUX_PWR_OFF_FLUSH,
149};
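/*
 * Rough transition sketch (illustrative, based only on the handlers in this
 * file; the TX and inactivity workers complete the picture):
 *
 *   SMUX_PWR_OFF / SMUX_PWR_TURNING_ON  --wakeup req or ack-->  SMUX_PWR_ON
 *   SMUX_PWR_ON           --remote sleep request-->  SMUX_PWR_TURNING_OFF_FLUSH
 *   SMUX_PWR_TURNING_OFF  --sleep request ack-->     SMUX_PWR_OFF_FLUSH
 */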
150
151/**
152 * Logical Channel Structure. One instance per channel.
153 *
154 * Locking Hierarchy
155 * Each lock has a postfix that describes the locking level. If multiple locks
 156 * are required, only locks with increasing hierarchy numbers may be acquired,
 157 * which prevents deadlock.
158 *
159 * Locking Example
160 * If state_lock_lhb1 is currently held and the TX list needs to be
 161 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
162 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
163 * not be acquired since it would result in a deadlock.
164 *
165 * Note that the Line Discipline locks (*_lha) should always be acquired
166 * before the logical channel locks.
167 */
168struct smux_lch_t {
169 /* channel state */
170 spinlock_t state_lock_lhb1;
171 uint8_t lcid;
172 unsigned local_state;
173 unsigned local_mode;
174 uint8_t local_tiocm;
175
176 unsigned remote_state;
177 unsigned remote_mode;
178 uint8_t remote_tiocm;
179
180 int tx_flow_control;
181
182 /* client callbacks and private data */
183 void *priv;
184 void (*notify)(void *priv, int event_type, const void *metadata);
185 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
186 int size);
187
 188 /* RX Info */
189 struct list_head rx_retry_queue;
190 unsigned rx_retry_queue_cnt;
191 struct delayed_work rx_retry_work;
192
 193 /* TX Info */
194 spinlock_t tx_lock_lhb2;
195 struct list_head tx_queue;
196 struct list_head tx_ready_list;
197 unsigned tx_pending_data_cnt;
198 unsigned notify_lwm;
199};
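/*
 * Lock-ordering sketch for the hierarchy described above (illustrative only,
 * mirroring what smux_lch_purge() does):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);      <-- level b1
 *	spin_lock(&ch->tx_lock_lhb2);                        <-- level b2: allowed
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * Taking state_lock_lhb1 while already holding tx_lock_lhb2 would invert the
 * hierarchy and risk deadlock.
 */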
200
201union notifier_metadata {
202 struct smux_meta_disconnected disconnected;
203 struct smux_meta_read read;
204 struct smux_meta_write write;
205 struct smux_meta_tiocm tiocm;
206};
207
208struct smux_notify_handle {
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 void *priv;
211 int event_type;
212 union notifier_metadata *metadata;
213};
214
215/**
 216 * Get RX Buffer Retry structure.
217 *
218 * This is used for clients that are unable to provide an RX buffer
 219 * immediately. The structure temporarily holds the packet data until the
 220 * buffer request can be retried.
221 */
222struct smux_rx_pkt_retry {
223 struct smux_pkt_t *pkt;
224 struct list_head rx_retry_list;
225 unsigned timeout_in_ms;
226};
227
228/**
 229 * Receive worker data structure.
230 *
231 * One instance is created for every call to smux_rx_state_machine.
232 */
233struct smux_rx_worker_data {
234 const unsigned char *data;
235 int len;
236 int flag;
237
238 struct work_struct work;
239 struct completion work_complete;
240};
241
242/**
 243 * Line discipline and module structure.
244 *
245 * Only one instance since multiple instances of line discipline are not
246 * allowed.
247 */
248struct smux_ldisc_t {
 249 struct mutex mutex_lha0;
 250
251 int is_initialized;
252 int in_reset;
253 int ld_open_count;
254 struct tty_struct *tty;
255
 256 /* RX State Machine (single-threaded access by smux_rx_wq) */
 257 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
258 unsigned int recv_len;
259 unsigned int pkt_remain;
260 unsigned rx_state;
 261
 262 /* RX Activity - accessed by multiple threads */
 263 spinlock_t rx_lock_lha1;
 264 unsigned rx_activity_flag;
265
266 /* TX / Power */
267 spinlock_t tx_lock_lha2;
268 struct list_head lch_tx_ready_list;
269 unsigned power_state;
270 unsigned pwr_wakeup_delay_us;
271 unsigned tx_activity_flag;
272 unsigned powerdown_enabled;
 273 struct list_head power_queue;
 274};
275
276
277/* data structures */
278static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
279static struct smux_ldisc_t smux;
280static const char *tty_error_type[] = {
281 [TTY_NORMAL] = "normal",
282 [TTY_OVERRUN] = "overrun",
283 [TTY_BREAK] = "break",
284 [TTY_PARITY] = "parity",
285 [TTY_FRAME] = "framing",
286};
287
288static const char *smux_cmds[] = {
289 [SMUX_CMD_DATA] = "DATA",
290 [SMUX_CMD_OPEN_LCH] = "OPEN",
291 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
292 [SMUX_CMD_STATUS] = "STATUS",
293 [SMUX_CMD_PWR_CTL] = "PWR",
294 [SMUX_CMD_BYTE] = "Raw Byte",
295};
296
297static void smux_notify_local_fn(struct work_struct *work);
298static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
299
300static struct workqueue_struct *smux_notify_wq;
301static size_t handle_size;
302static struct kfifo smux_notify_fifo;
303static int queued_fifo_notifications;
304static DEFINE_SPINLOCK(notify_lock_lhc1);
305
306static struct workqueue_struct *smux_tx_wq;
 307static struct workqueue_struct *smux_rx_wq;
 308static void smux_tx_worker(struct work_struct *work);
309static DECLARE_WORK(smux_tx_work, smux_tx_worker);
310
311static void smux_wakeup_worker(struct work_struct *work);
 312static void smux_rx_retry_worker(struct work_struct *work);
 313static void smux_rx_worker(struct work_struct *work);
 314static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
315static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
316
317static void smux_inactivity_worker(struct work_struct *work);
318static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
319static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
320 smux_inactivity_worker);
321
322static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
323static void list_channel(struct smux_lch_t *ch);
324static int smux_send_status_cmd(struct smux_lch_t *ch);
325static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
 326static void smux_flush_tty(void);
 327static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
328static int schedule_notify(uint8_t lcid, int event,
329 const union notifier_metadata *metadata);
330static int ssr_notifier_cb(struct notifier_block *this,
331 unsigned long code,
332 void *data);
 333
334/**
335 * Convert TTY Error Flags to string for logging purposes.
336 *
337 * @flag TTY_* flag
338 * @returns String description or NULL if unknown
339 */
340static const char *tty_flag_to_str(unsigned flag)
341{
342 if (flag < ARRAY_SIZE(tty_error_type))
343 return tty_error_type[flag];
344 return NULL;
345}
346
347/**
348 * Convert SMUX Command to string for logging purposes.
349 *
350 * @cmd SMUX command
351 * @returns String description or NULL if unknown
352 */
353static const char *cmd_to_str(unsigned cmd)
354{
355 if (cmd < ARRAY_SIZE(smux_cmds))
356 return smux_cmds[cmd];
357 return NULL;
358}
359
360/**
361 * Set the reset state due to an unrecoverable failure.
362 */
363static void smux_enter_reset(void)
364{
365 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
366 smux.in_reset = 1;
367}
368
369static int lch_init(void)
370{
371 unsigned int id;
372 struct smux_lch_t *ch;
373 int i = 0;
374
375 handle_size = sizeof(struct smux_notify_handle *);
376
377 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
378 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
 379 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
 380
381 if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
382 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
383 __func__);
384 return -ENOMEM;
385 }
386
387 i |= kfifo_alloc(&smux_notify_fifo,
388 SMUX_NOTIFY_FIFO_SIZE * handle_size,
389 GFP_KERNEL);
390 i |= smux_loopback_init();
391
392 if (i) {
393 pr_err("%s: out of memory error\n", __func__);
394 return -ENOMEM;
395 }
396
397 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
398 ch = &smux_lch[id];
399
400 spin_lock_init(&ch->state_lock_lhb1);
401 ch->lcid = id;
402 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
403 ch->local_mode = SMUX_LCH_MODE_NORMAL;
404 ch->local_tiocm = 0x0;
405 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
406 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
407 ch->remote_tiocm = 0x0;
408 ch->tx_flow_control = 0;
409 ch->priv = 0;
410 ch->notify = 0;
411 ch->get_rx_buffer = 0;
412
 413 INIT_LIST_HEAD(&ch->rx_retry_queue);
414 ch->rx_retry_queue_cnt = 0;
415 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
416
 417 spin_lock_init(&ch->tx_lock_lhb2);
418 INIT_LIST_HEAD(&ch->tx_queue);
419 INIT_LIST_HEAD(&ch->tx_ready_list);
420 ch->tx_pending_data_cnt = 0;
421 ch->notify_lwm = 0;
422 }
423
424 return 0;
425}
426
 427/**
428 * Empty and cleanup all SMUX logical channels for subsystem restart or line
429 * discipline disconnect.
430 */
431static void smux_lch_purge(void)
432{
433 struct smux_lch_t *ch;
434 unsigned long flags;
435 int i;
436
437 /* Empty TX ready list */
438 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
439 while (!list_empty(&smux.lch_tx_ready_list)) {
440 SMUX_DBG("%s: emptying ready list %p\n",
441 __func__, smux.lch_tx_ready_list.next);
442 ch = list_first_entry(&smux.lch_tx_ready_list,
443 struct smux_lch_t,
444 tx_ready_list);
445 list_del(&ch->tx_ready_list);
446 INIT_LIST_HEAD(&ch->tx_ready_list);
447 }
 448
449 /* Purge Power Queue */
450 while (!list_empty(&smux.power_queue)) {
451 struct smux_pkt_t *pkt;
452
453 pkt = list_first_entry(&smux.power_queue,
454 struct smux_pkt_t,
455 list);
 456 list_del(&pkt->list);
 457 SMUX_DBG("%s: emptying power queue pkt=%p\n",
458 __func__, pkt);
459 smux_free_pkt(pkt);
460 }
 461 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
462
463 /* Close all ports */
464 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
465 ch = &smux_lch[i];
466 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
467
468 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
469
470 /* Purge TX queue */
471 spin_lock(&ch->tx_lock_lhb2);
472 smux_purge_ch_tx_queue(ch);
473 spin_unlock(&ch->tx_lock_lhb2);
474
475 /* Notify user of disconnect and reset channel state */
476 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
477 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
478 union notifier_metadata meta;
479
480 meta.disconnected.is_ssr = smux.in_reset;
481 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
482 }
483
484 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
485 ch->local_mode = SMUX_LCH_MODE_NORMAL;
486 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
487 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
488 ch->tx_flow_control = 0;
489
490 /* Purge RX retry queue */
491 if (ch->rx_retry_queue_cnt)
492 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
493
494 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
495 }
496
497 /* Flush TX/RX workqueues */
498 SMUX_DBG("%s: flushing tx wq\n", __func__);
499 flush_workqueue(smux_tx_wq);
500 SMUX_DBG("%s: flushing rx wq\n", __func__);
501 flush_workqueue(smux_rx_wq);
502}
503
 504int smux_assert_lch_id(uint32_t lcid)
505{
506 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
507 return -ENXIO;
508 else
509 return 0;
510}
511
512/**
513 * Log packet information for debug purposes.
514 *
515 * @pkt Packet to log
516 * @is_recv 1 = RX packet; 0 = TX Packet
517 *
518 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
519 *
520 * PKT Info:
521 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
522 *
523 * Direction: R = Receive, S = Send
524 * Local State: C = Closed; c = closing; o = opening; O = Opened
525 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
526 * Remote State: C = Closed; O = Opened
527 * Remote Mode: R = Remote loopback; N = Normal
528 */
529static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
530{
531 char logbuf[SMUX_PKT_LOG_SIZE];
532 char cmd_extra[16];
533 int i = 0;
534 int count;
535 int len;
536 char local_state;
537 char local_mode;
538 char remote_state;
539 char remote_mode;
540 struct smux_lch_t *ch;
541 unsigned char *data;
542
543 ch = &smux_lch[pkt->hdr.lcid];
544
545 switch (ch->local_state) {
546 case SMUX_LCH_LOCAL_CLOSED:
547 local_state = 'C';
548 break;
549 case SMUX_LCH_LOCAL_OPENING:
550 local_state = 'o';
551 break;
552 case SMUX_LCH_LOCAL_OPENED:
553 local_state = 'O';
554 break;
555 case SMUX_LCH_LOCAL_CLOSING:
556 local_state = 'c';
557 break;
558 default:
559 local_state = 'U';
560 break;
561 }
562
563 switch (ch->local_mode) {
564 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
565 local_mode = 'L';
566 break;
567 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
568 local_mode = 'R';
569 break;
570 case SMUX_LCH_MODE_NORMAL:
571 local_mode = 'N';
572 break;
573 default:
574 local_mode = 'U';
575 break;
576 }
577
578 switch (ch->remote_state) {
579 case SMUX_LCH_REMOTE_CLOSED:
580 remote_state = 'C';
581 break;
582 case SMUX_LCH_REMOTE_OPENED:
583 remote_state = 'O';
584 break;
585
586 default:
587 remote_state = 'U';
588 break;
589 }
590
591 switch (ch->remote_mode) {
592 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
593 remote_mode = 'R';
594 break;
595 case SMUX_LCH_MODE_NORMAL:
596 remote_mode = 'N';
597 break;
598 default:
599 remote_mode = 'U';
600 break;
601 }
602
603 /* determine command type (ACK, etc) */
604 cmd_extra[0] = '\0';
605 switch (pkt->hdr.cmd) {
606 case SMUX_CMD_OPEN_LCH:
607 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
608 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
609 break;
610 case SMUX_CMD_CLOSE_LCH:
611 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
612 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
613 break;
614 };
615
616 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
617 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
618 is_recv ? 'R' : 'S', pkt->hdr.lcid,
619 local_state, local_mode,
620 remote_state, remote_mode,
621 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
622 pkt->hdr.payload_len, pkt->hdr.pad_len);
623
624 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
625 data = (unsigned char *)pkt->payload;
626 for (count = 0; count < len; count++)
627 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
628 "%02x ", (unsigned)data[count]);
629
630 pr_info("%s\n", logbuf);
631}
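/*
 * Example of the trace produced by smux_log_pkt() (illustrative values):
 *
 *   smux: R8 ON:ON DATA flags 0 len 4:0 de ad be ef
 *
 * i.e. a received DATA packet on lcid 8, local and remote sides Opened in
 * Normal mode, 4 payload bytes and no padding.
 */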
632
633static void smux_notify_local_fn(struct work_struct *work)
634{
635 struct smux_notify_handle *notify_handle = NULL;
636 union notifier_metadata *metadata = NULL;
637 unsigned long flags;
638 int i;
639
640 for (;;) {
641 /* retrieve notification */
642 spin_lock_irqsave(&notify_lock_lhc1, flags);
643 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
644 i = kfifo_out(&smux_notify_fifo,
645 &notify_handle,
646 handle_size);
647 if (i != handle_size) {
648 pr_err("%s: unable to retrieve handle %d expected %d\n",
649 __func__, i, handle_size);
650 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
651 break;
652 }
653 } else {
654 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
655 break;
656 }
657 --queued_fifo_notifications;
658 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
659
660 /* notify client */
661 metadata = notify_handle->metadata;
662 notify_handle->notify(notify_handle->priv,
663 notify_handle->event_type,
664 metadata);
665
666 kfree(metadata);
667 kfree(notify_handle);
668 }
669}
670
671/**
672 * Initialize existing packet.
673 */
674void smux_init_pkt(struct smux_pkt_t *pkt)
675{
676 memset(pkt, 0x0, sizeof(*pkt));
677 pkt->hdr.magic = SMUX_MAGIC;
678 INIT_LIST_HEAD(&pkt->list);
679}
680
681/**
682 * Allocate and initialize packet.
683 *
684 * If a payload is needed, either set it directly and ensure that it's freed or
 685 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 686 * automatically when smux_free_pkt() is called.
687 */
688struct smux_pkt_t *smux_alloc_pkt(void)
689{
690 struct smux_pkt_t *pkt;
691
692 /* Consider a free list implementation instead of kmalloc */
693 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
694 if (!pkt) {
695 pr_err("%s: out of memory\n", __func__);
696 return NULL;
697 }
698 smux_init_pkt(pkt);
699 pkt->allocated = 1;
700
701 return pkt;
702}
703
704/**
705 * Free packet.
706 *
707 * @pkt Packet to free (may be NULL)
708 *
709 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
710 * well. Otherwise, the caller is responsible for freeing the payload.
711 */
712void smux_free_pkt(struct smux_pkt_t *pkt)
713{
714 if (pkt) {
715 if (pkt->free_payload)
716 kfree(pkt->payload);
717 if (pkt->allocated)
718 kfree(pkt);
719 }
720}
721
722/**
723 * Allocate packet payload.
724 *
725 * @pkt Packet to add payload to
726 *
727 * @returns 0 on success, <0 upon error
728 *
729 * A flag is set to signal smux_free_pkt() to free the payload.
730 */
731int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
732{
733 if (!pkt)
734 return -EINVAL;
735
736 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
737 pkt->free_payload = 1;
738 if (!pkt->payload) {
739 pr_err("%s: unable to malloc %d bytes for payload\n",
740 __func__, pkt->hdr.payload_len);
741 return -ENOMEM;
742 }
743
744 return 0;
745}
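/*
 * Typical packet lifecycle using the helpers above (illustrative sketch,
 * error handling omitted):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	smux_alloc_pkt_payload(pkt);     <-- marks the payload for auto-free
 *	memcpy(pkt->payload, buf, len);
 *	...
 *	smux_free_pkt(pkt);              <-- frees payload and packet together
 */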
746
747static int schedule_notify(uint8_t lcid, int event,
748 const union notifier_metadata *metadata)
749{
750 struct smux_notify_handle *notify_handle = 0;
751 union notifier_metadata *meta_copy = 0;
752 struct smux_lch_t *ch;
753 int i;
754 unsigned long flags;
755 int ret = 0;
756
757 ch = &smux_lch[lcid];
758 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
759 GFP_ATOMIC);
760 if (!notify_handle) {
761 pr_err("%s: out of memory\n", __func__);
762 ret = -ENOMEM;
763 goto free_out;
764 }
765
766 notify_handle->notify = ch->notify;
767 notify_handle->priv = ch->priv;
768 notify_handle->event_type = event;
769 if (metadata) {
770 meta_copy = kzalloc(sizeof(union notifier_metadata),
771 GFP_ATOMIC);
772 if (!meta_copy) {
773 pr_err("%s: out of memory\n", __func__);
774 ret = -ENOMEM;
775 goto free_out;
776 }
777 *meta_copy = *metadata;
778 notify_handle->metadata = meta_copy;
779 } else {
780 notify_handle->metadata = NULL;
781 }
782
783 spin_lock_irqsave(&notify_lock_lhc1, flags);
784 i = kfifo_avail(&smux_notify_fifo);
785 if (i < handle_size) {
786 pr_err("%s: fifo full error %d expected %d\n",
787 __func__, i, handle_size);
788 ret = -ENOMEM;
789 goto unlock_out;
790 }
791
792 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
793 if (i < 0 || i != handle_size) {
794 pr_err("%s: fifo not available error %d (expected %d)\n",
795 __func__, i, handle_size);
796 ret = -ENOSPC;
797 goto unlock_out;
798 }
799 ++queued_fifo_notifications;
800
801unlock_out:
802 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
803
804free_out:
805 queue_work(smux_notify_wq, &smux_notify_local);
806 if (ret < 0 && notify_handle) {
807 kfree(notify_handle->metadata);
808 kfree(notify_handle);
809 }
810 return ret;
811}
812
813/**
814 * Returns the serialized size of a packet.
815 *
816 * @pkt Packet to serialize
817 *
818 * @returns Serialized length of packet
819 */
820static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
821{
822 unsigned int size;
823
824 size = sizeof(struct smux_hdr_t);
825 size += pkt->hdr.payload_len;
826 size += pkt->hdr.pad_len;
827
828 return size;
829}
830
831/**
832 * Serialize packet @pkt into output buffer @data.
833 *
834 * @pkt Packet to serialize
835 * @out Destination buffer pointer
836 * @out_len Size of serialized packet
837 *
838 * @returns 0 for success
839 */
840int smux_serialize(struct smux_pkt_t *pkt, char *out,
841 unsigned int *out_len)
842{
843 char *data_start = out;
844
845 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
846 pr_err("%s: packet size %d too big\n",
847 __func__, smux_serialize_size(pkt));
848 return -E2BIG;
849 }
850
851 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
852 out += sizeof(struct smux_hdr_t);
853 if (pkt->payload) {
854 memcpy(out, pkt->payload, pkt->hdr.payload_len);
855 out += pkt->hdr.payload_len;
856 }
857 if (pkt->hdr.pad_len) {
858 memset(out, 0x0, pkt->hdr.pad_len);
859 out += pkt->hdr.pad_len;
860 }
861 *out_len = out - data_start;
862 return 0;
863}
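/*
 * On-the-wire layout produced by the serialization helpers (sketch):
 *
 *   | struct smux_hdr_t | payload (payload_len bytes) | padding (pad_len x 0x00) |
 *
 * smux_serialize() emits all three pieces into one buffer, while the
 * smux_serialize_hdr/payload/padding helpers expose them separately so the
 * TX path can push them to the TTY piecewise.
 */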
864
865/**
866 * Serialize header and provide pointer to the data.
867 *
868 * @pkt Packet
869 * @out[out] Pointer to the serialized header data
870 * @out_len[out] Pointer to the serialized header length
871 */
872static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
873 unsigned int *out_len)
874{
875 *out = (char *)&pkt->hdr;
876 *out_len = sizeof(struct smux_hdr_t);
877}
878
879/**
880 * Serialize payload and provide pointer to the data.
881 *
882 * @pkt Packet
883 * @out[out] Pointer to the serialized payload data
884 * @out_len[out] Pointer to the serialized payload length
885 */
886static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
887 unsigned int *out_len)
888{
889 *out = pkt->payload;
890 *out_len = pkt->hdr.payload_len;
891}
892
893/**
894 * Serialize padding and provide pointer to the data.
895 *
896 * @pkt Packet
897 * @out[out] Pointer to the serialized padding (always NULL)
 898 * @out_len[out] Pointer to the serialized padding length
 899 *
 900 * Since the padding field value is undefined, only the size of the padding
901 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
902 */
903static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
904 unsigned int *out_len)
905{
906 *out = NULL;
907 *out_len = pkt->hdr.pad_len;
908}
909
910/**
911 * Write data to TTY framework and handle breaking the writes up if needed.
912 *
913 * @data Data to write
914 * @len Length of data
915 *
916 * @returns 0 for success, < 0 for failure
917 */
918static int write_to_tty(char *data, unsigned len)
919{
920 int data_written;
921
922 if (!data)
923 return 0;
924
 925 while (len > 0 && !smux.in_reset) {
 926 data_written = smux.tty->ops->write(smux.tty, data, len);
927 if (data_written >= 0) {
928 len -= data_written;
929 data += data_written;
930 } else {
931 pr_err("%s: TTY write returned error %d\n",
932 __func__, data_written);
933 return data_written;
934 }
935
936 if (len)
937 tty_wait_until_sent(smux.tty,
938 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
 939 }
940 return 0;
941}
942
943/**
944 * Write packet to TTY.
945 *
946 * @pkt packet to write
947 *
948 * @returns 0 on success
949 */
950static int smux_tx_tty(struct smux_pkt_t *pkt)
951{
952 char *data;
953 unsigned int len;
954 int ret;
955
956 if (!smux.tty) {
957 pr_err("%s: TTY not initialized", __func__);
958 return -ENOTTY;
959 }
960
961 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
962 SMUX_DBG("%s: tty send single byte\n", __func__);
963 ret = write_to_tty(&pkt->hdr.flags, 1);
964 return ret;
965 }
966
967 smux_serialize_hdr(pkt, &data, &len);
968 ret = write_to_tty(data, len);
969 if (ret) {
970 pr_err("%s: failed %d to write header %d\n",
971 __func__, ret, len);
972 return ret;
973 }
974
975 smux_serialize_payload(pkt, &data, &len);
976 ret = write_to_tty(data, len);
977 if (ret) {
978 pr_err("%s: failed %d to write payload %d\n",
979 __func__, ret, len);
980 return ret;
981 }
982
983 smux_serialize_padding(pkt, &data, &len);
984 while (len > 0) {
985 char zero = 0x0;
986 ret = write_to_tty(&zero, 1);
987 if (ret) {
988 pr_err("%s: failed %d to write padding %d\n",
989 __func__, ret, len);
990 return ret;
991 }
992 --len;
993 }
994 return 0;
995}
996
997/**
998 * Send a single character.
999 *
1000 * @ch Character to send
1001 */
1002static void smux_send_byte(char ch)
1003{
 1004 struct smux_pkt_t *pkt;
 1005
 1006 pkt = smux_alloc_pkt();
1007 if (!pkt) {
1008 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1009 return;
1010 }
1011 pkt->hdr.cmd = SMUX_CMD_BYTE;
1012 pkt->hdr.flags = ch;
1013 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
 1014
 1015 list_add_tail(&pkt->list, &smux.power_queue);
1016 queue_work(smux_tx_wq, &smux_tx_work);
 1017}
1018
1019/**
1020 * Receive a single-character packet (used for internal testing).
1021 *
1022 * @ch Character to receive
1023 * @lcid Logical channel ID for packet
1024 *
1025 * @returns 0 for success
 1026 */
1027static int smux_receive_byte(char ch, int lcid)
1028{
1029 struct smux_pkt_t pkt;
1030
1031 smux_init_pkt(&pkt);
1032 pkt.hdr.lcid = lcid;
1033 pkt.hdr.cmd = SMUX_CMD_BYTE;
1034 pkt.hdr.flags = ch;
1035
1036 return smux_dispatch_rx_pkt(&pkt);
1037}
1038
1039/**
1040 * Queue packet for transmit.
1041 *
1042 * @pkt_ptr Packet to queue
1043 * @ch Channel to queue packet on
1044 * @queue Queue channel on ready list
1045 */
1046static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1047 int queue)
1048{
1049 unsigned long flags;
1050
1051 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1052
1053 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1054 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1055 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1056
1057 if (queue)
1058 list_channel(ch);
1059}
1060
1061/**
1062 * Handle receive OPEN ACK command.
1063 *
1064 * @pkt Received packet
1065 *
1066 * @returns 0 for success
 1067 */
1068static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1069{
1070 uint8_t lcid;
1071 int ret;
1072 struct smux_lch_t *ch;
1073 int enable_powerdown = 0;
1074
1075 lcid = pkt->hdr.lcid;
1076 ch = &smux_lch[lcid];
1077
1078 spin_lock(&ch->state_lock_lhb1);
1079 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1080 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1081 ch->local_state,
1082 SMUX_LCH_LOCAL_OPENED);
1083
1084 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1085 enable_powerdown = 1;
1086
1087 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1088 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1089 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1090 ret = 0;
1091 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1092 SMUX_DBG("Remote loopback OPEN ACK received\n");
1093 ret = 0;
1094 } else {
1095 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1096 __func__, lcid, ch->local_state);
1097 ret = -EINVAL;
1098 }
1099 spin_unlock(&ch->state_lock_lhb1);
1100
1101 if (enable_powerdown) {
1102 spin_lock(&smux.tx_lock_lha2);
1103 if (!smux.powerdown_enabled) {
1104 smux.powerdown_enabled = 1;
1105 SMUX_DBG("%s: enabling power-collapse support\n",
1106 __func__);
1107 }
1108 spin_unlock(&smux.tx_lock_lha2);
1109 }
1110
1111 return ret;
1112}
1113
1114static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1115{
1116 uint8_t lcid;
1117 int ret;
1118 struct smux_lch_t *ch;
1119 union notifier_metadata meta_disconnected;
1120 unsigned long flags;
1121
1122 lcid = pkt->hdr.lcid;
1123 ch = &smux_lch[lcid];
1124 meta_disconnected.disconnected.is_ssr = 0;
1125
1126 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1127
1128 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1129 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1130 SMUX_LCH_LOCAL_CLOSING,
1131 SMUX_LCH_LOCAL_CLOSED);
1132 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1133 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1134 schedule_notify(lcid, SMUX_DISCONNECTED,
1135 &meta_disconnected);
1136 ret = 0;
1137 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1138 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1139 ret = 0;
1140 } else {
1141 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1142 __func__, lcid, ch->local_state);
1143 ret = -EINVAL;
1144 }
1145 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1146 return ret;
1147}
1148
1149/**
1150 * Handle receive OPEN command.
1151 *
1152 * @pkt Received packet
1153 *
1154 * @returns 0 for success
 1155 */
1156static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1157{
1158 uint8_t lcid;
1159 int ret;
1160 struct smux_lch_t *ch;
1161 struct smux_pkt_t *ack_pkt;
 1162 unsigned long flags;
 1163 int tx_ready = 0;
1164 int enable_powerdown = 0;
1165
1166 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1167 return smux_handle_rx_open_ack(pkt);
1168
1169 lcid = pkt->hdr.lcid;
1170 ch = &smux_lch[lcid];
1171
 1172 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 1173
1174 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1175 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1176 SMUX_LCH_REMOTE_CLOSED,
1177 SMUX_LCH_REMOTE_OPENED);
1178
1179 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1180 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1181 enable_powerdown = 1;
1182
1183 /* Send Open ACK */
1184 ack_pkt = smux_alloc_pkt();
1185 if (!ack_pkt) {
1186 /* exit out to allow retrying this later */
1187 ret = -ENOMEM;
1188 goto out;
1189 }
1190 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1191 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1192 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1193 ack_pkt->hdr.lcid = lcid;
1194 ack_pkt->hdr.payload_len = 0;
1195 ack_pkt->hdr.pad_len = 0;
1196 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1197 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1198 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1199 }
1200 smux_tx_queue(ack_pkt, ch, 0);
1201 tx_ready = 1;
1202
1203 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1204 /*
1205 * Send an Open command to the remote side to
1206 * simulate our local client doing it.
1207 */
1208 ack_pkt = smux_alloc_pkt();
1209 if (ack_pkt) {
1210 ack_pkt->hdr.lcid = lcid;
1211 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1212 ack_pkt->hdr.flags =
1213 SMUX_CMD_OPEN_POWER_COLLAPSE;
1214 ack_pkt->hdr.payload_len = 0;
1215 ack_pkt->hdr.pad_len = 0;
1216 smux_tx_queue(ack_pkt, ch, 0);
1217 tx_ready = 1;
1218 } else {
 1219 pr_err("%s: Remote loopback allocation failure\n",
1220 __func__);
1221 }
1222 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1223 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1224 }
1225 ret = 0;
1226 } else {
1227 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1228 __func__, lcid, ch->remote_state);
1229 ret = -EINVAL;
1230 }
1231
1232out:
 1233 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1234
1235 if (enable_powerdown) {
 1236 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1237 if (!smux.powerdown_enabled) {
1238 smux.powerdown_enabled = 1;
1239 SMUX_DBG("%s: enabling power-collapse support\n",
1240 __func__);
1241 }
 1242 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1243 }
1244
1245 if (tx_ready)
1246 list_channel(ch);
1247
1248 return ret;
1249}
1250
1251/**
1252 * Handle receive CLOSE command.
1253 *
1254 * @pkt Received packet
1255 *
1256 * @returns 0 for success
 1257 */
1258static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1259{
1260 uint8_t lcid;
1261 int ret;
1262 struct smux_lch_t *ch;
1263 struct smux_pkt_t *ack_pkt;
1264 union notifier_metadata meta_disconnected;
 1265 unsigned long flags;
 1266 int tx_ready = 0;
1267
1268 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1269 return smux_handle_close_ack(pkt);
1270
1271 lcid = pkt->hdr.lcid;
1272 ch = &smux_lch[lcid];
1273 meta_disconnected.disconnected.is_ssr = 0;
1274
 1275 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 1276 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1277 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1278 SMUX_LCH_REMOTE_OPENED,
1279 SMUX_LCH_REMOTE_CLOSED);
1280
1281 ack_pkt = smux_alloc_pkt();
1282 if (!ack_pkt) {
1283 /* exit out to allow retrying this later */
1284 ret = -ENOMEM;
1285 goto out;
1286 }
1287 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1288 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1289 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1290 ack_pkt->hdr.lcid = lcid;
1291 ack_pkt->hdr.payload_len = 0;
1292 ack_pkt->hdr.pad_len = 0;
1293 smux_tx_queue(ack_pkt, ch, 0);
1294 tx_ready = 1;
1295
1296 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1297 /*
1298 * Send a Close command to the remote side to simulate
1299 * our local client doing it.
1300 */
1301 ack_pkt = smux_alloc_pkt();
1302 if (ack_pkt) {
1303 ack_pkt->hdr.lcid = lcid;
1304 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1305 ack_pkt->hdr.flags = 0;
1306 ack_pkt->hdr.payload_len = 0;
1307 ack_pkt->hdr.pad_len = 0;
1308 smux_tx_queue(ack_pkt, ch, 0);
1309 tx_ready = 1;
1310 } else {
 1311 pr_err("%s: Remote loopback allocation failure\n",
1312 __func__);
1313 }
1314 }
1315
1316 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1317 schedule_notify(lcid, SMUX_DISCONNECTED,
1318 &meta_disconnected);
1319 ret = 0;
1320 } else {
1321 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1322 __func__, lcid, ch->remote_state);
1323 ret = -EINVAL;
1324 }
1325out:
 1326 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1327 if (tx_ready)
1328 list_channel(ch);
1329
1330 return ret;
1331}
1332
1333/*
1334 * Handle receive DATA command.
1335 *
1336 * @pkt Received packet
1337 *
1338 * @returns 0 for success
 1339 */
1340static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1341{
1342 uint8_t lcid;
 1343 int ret = 0;
1344 int do_retry = 0;
 1345 int tmp;
1346 int rx_len;
1347 struct smux_lch_t *ch;
1348 union notifier_metadata metadata;
1349 int remote_loopback;
 1350 struct smux_pkt_t *ack_pkt;
1351 unsigned long flags;
1352
 1353 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1354 ret = -ENXIO;
1355 goto out;
1356 }
 1357
 1358 rx_len = pkt->hdr.payload_len;
1359 if (rx_len == 0) {
1360 ret = -EINVAL;
1361 goto out;
1362 }
 1363
1364 lcid = pkt->hdr.lcid;
1365 ch = &smux_lch[lcid];
1366 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1367 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1368
1369 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1370 && !remote_loopback) {
1371 pr_err("smux: ch %d error data on local state 0x%x",
1372 lcid, ch->local_state);
1373 ret = -EIO;
 1374 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1375 goto out;
1376 }
1377
1378 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1379 pr_err("smux: ch %d error data on remote state 0x%x",
1380 lcid, ch->remote_state);
1381 ret = -EIO;
 1382 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1383 goto out;
1384 }
1385
 1386 if (!list_empty(&ch->rx_retry_queue)) {
1387 do_retry = 1;
1388 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1389 /* retry queue full */
1390 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1391 ret = -ENOMEM;
1392 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1393 goto out;
1394 }
 1395 }
 1396 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1397
 1398 if (remote_loopback) {
1399 /* Echo the data back to the remote client. */
1400 ack_pkt = smux_alloc_pkt();
1401 if (ack_pkt) {
1402 ack_pkt->hdr.lcid = lcid;
1403 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1404 ack_pkt->hdr.flags = 0;
1405 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1406 if (ack_pkt->hdr.payload_len) {
1407 smux_alloc_pkt_payload(ack_pkt);
1408 memcpy(ack_pkt->payload, pkt->payload,
1409 ack_pkt->hdr.payload_len);
1410 }
1411 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1412 smux_tx_queue(ack_pkt, ch, 0);
1413 list_channel(ch);
1414 } else {
 1415 pr_err("%s: Remote loopback allocation failure\n",
1416 __func__);
1417 }
1418 } else if (!do_retry) {
1419 /* request buffer from client */
 1420 metadata.read.pkt_priv = 0;
1421 metadata.read.buffer = 0;
 1422 tmp = ch->get_rx_buffer(ch->priv,
1423 (void **)&metadata.read.pkt_priv,
1424 (void **)&metadata.read.buffer,
1425 rx_len);
 1426
 1427 if (tmp == 0 && metadata.read.buffer) {
1428 /* place data into RX buffer */
1429 memcpy(metadata.read.buffer, pkt->payload,
 1430 rx_len);
 1431 metadata.read.len = rx_len;
1432 schedule_notify(lcid, SMUX_READ_DONE,
1433 &metadata);
1434 } else if (tmp == -EAGAIN ||
1435 (tmp == 0 && !metadata.read.buffer)) {
1436 /* buffer allocation failed - add to retry queue */
1437 do_retry = 1;
1438 } else if (tmp < 0) {
1439 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1440 ret = -ENOMEM;
 1441 }
1442 }
1443
 1444 if (do_retry) {
1445 struct smux_rx_pkt_retry *retry;
1446
1447 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1448 if (!retry) {
1449 pr_err("%s: retry alloc failure\n", __func__);
1450 ret = -ENOMEM;
1451 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1452 goto out;
1453 }
1454 INIT_LIST_HEAD(&retry->rx_retry_list);
1455 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1456
1457 /* copy packet */
1458 retry->pkt = smux_alloc_pkt();
1459 if (!retry->pkt) {
1460 kfree(retry);
1461 pr_err("%s: pkt alloc failure\n", __func__);
1462 ret = -ENOMEM;
1463 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1464 goto out;
1465 }
1466 retry->pkt->hdr.lcid = lcid;
1467 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1468 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1469 if (retry->pkt->hdr.payload_len) {
1470 smux_alloc_pkt_payload(retry->pkt);
1471 memcpy(retry->pkt->payload, pkt->payload,
1472 retry->pkt->hdr.payload_len);
1473 }
1474
1475 /* add to retry queue */
1476 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1477 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1478 ++ch->rx_retry_queue_cnt;
1479 if (ch->rx_retry_queue_cnt == 1)
1480 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1481 msecs_to_jiffies(retry->timeout_in_ms));
1482 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1483 }
1484
 1485out:
 1486 return ret;
1487}
1488
1489/**
1490 * Handle receive byte command for testing purposes.
1491 *
1492 * @pkt Received packet
1493 *
1494 * @returns 0 for success
1495 */
1496static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1497{
1498 uint8_t lcid;
1499 int ret;
1500 struct smux_lch_t *ch;
1501 union notifier_metadata metadata;
1502 unsigned long flags;
1503
 1504 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1505 pr_err("%s: invalid packet or channel id\n", __func__);
 1506 return -ENXIO;
 1507 }
 1508
1509 lcid = pkt->hdr.lcid;
1510 ch = &smux_lch[lcid];
1511 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1512
1513 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1514 pr_err("smux: ch %d error data on local state 0x%x",
1515 lcid, ch->local_state);
1516 ret = -EIO;
1517 goto out;
1518 }
1519
1520 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1521 pr_err("smux: ch %d error data on remote state 0x%x",
1522 lcid, ch->remote_state);
1523 ret = -EIO;
1524 goto out;
1525 }
1526
1527 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1528 metadata.read.buffer = 0;
1529 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1530 ret = 0;
1531
1532out:
1533 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1534 return ret;
1535}
1536
1537/**
1538 * Handle receive status command.
1539 *
1540 * @pkt Received packet
1541 *
1542 * @returns 0 for success
 1543 */
1544static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1545{
1546 uint8_t lcid;
 1547 int ret = 0;
1548 struct smux_lch_t *ch;
1549 union notifier_metadata meta;
1550 unsigned long flags;
1551 int tx_ready = 0;
1552
1553 lcid = pkt->hdr.lcid;
1554 ch = &smux_lch[lcid];
1555
1556 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1557 meta.tiocm.tiocm_old = ch->remote_tiocm;
1558 meta.tiocm.tiocm_new = pkt->hdr.flags;
1559
1560 /* update logical channel flow control */
1561 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1562 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1563 /* logical channel flow control changed */
1564 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1565 /* disabled TX */
1566 SMUX_DBG("TX Flow control enabled\n");
1567 ch->tx_flow_control = 1;
1568 } else {
1569 /* re-enable channel */
1570 SMUX_DBG("TX Flow control disabled\n");
1571 ch->tx_flow_control = 0;
1572 tx_ready = 1;
1573 }
1574 }
1575 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1576 ch->remote_tiocm = pkt->hdr.flags;
1577 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1578
1579 /* client notification for status change */
1580 if (IS_FULLY_OPENED(ch)) {
1581 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1582 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1583 ret = 0;
1584 }
1585 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1586 if (tx_ready)
1587 list_channel(ch);
1588
1589 return ret;
1590}
1591
1592/**
1593 * Handle receive power command.
1594 *
1595 * @pkt Received packet
1596 *
1597 * @returns 0 for success
 1598 */
1599static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1600{
 1601 struct smux_pkt_t *ack_pkt = NULL;
 1602 unsigned long flags;
 1603
 1604 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1605 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1606 /* local sleep request ack */
1607 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1608 /* Power-down complete, turn off UART */
 1609 SMUX_PWR("%s: Power %d->%d\n", __func__,
 1610 smux.power_state, SMUX_PWR_OFF_FLUSH);
1611 smux.power_state = SMUX_PWR_OFF_FLUSH;
1612 queue_work(smux_tx_wq, &smux_inactivity_work);
1613 } else {
1614 pr_err("%s: sleep request ack invalid in state %d\n",
1615 __func__, smux.power_state);
1616 }
1617 } else {
 1618 /*
1619 * Remote sleep request
1620 *
1621 * Even if we have data pending, we need to transition to the
1622 * POWER_OFF state and then perform a wakeup since the remote
1623 * side has requested a power-down.
1624 *
1625 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1626 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1627 * when it sends the packet.
1628 */
 1629 if (smux.power_state == SMUX_PWR_ON
1630 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1631 ack_pkt = smux_alloc_pkt();
1632 if (ack_pkt) {
 1633 SMUX_PWR("%s: Power %d->%d\n", __func__,
 1634 smux.power_state,
1635 SMUX_PWR_TURNING_OFF_FLUSH);
1636
 1637 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1638
1639 /* send power-down ack */
 1640 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1641 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
 1642 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1643 list_add_tail(&ack_pkt->list,
1644 &smux.power_queue);
1645 queue_work(smux_tx_wq, &smux_tx_work);
 1646 }
1647 } else {
1648 pr_err("%s: sleep request invalid in state %d\n",
1649 __func__, smux.power_state);
1650 }
1651 }
 1652 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1653
1654 return 0;
1655}
1656
1657/**
1658 * Handle dispatching a completed packet for receive processing.
1659 *
1660 * @pkt Packet to process
1661 *
1662 * @returns 0 for success
 1663 */
1664static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1665{
 1666 int ret = -ENXIO;
 1667
1668 SMUX_LOG_PKT_RX(pkt);
1669
1670 switch (pkt->hdr.cmd) {
1671 case SMUX_CMD_OPEN_LCH:
 1672 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1673 pr_err("%s: invalid channel id %d\n",
1674 __func__, pkt->hdr.lcid);
1675 break;
1676 }
 1677 ret = smux_handle_rx_open_cmd(pkt);
1678 break;
1679
1680 case SMUX_CMD_DATA:
 1681 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1682 pr_err("%s: invalid channel id %d\n",
1683 __func__, pkt->hdr.lcid);
1684 break;
1685 }
 1686 ret = smux_handle_rx_data_cmd(pkt);
1687 break;
1688
1689 case SMUX_CMD_CLOSE_LCH:
 1690 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1691 pr_err("%s: invalid channel id %d\n",
1692 __func__, pkt->hdr.lcid);
1693 break;
1694 }
 1695 ret = smux_handle_rx_close_cmd(pkt);
1696 break;
1697
1698 case SMUX_CMD_STATUS:
 1699 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1700 pr_err("%s: invalid channel id %d\n",
1701 __func__, pkt->hdr.lcid);
1702 break;
1703 }
 1704 ret = smux_handle_rx_status_cmd(pkt);
1705 break;
1706
1707 case SMUX_CMD_PWR_CTL:
1708 ret = smux_handle_rx_power_cmd(pkt);
1709 break;
1710
1711 case SMUX_CMD_BYTE:
1712 ret = smux_handle_rx_byte_cmd(pkt);
1713 break;
1714
1715 default:
1716 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1717 ret = -EINVAL;
1718 }
1719 return ret;
1720}
1721
1722/**
1723 * Deserializes a packet and dispatches it to the packet receive logic.
1724 *
1725 * @data Raw data for one packet
1726 * @len Length of the data
1727 *
1728 * @returns 0 for success
 1729 */
1730static int smux_deserialize(unsigned char *data, int len)
1731{
1732 struct smux_pkt_t recv;
 1733
1734 smux_init_pkt(&recv);
1735
1736 /*
1737 * It may be possible to optimize this to not use the
1738 * temporary buffer.
1739 */
1740 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1741
1742 if (recv.hdr.magic != SMUX_MAGIC) {
1743 pr_err("%s: invalid header magic\n", __func__);
1744 return -EINVAL;
1745 }
1746
 1747 if (recv.hdr.payload_len)
1748 recv.payload = data + sizeof(struct smux_hdr_t);
1749
1750 return smux_dispatch_rx_pkt(&recv);
1751}
1752
1753/**
1754 * Handle wakeup request byte.
 1755 */
1756static void smux_handle_wakeup_req(void)
1757{
 1758 unsigned long flags;
1759
1760 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1761 if (smux.power_state == SMUX_PWR_OFF
1762 || smux.power_state == SMUX_PWR_TURNING_ON) {
1763 /* wakeup system */
 1764 SMUX_PWR("%s: Power %d->%d\n", __func__,
 1765 smux.power_state, SMUX_PWR_ON);
1766 smux.power_state = SMUX_PWR_ON;
1767 queue_work(smux_tx_wq, &smux_wakeup_work);
1768 queue_work(smux_tx_wq, &smux_tx_work);
1769 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1770 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1771 smux_send_byte(SMUX_WAKEUP_ACK);
1772 } else {
1773 smux_send_byte(SMUX_WAKEUP_ACK);
1774 }
 1775 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1776}
1777
1778/**
1779 * Handle wakeup request ack.
 1780 */
1781static void smux_handle_wakeup_ack(void)
1782{
 1783 unsigned long flags;
1784
1785 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1786 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1787 /* received response to wakeup request */
 1788 SMUX_PWR("%s: Power %d->%d\n", __func__,
 1789 smux.power_state, SMUX_PWR_ON);
1790 smux.power_state = SMUX_PWR_ON;
1791 queue_work(smux_tx_wq, &smux_tx_work);
1792 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1793 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1794
1795 } else if (smux.power_state != SMUX_PWR_ON) {
1796 /* invalid message */
1797 pr_err("%s: wakeup request ack invalid in state %d\n",
1798 __func__, smux.power_state);
1799 }
 1800 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1801}
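/*
 * Wakeup handshake sketch (illustrative): a side with data to send transmits
 * SMUX_WAKEUP_REQ bytes until the peer answers with SMUX_WAKEUP_ACK. The two
 * handlers above move the local power state to SMUX_PWR_ON when a request or
 * ack arrives while the link is off or a wakeup is in progress, and re-arm
 * the inactivity timeout.
 */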
1802
1803/**
1804 * RX State machine - IDLE state processing.
1805 *
1806 * @data New RX data to process
1807 * @len Length of the data
1808 * @used Return value of length processed
1809 * @flag Error flag - TTY_NORMAL 0 for no failure
 1810 */
1811static void smux_rx_handle_idle(const unsigned char *data,
1812 int len, int *used, int flag)
1813{
1814 int i;
1815
1816 if (flag) {
1817 if (smux_byte_loopback)
1818 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1819 smux_byte_loopback);
1820 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1821 ++*used;
1822 return;
1823 }
1824
1825 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1826 switch (data[i]) {
1827 case SMUX_MAGIC_WORD1:
1828 smux.rx_state = SMUX_RX_MAGIC;
1829 break;
1830 case SMUX_WAKEUP_REQ:
1831 smux_handle_wakeup_req();
1832 break;
1833 case SMUX_WAKEUP_ACK:
1834 smux_handle_wakeup_ack();
1835 break;
1836 default:
1837 /* unexpected character */
1838 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1839 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1840 smux_byte_loopback);
1841 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1842 (unsigned)data[i]);
1843 break;
1844 }
1845 }
1846
1847 *used = i;
1848}
1849
1850/**
1851 * RX State machine - Header Magic state processing.
1852 *
1853 * @data New RX data to process
1854 * @len Length of the data
1855 * @used Return value of length processed
1856 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001857 */
1858static void smux_rx_handle_magic(const unsigned char *data,
1859 int len, int *used, int flag)
1860{
1861 int i;
1862
1863 if (flag) {
1864 pr_err("%s: TTY RX error %d\n", __func__, flag);
1865 smux_enter_reset();
1866 smux.rx_state = SMUX_RX_FAILURE;
1867 ++*used;
1868 return;
1869 }
1870
1871 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1872 /* wait for completion of the magic */
1873 if (data[i] == SMUX_MAGIC_WORD2) {
1874 smux.recv_len = 0;
1875 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1876 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1877 smux.rx_state = SMUX_RX_HDR;
1878 } else {
1879 /* unexpected / trash character */
1880 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1881 __func__, data[i], *used, len);
1882 smux.rx_state = SMUX_RX_IDLE;
1883 }
1884 }
1885
1886 *used = i;
1887}
1888
1889/**
1890 * RX State machine - Packet Header state processing.
1891 *
1892 * @data New RX data to process
1893 * @len Length of the data
1894 * @used Return value of length processed
1895 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001896 */
1897static void smux_rx_handle_hdr(const unsigned char *data,
1898 int len, int *used, int flag)
1899{
1900 int i;
1901 struct smux_hdr_t *hdr;
1902
1903 if (flag) {
1904 pr_err("%s: TTY RX error %d\n", __func__, flag);
1905 smux_enter_reset();
1906 smux.rx_state = SMUX_RX_FAILURE;
1907 ++*used;
1908 return;
1909 }
1910
1911 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1912 smux.recv_buf[smux.recv_len++] = data[i];
1913
1914 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1915 /* complete header received */
1916 hdr = (struct smux_hdr_t *)smux.recv_buf;
1917 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1918 smux.rx_state = SMUX_RX_PAYLOAD;
1919 }
1920 }
1921 *used = i;
1922}
1923
1924/**
1925 * RX State machine - Packet Payload state processing.
1926 *
1927 * @data New RX data to process
1928 * @len Length of the data
1929 * @used Return value of length processed
1930 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001931 */
1932static void smux_rx_handle_pkt_payload(const unsigned char *data,
1933 int len, int *used, int flag)
1934{
1935 int remaining;
1936
1937 if (flag) {
1938 pr_err("%s: TTY RX error %d\n", __func__, flag);
1939 smux_enter_reset();
1940 smux.rx_state = SMUX_RX_FAILURE;
1941 ++*used;
1942 return;
1943 }
1944
1945 /* copy data into rx buffer */
1946 if (smux.pkt_remain < (len - *used))
1947 remaining = smux.pkt_remain;
1948 else
1949 remaining = len - *used;
1950
1951 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1952 smux.recv_len += remaining;
1953 smux.pkt_remain -= remaining;
1954 *used += remaining;
1955
1956 if (smux.pkt_remain == 0) {
1957 /* complete packet received */
1958 smux_deserialize(smux.recv_buf, smux.recv_len);
1959 smux.rx_state = SMUX_RX_IDLE;
1960 }
1961}
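
/*
 * On-the-wire framing as reconstructed by the RX handlers above (a sketch;
 * the authoritative layout of struct smux_hdr_t is in smux_private.h):
 *
 *   +------------------+------------------+---------------------------+
 *   | SMUX_MAGIC_WORD1 | SMUX_MAGIC_WORD2 | remainder of smux_hdr_t   |
 *   +------------------+------------------+---------------------------+
 *   | payload_len bytes of payload        | pad_len bytes of padding  |
 *   +-------------------------------------+---------------------------+
 *
 * smux.pkt_remain tracks payload_len + pad_len; when it reaches zero the
 * complete frame in smux.recv_buf is handed to smux_deserialize().
 */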
1962
1963/**
1964 * Feed data to the receive state machine.
1965 *
1966 * @data Pointer to data block
1967 * @len Length of data
1968 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001969 */
1970void smux_rx_state_machine(const unsigned char *data,
1971 int len, int flag)
1972{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001973 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001974
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001975 work.data = data;
1976 work.len = len;
1977 work.flag = flag;
1978 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1979 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001980
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001981 queue_work(smux_rx_wq, &work.work);
1982 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001983}
1984
1985/**
1986 * Add channel to transmit-ready list and trigger transmit worker.
1987 *
1988 * @ch Channel to add
1989 */
1990static void list_channel(struct smux_lch_t *ch)
1991{
1992 unsigned long flags;
1993
1994 SMUX_DBG("%s: listing channel %d\n",
1995 __func__, ch->lcid);
1996
1997 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1998 spin_lock(&ch->tx_lock_lhb2);
1999 smux.tx_activity_flag = 1;
2000 if (list_empty(&ch->tx_ready_list))
2001 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2002 spin_unlock(&ch->tx_lock_lhb2);
2003 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2004
2005 queue_work(smux_tx_wq, &smux_tx_work);
2006}
2007
2008/**
2009 * Transmit packet on correct transport and then perform client
2010 * notification.
2011 *
2012 * @ch Channel to transmit on
2013 * @pkt Packet to transmit
2014 */
2015static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2016{
2017 union notifier_metadata meta_write;
2018 int ret;
2019
2020 if (ch && pkt) {
2021 SMUX_LOG_PKT_TX(pkt);
2022 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2023 ret = smux_tx_loopback(pkt);
2024 else
2025 ret = smux_tx_tty(pkt);
2026
2027 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2028 /* notify write-done */
2029 meta_write.write.pkt_priv = pkt->priv;
2030 meta_write.write.buffer = pkt->payload;
2031 meta_write.write.len = pkt->hdr.payload_len;
2032 if (ret >= 0) {
2033 SMUX_DBG("%s: PKT write done\n", __func__);
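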
2034 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2035 &meta_write);
2036 } else {
2037 pr_err("%s: failed to write pkt %d\n",
2038 __func__, ret);
2039 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2040 &meta_write);
2041 }
2042 }
2043 }
2044}
2045
2046/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002047 * Flush pending TTY TX data.
2048 */
2049static void smux_flush_tty(void)
2050{
2051 if (!smux.tty) {
2052 pr_err("%s: ldisc not loaded\n", __func__);
2053 return;
2054 }
2055
2056 tty_wait_until_sent(smux.tty,
2057 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2058
2059 if (tty_chars_in_buffer(smux.tty) > 0)
2060 pr_err("%s: unable to flush UART queue\n", __func__);
2061}
2062
2063/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002064 * Purge TX queue for logical channel.
2065 *
2066 * @ch Logical channel pointer
2067 *
2068 * Must be called with the following spinlocks locked:
2069 * state_lock_lhb1
2070 * tx_lock_lhb2
2071 */
2072static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2073{
2074 struct smux_pkt_t *pkt;
2075 int send_disconnect = 0;
2076
2077 while (!list_empty(&ch->tx_queue)) {
2078 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2079 list);
2080 list_del(&pkt->list);
2081
2082 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2083 /* Open was never sent, just force to closed state */
2084 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2085 send_disconnect = 1;
2086 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2087 /* Notify client of failed write */
2088 union notifier_metadata meta_write;
2089
2090 meta_write.write.pkt_priv = pkt->priv;
2091 meta_write.write.buffer = pkt->payload;
2092 meta_write.write.len = pkt->hdr.payload_len;
2093 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2094 }
2095 smux_free_pkt(pkt);
2096 }
2097
2098 if (send_disconnect) {
2099 union notifier_metadata meta_disconnected;
2100
2101 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2102 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2103 &meta_disconnected);
2104 }
2105}
2106
2107/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002108 * Power-up the UART.
2109 */
2110static void smux_uart_power_on(void)
2111{
2112 struct uart_state *state;
2113
2114 if (!smux.tty || !smux.tty->driver_data) {
2115 pr_err("%s: unable to find UART port for tty %p\n",
2116 __func__, smux.tty);
2117 return;
2118 }
2119 state = smux.tty->driver_data;
2120 msm_hs_request_clock_on(state->uart_port);
2121}
2122
2123/**
2124 * Power down the UART.
2125 */
2126static void smux_uart_power_off(void)
2127{
2128 struct uart_state *state;
2129
2130 if (!smux.tty || !smux.tty->driver_data) {
2131 pr_err("%s: unable to find UART port for tty %p\n",
2132 __func__, smux.tty);
2133 return;
2134 }
2135 state = smux.tty->driver_data;
2136 msm_hs_request_clock_off(state->uart_port);
2137}
2138
2139/**
2140 * TX Wakeup Worker
2141 *
2142 * @work Not used
2143 *
2144 * Do an exponential back-off wakeup sequence with a maximum period
2145 * of approximately 1 second (1 << 20 microseconds).
2146 */
2147static void smux_wakeup_worker(struct work_struct *work)
2148{
2149 unsigned long flags;
2150 unsigned wakeup_delay;
2151 int complete = 0;
2152
Eric Holmberged1f00c2012-06-07 09:45:18 -06002153 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002154 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2155 if (smux.power_state == SMUX_PWR_ON) {
2156 /* wakeup complete */
2157 complete = 1;
2158 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2159 break;
2160 } else {
2161 /* retry */
2162 wakeup_delay = smux.pwr_wakeup_delay_us;
2163 smux.pwr_wakeup_delay_us <<= 1;
2164 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2165 smux.pwr_wakeup_delay_us =
2166 SMUX_WAKEUP_DELAY_MAX;
2167 }
2168 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2169 SMUX_DBG("%s: triggering wakeup\n", __func__);
2170 smux_send_byte(SMUX_WAKEUP_REQ);
2171
2172 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2173 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2174 wakeup_delay);
2175 usleep_range(wakeup_delay, 2*wakeup_delay);
2176 } else {
2177 /* schedule delayed work */
2178 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2179 __func__, wakeup_delay / 1000);
2180 queue_delayed_work(smux_tx_wq,
2181 &smux_wakeup_delayed_work,
2182 msecs_to_jiffies(wakeup_delay / 1000));
2183 break;
2184 }
2185 }
2186
2187 if (complete) {
2188 SMUX_DBG("%s: wakeup complete\n", __func__);
2189 /*
2190 * Cancel any pending retry. This avoids a race condition with
2191 * a new power-up request because:
2192 * 1) this worker doesn't modify the state
2193 * 2) this worker is processed on the same single-threaded
2194 * workqueue as new TX wakeup requests
2195 */
2196 cancel_delayed_work(&smux_wakeup_delayed_work);
2197 }
2198}
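
/*
 * Example back-off schedule for the worker above (values follow from
 * pwr_wakeup_delay_us starting at 1 and doubling each pass, capped at
 * SMUX_WAKEUP_DELAY_MAX):
 *
 *   attempt:   1   2   3   ...   15      16      ...   cap
 *   delay us:  1   2   4   ...   16384   32768   ...   1048576
 *
 * Delays below SMUX_WAKEUP_DELAY_MIN (32768 us) are slept inline with
 * usleep_range(); longer delays are handed to the delayed workqueue so the
 * TX workqueue thread is not blocked for up to a second per attempt.
 */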
2199
2200
2201/**
2202 * Inactivity timeout worker. Periodically scheduled when link is active.
2203 * When it detects inactivity, it will power-down the UART link.
2204 *
2205 * @work Work structure (not used)
2206 */
2207static void smux_inactivity_worker(struct work_struct *work)
2208{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002209 struct smux_pkt_t *pkt;
2210 unsigned long flags;
2211
2212 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2213 spin_lock(&smux.tx_lock_lha2);
2214
2215 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2216 /* no activity */
2217 if (smux.powerdown_enabled) {
2218 if (smux.power_state == SMUX_PWR_ON) {
2219 /* start power-down sequence */
2220 pkt = smux_alloc_pkt();
2221 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002222 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002223 smux.power_state,
2224 SMUX_PWR_TURNING_OFF);
2225 smux.power_state = SMUX_PWR_TURNING_OFF;
2226
2227 /* send power-down request */
2228 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2229 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002230 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2231 list_add_tail(&pkt->list,
2232 &smux.power_queue);
2233 queue_work(smux_tx_wq, &smux_tx_work);
2234 } else {
2235 pr_err("%s: packet alloc failed\n",
2236 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002237 }
2238 }
2239 } else {
2240 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2241 __func__);
2242 }
2243 }
2244 smux.tx_activity_flag = 0;
2245 smux.rx_activity_flag = 0;
2246
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002247 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002248 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002249 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002250 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002251 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002252
2253 /* if data is pending, schedule a new wakeup */
2254 if (!list_empty(&smux.lch_tx_ready_list) ||
2255 !list_empty(&smux.power_queue))
2256 queue_work(smux_tx_wq, &smux_tx_work);
2257
2258 spin_unlock(&smux.tx_lock_lha2);
2259 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2260
2261 /* flush UART output queue and power down */
2262 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002263 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002264 } else {
2265 spin_unlock(&smux.tx_lock_lha2);
2266 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002267 }
2268
2269 /* reschedule inactivity worker */
2270 if (smux.power_state != SMUX_PWR_OFF)
2271 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2272 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2273}
2274
2275/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002276 * Remove RX retry packet from channel and free it.
2277 *
2278 * Must be called with state_lock_lhb1 locked.
2279 *
2280 * @ch Channel for retry packet
2281 * @retry Retry packet to remove
2282 */
2283void smux_remove_rx_retry(struct smux_lch_t *ch,
2284 struct smux_rx_pkt_retry *retry)
2285{
2286 list_del(&retry->rx_retry_list);
2287 --ch->rx_retry_queue_cnt;
2288 smux_free_pkt(retry->pkt);
2289 kfree(retry);
2290}
2291
2292/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002293 * RX worker handles all receive operations.
2294 *
2295 * @work Work structure contained in smux_rx_worker_data structure
2296 */
2297static void smux_rx_worker(struct work_struct *work)
2298{
2299 unsigned long flags;
2300 int used;
2301 int initial_rx_state;
2302 struct smux_rx_worker_data *w;
2303 const unsigned char *data;
2304 int len;
2305 int flag;
2306
2307 w = container_of(work, struct smux_rx_worker_data, work);
2308 data = w->data;
2309 len = w->len;
2310 flag = w->flag;
2311
2312 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2313 smux.rx_activity_flag = 1;
2314 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2315
2316 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2317 used = 0;
2318 do {
2319 SMUX_DBG("%s: state %d; %d of %d\n",
2320 __func__, smux.rx_state, used, len);
2321 initial_rx_state = smux.rx_state;
2322
2323 switch (smux.rx_state) {
2324 case SMUX_RX_IDLE:
2325 smux_rx_handle_idle(data, len, &used, flag);
2326 break;
2327 case SMUX_RX_MAGIC:
2328 smux_rx_handle_magic(data, len, &used, flag);
2329 break;
2330 case SMUX_RX_HDR:
2331 smux_rx_handle_hdr(data, len, &used, flag);
2332 break;
2333 case SMUX_RX_PAYLOAD:
2334 smux_rx_handle_pkt_payload(data, len, &used, flag);
2335 break;
2336 default:
2337 SMUX_DBG("%s: invalid state %d\n",
2338 __func__, smux.rx_state);
2339 smux.rx_state = SMUX_RX_IDLE;
2340 break;
2341 }
2342 } while (used < len || smux.rx_state != initial_rx_state);
2343
2344 complete(&w->work_complete);
2345}
2346
2347/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002348 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2349 * because the client was not ready (-EAGAIN).
2350 *
2351 * @work Work structure contained in smux_lch_t structure
2352 */
2353static void smux_rx_retry_worker(struct work_struct *work)
2354{
2355 struct smux_lch_t *ch;
2356 struct smux_rx_pkt_retry *retry;
2357 union notifier_metadata metadata;
2358 int tmp;
2359 unsigned long flags;
2360
2361 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2362
2363 /* get next retry packet */
2364 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2365 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2366 /* port has been closed - remove all retries */
2367 while (!list_empty(&ch->rx_retry_queue)) {
2368 retry = list_first_entry(&ch->rx_retry_queue,
2369 struct smux_rx_pkt_retry,
2370 rx_retry_list);
2371 smux_remove_rx_retry(ch, retry);
2372 }
2373 }
2374
2375 if (list_empty(&ch->rx_retry_queue)) {
2376 SMUX_DBG("%s: retry list empty for channel %d\n",
2377 __func__, ch->lcid);
2378 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2379 return;
2380 }
2381 retry = list_first_entry(&ch->rx_retry_queue,
2382 struct smux_rx_pkt_retry,
2383 rx_retry_list);
2384 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2385
2386 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2387 metadata.read.pkt_priv = 0;
2388 metadata.read.buffer = 0;
2389 tmp = ch->get_rx_buffer(ch->priv,
2390 (void **)&metadata.read.pkt_priv,
2391 (void **)&metadata.read.buffer,
2392 retry->pkt->hdr.payload_len);
2393 if (tmp == 0 && metadata.read.buffer) {
2394 /* have valid RX buffer */
2395 memcpy(metadata.read.buffer, retry->pkt->payload,
2396 retry->pkt->hdr.payload_len);
2397 metadata.read.len = retry->pkt->hdr.payload_len;
2398
2399 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2400 smux_remove_rx_retry(ch, retry);
2401 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2402
2403 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2404 } else if (tmp == -EAGAIN ||
2405 (tmp == 0 && !metadata.read.buffer)) {
2406 /* retry again */
2407 retry->timeout_in_ms <<= 1;
2408 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2409 /* timed out */
2410 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2411 smux_remove_rx_retry(ch, retry);
2412 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2413 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2414 }
2415 } else {
2416 /* client error - drop packet */
2417 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2418 smux_remove_rx_retry(ch, retry);
2419 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2420
2421 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2422 }
2423
2424 /* schedule next retry */
2425 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2426 if (!list_empty(&ch->rx_retry_queue)) {
2427 retry = list_first_entry(&ch->rx_retry_queue,
2428 struct smux_rx_pkt_retry,
2429 rx_retry_list);
2430 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2431 msecs_to_jiffies(retry->timeout_in_ms));
2432 }
2433 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2434}
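
/*
 * Example retry schedule for the worker above, assuming a retry entry starts
 * at SMUX_RX_RETRY_MIN_MS when it is first queued (that code is outside this
 * section):
 *
 *   retry delay ms: 1, 2, 4, 8, ..., 512, 1024
 *
 * Once timeout_in_ms doubles past SMUX_RX_RETRY_MAX_MS (1024 ms) the packet
 * is dropped and the client is notified with SMUX_READ_FAIL, so a client
 * that keeps returning -EAGAIN has roughly two seconds to provide a buffer.
 */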
2435
2436/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002437 * Transmit worker handles serializing and transmitting packets onto the
2438 * underlying transport.
2439 *
2440 * @work Work structure (not used)
2441 */
2442static void smux_tx_worker(struct work_struct *work)
2443{
2444 struct smux_pkt_t *pkt;
2445 struct smux_lch_t *ch;
2446 unsigned low_wm_notif;
2447 unsigned lcid;
2448 unsigned long flags;
2449
2450
2451 /*
2452 * Transmit packets in round-robin fashion based upon ready
2453 * channels.
2454 *
2455 * To eliminate the need to hold a lock for the entire
2456 * iteration through the channel ready list, the head of the
2457 * ready-channel list is always the next channel to be
2458 * processed. To send a packet, the first valid packet in
2459 * the head channel is removed and the head channel is then
2460 * rescheduled at the end of the queue by removing it and
2461 * inserting after the tail. The locks can then be released
2462 * while the packet is processed.
2463 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002464 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002465 pkt = NULL;
2466 low_wm_notif = 0;
2467
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002468 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002469
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002470 /* handle wakeup if needed */
2471 if (smux.power_state == SMUX_PWR_OFF) {
2472 if (!list_empty(&smux.lch_tx_ready_list) ||
2473 !list_empty(&smux.power_queue)) {
2474 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002475 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002476 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002477 smux.power_state,
2478 SMUX_PWR_TURNING_ON);
2479 smux.power_state = SMUX_PWR_TURNING_ON;
2480 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2481 flags);
2482 smux_uart_power_on();
2483 queue_work(smux_tx_wq, &smux_wakeup_work);
2484 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002485 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002486 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2487 flags);
2488 }
2489 break;
2490 }
2491
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002492 /* process any pending power packets */
2493 if (!list_empty(&smux.power_queue)) {
2494 pkt = list_first_entry(&smux.power_queue,
2495 struct smux_pkt_t, list);
2496 list_del(&pkt->list);
2497 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2498
2499 /* send the packet */
2500 SMUX_LOG_PKT_TX(pkt);
2501 if (!smux_byte_loopback) {
2502 smux_tx_tty(pkt);
2503 smux_flush_tty();
2504 } else {
2505 smux_tx_loopback(pkt);
2506 }
2507
2508 /* Adjust power state if this is a flush command */
2509 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2510 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2511 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2512 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002513 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002514 smux.power_state,
2515 SMUX_PWR_OFF_FLUSH);
2516 smux.power_state = SMUX_PWR_OFF_FLUSH;
2517 queue_work(smux_tx_wq, &smux_inactivity_work);
2518 }
2519 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2520
2521 smux_free_pkt(pkt);
2522 continue;
2523 }
2524
2525 /* get the next ready channel */
2526 if (list_empty(&smux.lch_tx_ready_list)) {
2527 /* no ready channels */
2528 SMUX_DBG("%s: no more ready channels, exiting\n",
2529 __func__);
2530 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2531 break;
2532 }
2533 smux.tx_activity_flag = 1;
2534
2535 if (smux.power_state != SMUX_PWR_ON) {
2536 /* channel not ready to transmit */
2537 SMUX_DBG("%s: can not tx with power state %d\n",
2538 __func__,
2539 smux.power_state);
2540 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2541 break;
2542 }
2543
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002544 /* get the next packet to send and rotate channel list */
2545 ch = list_first_entry(&smux.lch_tx_ready_list,
2546 struct smux_lch_t,
2547 tx_ready_list);
2548
2549 spin_lock(&ch->state_lock_lhb1);
2550 spin_lock(&ch->tx_lock_lhb2);
2551 if (!list_empty(&ch->tx_queue)) {
2552 /*
2553 * If remote TX flow control is enabled or
2554 * the channel is not fully opened, then only
2555 * send command packets.
2556 */
2557 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2558 struct smux_pkt_t *curr;
2559 list_for_each_entry(curr, &ch->tx_queue, list) {
2560 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2561 pkt = curr;
2562 break;
2563 }
2564 }
2565 } else {
2566 /* get next cmd/data packet to send */
2567 pkt = list_first_entry(&ch->tx_queue,
2568 struct smux_pkt_t, list);
2569 }
2570 }
2571
2572 if (pkt) {
2573 list_del(&pkt->list);
2574
2575 /* update packet stats */
2576 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2577 --ch->tx_pending_data_cnt;
2578 if (ch->notify_lwm &&
2579 ch->tx_pending_data_cnt
2580 <= SMUX_WM_LOW) {
2581 ch->notify_lwm = 0;
2582 low_wm_notif = 1;
2583 }
2584 }
2585
2586 /* advance to the next ready channel */
2587 list_rotate_left(&smux.lch_tx_ready_list);
2588 } else {
2589 /* no data in channel to send, remove from ready list */
2590 list_del(&ch->tx_ready_list);
2591 INIT_LIST_HEAD(&ch->tx_ready_list);
2592 }
2593 lcid = ch->lcid;
2594 spin_unlock(&ch->tx_lock_lhb2);
2595 spin_unlock(&ch->state_lock_lhb1);
2596 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2597
2598 if (low_wm_notif)
2599 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2600
2601 /* send the packet */
2602 smux_tx_pkt(ch, pkt);
2603 smux_free_pkt(pkt);
2604 }
2605}
2606
2607
2608/**********************************************************************/
2609/* Kernel API */
2610/**********************************************************************/
2611
2612/**
2613 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2614 * flags.
2615 *
2616 * @lcid Logical channel ID
2617 * @set Options to set
2618 * @clear Options to clear
2619 *
2620 * @returns 0 for success, < 0 for failure
2621 */
2622int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2623{
2624 unsigned long flags;
2625 struct smux_lch_t *ch;
2626 int tx_ready = 0;
2627 int ret = 0;
2628
2629 if (smux_assert_lch_id(lcid))
2630 return -ENXIO;
2631
2632 ch = &smux_lch[lcid];
2633 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2634
2635 /* Local loopback mode */
2636 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2637 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2638
2639 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2640 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2641
2642 /* Remote loopback mode */
2643 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2644 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2645
2646 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2647 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2648
2649 /* Flow control */
2650 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2651 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2652 ret = smux_send_status_cmd(ch);
2653 tx_ready = 1;
2654 }
2655
2656 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2657 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2658 ret = smux_send_status_cmd(ch);
2659 tx_ready = 1;
2660 }
2661
2662 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2663
2664 if (tx_ready)
2665 list_channel(ch);
2666
2667 return ret;
2668}
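
/*
 * Usage sketch for msm_smux_set_ch_option() (illustrative only; the channel
 * number is hypothetical and error handling is elided):
 *
 *   // Put channel 0 into local loopback for testing, then restore it.
 *   msm_smux_set_ch_option(0, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *   msm_smux_set_ch_option(0, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 *
 *   // Ask the remote side to stop transmitting, then re-enable it.
 *   msm_smux_set_ch_option(0, SMUX_CH_OPTION_REMOTE_TX_STOP, 0);
 *   msm_smux_set_ch_option(0, 0, SMUX_CH_OPTION_REMOTE_TX_STOP);
 */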
2669
2670/**
2671 * Starts the opening sequence for a logical channel.
2672 *
2673 * @lcid Logical channel ID
2674 * @priv Free for client usage
2675 * @notify Event notification function
2676 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2677 *
2678 * @returns 0 for success, <0 otherwise
2679 *
2680 * A channel must be fully closed (either not previously opened or
2681 * msm_smux_close() has been called and the SMUX_DISCONNECTED has been
2682 * received.
2683 *
2684 * One the remote side is opened, the client will receive a SMUX_CONNECTED
2685 * event.
2686 */
2687int msm_smux_open(uint8_t lcid, void *priv,
2688 void (*notify)(void *priv, int event_type, const void *metadata),
2689 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2690 int size))
2691{
2692 int ret;
2693 struct smux_lch_t *ch;
2694 struct smux_pkt_t *pkt;
2695 int tx_ready = 0;
2696 unsigned long flags;
2697
2698 if (smux_assert_lch_id(lcid))
2699 return -ENXIO;
2700
2701 ch = &smux_lch[lcid];
2702 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2703
2704 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2705 ret = -EAGAIN;
2706 goto out;
2707 }
2708
2709 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2710 pr_err("%s: open lcid %d local state %x invalid\n",
2711 __func__, lcid, ch->local_state);
2712 ret = -EINVAL;
2713 goto out;
2714 }
2715
2716 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2717 ch->local_state,
2718 SMUX_LCH_LOCAL_OPENING);
2719
2720 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2721
2722 ch->priv = priv;
2723 ch->notify = notify;
2724 ch->get_rx_buffer = get_rx_buffer;
2725 ret = 0;
2726
2727 /* Send Open Command */
2728 pkt = smux_alloc_pkt();
2729 if (!pkt) {
2730 ret = -ENOMEM;
2731 goto out;
2732 }
2733 pkt->hdr.magic = SMUX_MAGIC;
2734 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2735 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2736 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2737 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2738 pkt->hdr.lcid = lcid;
2739 pkt->hdr.payload_len = 0;
2740 pkt->hdr.pad_len = 0;
2741 smux_tx_queue(pkt, ch, 0);
2742 tx_ready = 1;
2743
2744out:
2745 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2746 if (tx_ready)
2747 list_channel(ch);
2748 return ret;
2749}
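
/*
 * Minimal client sketch for msm_smux_open() (illustrative only; the client
 * names, channel number, and allocation strategy are assumptions, and error
 * handling is elided):
 *
 *   static void client_notify(void *priv, int event, const void *metadata)
 *   {
 *           switch (event) {
 *           case SMUX_CONNECTED:      // channel fully open; writes will flow
 *           case SMUX_DISCONNECTED:   // close handshake finished
 *           case SMUX_READ_DONE:      // metadata->read holds buffer and len
 *           case SMUX_WRITE_DONE:     // metadata->write identifies the buffer
 *           default:
 *                   break;
 *           }
 *   }
 *
 *   static int client_get_rx_buffer(void *priv, void **pkt_priv,
 *                                   void **buffer, int size)
 *   {
 *           *pkt_priv = NULL;
 *           *buffer = kmalloc(size, GFP_ATOMIC);
 *           return *buffer ? 0 : -EAGAIN;  // -EAGAIN triggers the RX retry path
 *   }
 *
 *   ret = msm_smux_open(0, NULL, client_notify, client_get_rx_buffer);
 */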
2750
2751/**
2752 * Starts the closing sequence for a logical channel.
2753 *
2754 * @lcid Logical channel ID
2755 *
2756 * @returns 0 for success, <0 otherwise
2757 *
2758 * Once the close event has been acknowledged by the remote side, the client
2759 * will receive a SMUX_DISCONNECTED notification.
2760 */
2761int msm_smux_close(uint8_t lcid)
2762{
2763 int ret = 0;
2764 struct smux_lch_t *ch;
2765 struct smux_pkt_t *pkt;
2766 int tx_ready = 0;
2767 unsigned long flags;
2768
2769 if (smux_assert_lch_id(lcid))
2770 return -ENXIO;
2771
2772 ch = &smux_lch[lcid];
2773 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2774 ch->local_tiocm = 0x0;
2775 ch->remote_tiocm = 0x0;
2776 ch->tx_pending_data_cnt = 0;
2777 ch->notify_lwm = 0;
2778
2779 /* Purge TX queue */
2780 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002781 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002782 spin_unlock(&ch->tx_lock_lhb2);
2783
2784 /* Send Close Command */
2785 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2786 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2787 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2788 ch->local_state,
2789 SMUX_LCH_LOCAL_CLOSING);
2790
2791 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2792 pkt = smux_alloc_pkt();
2793 if (pkt) {
2794 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2795 pkt->hdr.flags = 0;
2796 pkt->hdr.lcid = lcid;
2797 pkt->hdr.payload_len = 0;
2798 pkt->hdr.pad_len = 0;
2799 smux_tx_queue(pkt, ch, 0);
2800 tx_ready = 1;
2801 } else {
2802 pr_err("%s: pkt allocation failed\n", __func__);
2803 ret = -ENOMEM;
2804 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002805
2806 /* Purge RX retry queue */
2807 if (ch->rx_retry_queue_cnt)
2808 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002809 }
2810 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2811
2812 if (tx_ready)
2813 list_channel(ch);
2814
2815 return ret;
2816}
2817
2818/**
2819 * Write data to a logical channel.
2820 *
2821 * @lcid Logical channel ID
2822 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2823 * SMUX_WRITE_FAIL notification.
2824 * @data Data to write
2825 * @len Length of @data
2826 *
2827 * @returns 0 for success, <0 otherwise
2828 *
2829 * Data may be written immediately after msm_smux_open() is called,
2830 * but the data will wait in the transmit queue until the channel has
2831 * been fully opened.
2832 *
2833 * Once the data has been written, the client will receive either a completion
2834 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2835 */
2836int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2837{
2838 struct smux_lch_t *ch;
2839 struct smux_pkt_t *pkt;
2840 int tx_ready = 0;
2841 unsigned long flags;
2842 int ret;
2843
2844 if (smux_assert_lch_id(lcid))
2845 return -ENXIO;
2846
2847 ch = &smux_lch[lcid];
2848 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2849
2850 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2851 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2852 pr_err("%s: hdr.invalid local state %d channel %d\n",
2853 __func__, ch->local_state, lcid);
2854 ret = -EINVAL;
2855 goto out;
2856 }
2857
2858 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2859 pr_err("%s: payload %d too large\n",
2860 __func__, len);
2861 ret = -E2BIG;
2862 goto out;
2863 }
2864
2865 pkt = smux_alloc_pkt();
2866 if (!pkt) {
2867 ret = -ENOMEM;
2868 goto out;
2869 }
2870
2871 pkt->hdr.cmd = SMUX_CMD_DATA;
2872 pkt->hdr.lcid = lcid;
2873 pkt->hdr.flags = 0;
2874 pkt->hdr.payload_len = len;
2875 pkt->payload = (void *)data;
2876 pkt->priv = pkt_priv;
2877 pkt->hdr.pad_len = 0;
2878
2879 spin_lock(&ch->tx_lock_lhb2);
2880 /* verify high watermark */
2881 SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
2882
2883 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2884 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2885 __func__, lcid, SMUX_WM_HIGH,
2886 ch->tx_pending_data_cnt);
2887 ret = -EAGAIN;
2888 goto out_inner;
2889 }
2890
2891 /* queue packet for transmit */
2892 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2893 ch->notify_lwm = 1;
2894 pr_err("%s: high watermark hit\n", __func__);
2895 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2896 }
2897 list_add_tail(&pkt->list, &ch->tx_queue);
2898
2899 /* add to ready list */
2900 if (IS_FULLY_OPENED(ch))
2901 tx_ready = 1;
2902
2903 ret = 0;
2904
2905out_inner:
2906 spin_unlock(&ch->tx_lock_lhb2);
2907
2908out:
2909 if (ret)
2910 smux_free_pkt(pkt);
2911 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2912
2913 if (tx_ready)
2914 list_channel(ch);
2915
2916 return ret;
2917}
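
/*
 * Write-path sketch (illustrative only): a client queues data and backs off
 * when the per-channel high watermark (SMUX_WM_HIGH pending packets) is hit,
 * resuming on SMUX_LOW_WM_HIT, which the TX worker raises once the pending
 * count drops to SMUX_WM_LOW.
 *
 *   ret = msm_smux_write(lcid, my_ctx, buf, len);
 *   if (ret == -EAGAIN) {
 *           // high watermark reached; wait for SMUX_LOW_WM_HIT (or poll
 *           // msm_smux_is_ch_low()) before queuing more data
 *   }
 *   // on success, buf must stay valid until SMUX_WRITE_DONE/SMUX_WRITE_FAIL
 */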
2918
2919/**
2920 * Returns true if the TX queue is currently full (high water mark).
2921 *
2922 * @lcid Logical channel ID
2923 * @returns 0 if channel is not full
2924 * 1 if it is full
2925 * < 0 for error
2926 */
2927int msm_smux_is_ch_full(uint8_t lcid)
2928{
2929 struct smux_lch_t *ch;
2930 unsigned long flags;
2931 int is_full = 0;
2932
2933 if (smux_assert_lch_id(lcid))
2934 return -ENXIO;
2935
2936 ch = &smux_lch[lcid];
2937
2938 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2939 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2940 is_full = 1;
2941 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2942
2943 return is_full;
2944}
2945
2946/**
2947 * Returns true if the TX queue has space for more packets (it is at or
2948 * below the low water mark).
2949 *
2950 * @lcid Logical channel ID
2951 * @returns 0 if channel is above low watermark
2952 * 1 if it's at or below the low watermark
2953 * < 0 for error
2954 */
2955int msm_smux_is_ch_low(uint8_t lcid)
2956{
2957 struct smux_lch_t *ch;
2958 unsigned long flags;
2959 int is_low = 0;
2960
2961 if (smux_assert_lch_id(lcid))
2962 return -ENXIO;
2963
2964 ch = &smux_lch[lcid];
2965
2966 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2967 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2968 is_low = 1;
2969 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2970
2971 return is_low;
2972}
2973
2974/**
2975 * Send TIOCM status update.
2976 *
2977 * @ch Channel for update
2978 *
2979 * @returns 0 for success, <0 for failure
2980 *
2981 * Channel lock must be held before calling.
2982 */
2983static int smux_send_status_cmd(struct smux_lch_t *ch)
2984{
2985 struct smux_pkt_t *pkt;
2986
2987 if (!ch)
2988 return -EINVAL;
2989
2990 pkt = smux_alloc_pkt();
2991 if (!pkt)
2992 return -ENOMEM;
2993
2994 pkt->hdr.lcid = ch->lcid;
2995 pkt->hdr.cmd = SMUX_CMD_STATUS;
2996 pkt->hdr.flags = ch->local_tiocm;
2997 pkt->hdr.payload_len = 0;
2998 pkt->hdr.pad_len = 0;
2999 smux_tx_queue(pkt, ch, 0);
3000
3001 return 0;
3002}
3003
3004/**
3005 * Internal helper function for getting the TIOCM status with
3006 * state_lock_lhb1 already locked.
3007 *
3008 * @ch Channel pointer
3009 *
3010 * @returns TIOCM status
3011 */
3012static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3013{
3014 long status = 0x0;
3015
3016 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3017 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3018 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3019 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3020
3021 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3022 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3023
3024 return status;
3025}
3026
3027/**
3028 * Get the TIOCM status bits.
3029 *
3030 * @lcid Logical channel ID
3031 *
3032 * @returns >= 0 TIOCM status bits
3033 * < 0 Error condition
3034 */
3035long msm_smux_tiocm_get(uint8_t lcid)
3036{
3037 struct smux_lch_t *ch;
3038 unsigned long flags;
3039 long status = 0x0;
3040
3041 if (smux_assert_lch_id(lcid))
3042 return -ENXIO;
3043
3044 ch = &smux_lch[lcid];
3045 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3046 status = msm_smux_tiocm_get_atomic(ch);
3047 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3048
3049 return status;
3050}
3051
3052/**
3053 * Set/clear the TIOCM status bits.
3054 *
3055 * @lcid Logical channel ID
3056 * @set Bits to set
3057 * @clear Bits to clear
3058 *
3059 * @returns 0 for success; < 0 for failure
3060 *
3061 * If a bit is specified in both the @set and @clear masks, then the clear bit
3062 * definition will dominate and the bit will be cleared.
3063 */
3064int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3065{
3066 struct smux_lch_t *ch;
3067 unsigned long flags;
3068 uint8_t old_status;
3069 uint8_t status_set = 0x0;
3070 uint8_t status_clear = 0x0;
3071 int tx_ready = 0;
3072 int ret = 0;
3073
3074 if (smux_assert_lch_id(lcid))
3075 return -ENXIO;
3076
3077 ch = &smux_lch[lcid];
3078 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3079
3080 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3081 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3082 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3083 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3084
3085 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3086 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3087 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3088 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3089
3090 old_status = ch->local_tiocm;
3091 ch->local_tiocm |= status_set;
3092 ch->local_tiocm &= ~status_clear;
3093
3094 if (ch->local_tiocm != old_status) {
3095 ret = smux_send_status_cmd(ch);
3096 tx_ready = 1;
3097 }
3098 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3099
3100 if (tx_ready)
3101 list_channel(ch);
3102
3103 return ret;
3104}
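
/*
 * Usage sketch (illustrative only): assert DTR and RTS on a channel and read
 * back the combined local/remote status bits.
 *
 *   msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *   status = msm_smux_tiocm_get(lcid);
 *   if (status & TIOCM_CTS)
 *           ;       // remote side is signalling ready-to-receive
 */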
3105
3106/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003107/* Subsystem Restart */
3108/**********************************************************************/
3109static struct notifier_block ssr_notifier = {
3110 .notifier_call = ssr_notifier_cb,
3111};
3112
3113/**
3114 * Handle Subsystem Restart (SSR) notifications.
3115 *
3116 * @this Pointer to ssr_notifier
3117 * @code SSR Code
3118 * @data Data pointer (not used)
3119 */
3120static int ssr_notifier_cb(struct notifier_block *this,
3121 unsigned long code,
3122 void *data)
3123{
3124 unsigned long flags;
3125 int power_off_uart = 0;
3126
Eric Holmbergd2697902012-06-15 09:58:46 -06003127 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3128 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3129 mutex_lock(&smux.mutex_lha0);
3130 smux.in_reset = 1;
3131 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003132 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003133 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3134 return NOTIFY_DONE;
3135 }
3136 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003137
3138 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003139 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003140 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003141 if (smux.tty)
3142 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003143
3144 /* Power-down UART */
3145 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3146 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003147 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003148 smux.power_state = SMUX_PWR_OFF;
3149 power_off_uart = 1;
3150 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003151 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003152 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3153
3154 if (power_off_uart)
3155 smux_uart_power_off();
3156
Eric Holmbergd2697902012-06-15 09:58:46 -06003157 smux.in_reset = 0;
3158 mutex_unlock(&smux.mutex_lha0);
3159
Eric Holmberged1f00c2012-06-07 09:45:18 -06003160 return NOTIFY_DONE;
3161}
3162
3163/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003164/* Line Discipline Interface */
3165/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003166static void smux_pdev_release(struct device *dev)
3167{
3168 struct platform_device *pdev;
3169
3170 pdev = container_of(dev, struct platform_device, dev);
3171 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3172 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3173}
3174
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003175static int smuxld_open(struct tty_struct *tty)
3176{
3177 int i;
3178 int tmp;
3179 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003180
3181 if (!smux.is_initialized)
3182 return -ENODEV;
3183
Eric Holmberged1f00c2012-06-07 09:45:18 -06003184 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003185 if (smux.ld_open_count) {
3186 pr_err("%s: %p multiple instances not supported\n",
3187 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003188 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003189 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003190 }
3191
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003192 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003193 pr_err("%s: tty->ops->write is NULL\n", __func__);
3194 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003195 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003196 }
3197
3198 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003199 ++smux.ld_open_count;
3200 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003201 smux.tty = tty;
3202 tty->disc_data = &smux;
3203 tty->receive_room = TTY_RECEIVE_ROOM;
3204 tty_driver_flush_buffer(tty);
3205
3206 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003207 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003208 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003209 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003210 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003211 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003212 queue_work(smux_tx_wq, &smux_inactivity_work);
3213 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003214 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003215 }
3216
3217 /* register platform devices */
3218 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003219 SMUX_DBG("%s: register pdev '%s'\n",
3220 __func__, smux_devs[i].name);
3221 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003222 tmp = platform_device_register(&smux_devs[i]);
3223 if (tmp)
3224 pr_err("%s: error %d registering device %s\n",
3225 __func__, tmp, smux_devs[i].name);
3226 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003227 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003228 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003229}
3230
3231static void smuxld_close(struct tty_struct *tty)
3232{
3233 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003234 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003235 int i;
3236
Eric Holmberged1f00c2012-06-07 09:45:18 -06003237 SMUX_DBG("%s: ldisc unload\n", __func__);
3238 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003239 if (smux.ld_open_count <= 0) {
3240 pr_err("%s: invalid ld count %d\n", __func__,
3241 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003242 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003243 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003244 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003245 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003246 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003247
3248 /* Cleanup channels */
3249 smux_lch_purge();
3250
3251 /* Unregister platform devices */
3252 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3253 SMUX_DBG("%s: unregister pdev '%s'\n",
3254 __func__, smux_devs[i].name);
3255 platform_device_unregister(&smux_devs[i]);
3256 }
3257
3258 /* Schedule UART power-up if it's down */
3259 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003260 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003261 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003262 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003263 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003264 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3265
3266 if (power_up_uart)
3267 smux_uart_power_on();
3268
3269 /* Disconnect from TTY */
3270 smux.tty = NULL;
3271 mutex_unlock(&smux.mutex_lha0);
3272 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003273}
3274
3275/**
3276 * Receive data from TTY Line Discipline.
3277 *
3278 * @tty TTY structure
3279 * @cp Character data
3280 * @fp Flag data
3281 * @count Size of character and flag data
3282 */
3283void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3284 char *fp, int count)
3285{
3286 int i;
3287 int last_idx = 0;
3288 const char *tty_name = NULL;
3289 char *f;
3290
3291 if (smux_debug_mask & MSM_SMUX_DEBUG)
3292 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3293 16, 1, cp, count, true);
3294
3295 /* verify error flags */
3296 for (i = 0, f = fp; i < count; ++i, ++f) {
3297 if (*f != TTY_NORMAL) {
3298 if (tty)
3299 tty_name = tty->name;
3300 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3301 tty_name, *f, tty_flag_to_str(*f));
3302
3303 /* feed all previous valid data to the parser */
3304 smux_rx_state_machine(cp + last_idx, i - last_idx,
3305 TTY_NORMAL);
3306
3307 /* feed bad data to parser */
3308 smux_rx_state_machine(cp + i, 1, *f);
3309 last_idx = i + 1;
3310 }
3311 }
3312
3313 /* feed data to RX state machine */
3314 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3315}
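
/*
 * Worked example for the flag handling above: with count == 5 and an error
 * flag on byte 2 (fp = {N, N, TTY_FRAME, N, N}), the loop makes three calls:
 *
 *   smux_rx_state_machine(cp + 0, 2, TTY_NORMAL);   // bytes 0-1
 *   smux_rx_state_machine(cp + 2, 1, TTY_FRAME);    // the faulted byte
 *   smux_rx_state_machine(cp + 3, 2, TTY_NORMAL);   // bytes 3-4 (tail call)
 *
 * so valid data is parsed normally while the RX state machine sees the error
 * flag exactly once, for the faulted byte only.
 */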
3316
3317static void smuxld_flush_buffer(struct tty_struct *tty)
3318{
3319 pr_err("%s: not supported\n", __func__);
3320}
3321
3322static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3323{
3324 pr_err("%s: not supported\n", __func__);
3325 return -ENODEV;
3326}
3327
3328static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3329 unsigned char __user *buf, size_t nr)
3330{
3331 pr_err("%s: not supported\n", __func__);
3332 return -ENODEV;
3333}
3334
3335static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3336 const unsigned char *buf, size_t nr)
3337{
3338 pr_err("%s: not supported\n", __func__);
3339 return -ENODEV;
3340}
3341
3342static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3343 unsigned int cmd, unsigned long arg)
3344{
3345 pr_err("%s: not supported\n", __func__);
3346 return -ENODEV;
3347}
3348
3349static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3350 struct poll_table_struct *tbl)
3351{
3352 pr_err("%s: not supported\n", __func__);
3353 return -ENODEV;
3354}
3355
3356static void smuxld_write_wakeup(struct tty_struct *tty)
3357{
3358 pr_err("%s: not supported\n", __func__);
3359}
3360
3361static struct tty_ldisc_ops smux_ldisc_ops = {
3362 .owner = THIS_MODULE,
3363 .magic = TTY_LDISC_MAGIC,
3364 .name = "n_smux",
3365 .open = smuxld_open,
3366 .close = smuxld_close,
3367 .flush_buffer = smuxld_flush_buffer,
3368 .chars_in_buffer = smuxld_chars_in_buffer,
3369 .read = smuxld_read,
3370 .write = smuxld_write,
3371 .ioctl = smuxld_ioctl,
3372 .poll = smuxld_poll,
3373 .receive_buf = smuxld_receive_buf,
3374 .write_wakeup = smuxld_write_wakeup
3375};
3376
3377static int __init smux_init(void)
3378{
3379 int ret;
3380
Eric Holmberged1f00c2012-06-07 09:45:18 -06003381 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003382
3383 spin_lock_init(&smux.rx_lock_lha1);
3384 smux.rx_state = SMUX_RX_IDLE;
3385 smux.power_state = SMUX_PWR_OFF;
3386 smux.pwr_wakeup_delay_us = 1;
3387 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003388 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003389 smux.rx_activity_flag = 0;
3390 smux.tx_activity_flag = 0;
3391 smux.recv_len = 0;
3392 smux.tty = NULL;
3393 smux.ld_open_count = 0;
3394 smux.in_reset = 0;
3395 smux.is_initialized = 1;
3396 smux_byte_loopback = 0;
3397
3398 spin_lock_init(&smux.tx_lock_lha2);
3399 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3400
3401 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3402 if (ret != 0) {
3403 pr_err("%s: error %d registering line discipline\n",
3404 __func__, ret);
3405 return ret;
3406 }
3407
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003408 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003409
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003410 ret = lch_init();
3411 if (ret != 0) {
3412 pr_err("%s: lch_init failed\n", __func__);
3413 return ret;
3414 }
3415
3416 return 0;
3417}
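
/*
 * Attaching the line discipline (illustrative sketch, from user space; the
 * device node name is an assumption and not part of this driver):
 *
 *   int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *   int ldisc = N_SMUX;              // line-discipline number from the headers
 *   ioctl(fd, TIOCSETD, &ldisc);     // triggers smuxld_open()
 *
 * Once the ldisc is attached, smuxld_open() registers the smux platform
 * devices and kernel clients can open logical channels with msm_smux_open().
 */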
3418
3419static void __exit smux_exit(void)
3420{
3421 int ret;
3422
3423 ret = tty_unregister_ldisc(N_SMUX);
3424 if (ret != 0) {
3425 pr_err("%s error %d unregistering line discipline\n",
3426 __func__, ret);
3427 return;
3428 }
3429}
3430
3431module_init(smux_init);
3432module_exit(smux_exit);
3433
3434MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3435MODULE_LICENSE("GPL v2");
3436MODULE_ALIAS_LDISC(N_SMUX);