tty: n_smux: Add Dedicated Power Control Queue
The current design sends power commands through the existing logical
channel queues and relies on a command filter in the TX worker that
transmits all command messages whenever the TTY UART link is up.
This allows open and close events to be sent after a link power-down
has been requested but before the ACK that actually powers down the
link has been received.
Update the implementation to add a dedicated queue for power-control
commands and bytes. All power commands and bytes are now sent by the
TX worker, which prioritizes them over logical channel commands.
CRs-Fixed: 369044
Change-Id: I05c080885c79c3510d02cc360bdfe879d68a962c
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
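For reference, the following is a minimal user-space sketch (not part of the
patch) of the ordering rule the dedicated power queue establishes: power-control
packets are always drained before logical channel packets, and channel traffic
is held back unless the link is fully powered on. The types and helpers below
(struct pkt, struct queue, enqueue(), dequeue(), tx_worker()) are simplified
stand-ins for the driver's smux_pkt_t, power_queue, and SMUX_PWR_* definitions.

#include <stdio.h>
#include <stdlib.h>

enum pwr_state { PWR_OFF, PWR_ON, PWR_TURNING_OFF_FLUSH, PWR_OFF_FLUSH };

struct pkt {
	const char *desc;
	struct pkt *next;
};

struct queue {
	struct pkt *head;
	struct pkt *tail;
};

/* Append a packet description to the tail of a queue. */
static void enqueue(struct queue *q, const char *desc)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->desc = desc;
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* Detach and return the packet at the head of a queue, or NULL. */
static struct pkt *dequeue(struct queue *q)
{
	struct pkt *p = q->head;

	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}
	return p;
}

/*
 * One pass of a simplified TX worker: the power queue always drains
 * first, and logical channel data is sent only while the link is
 * fully powered on.
 */
static void tx_worker(struct queue *power_q, struct queue *ch_q,
		      enum pwr_state state)
{
	struct pkt *p;

	while ((p = dequeue(power_q)) != NULL) {
		printf("tx power: %s\n", p->desc);
		free(p);
	}

	if (state != PWR_ON)
		return;	/* channel traffic waits for the next wakeup */

	while ((p = dequeue(ch_q)) != NULL) {
		printf("tx data:  %s\n", p->desc);
		free(p);
	}
}

int main(void)
{
	struct queue power_q = { 0 };
	struct queue ch_q = { 0 };

	/* A channel open racing with a power-down acknowledgement. */
	enqueue(&ch_q, "SMUX_CMD_OPEN_LCH (held back)");
	enqueue(&power_q, "SMUX_CMD_PWR_CTL ack");

	/* Only the power ack goes out while the link is shutting down. */
	tx_worker(&power_q, &ch_q, PWR_TURNING_OFF_FLUSH);
	return 0;
}

In the patch the equivalent check sits at the top of the TX worker loop:
smux.power_queue is drained (and the UART flushed) before any channel on
lch_tx_ready_list is considered.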
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index 0006d23..568f010 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -265,6 +265,7 @@
unsigned pwr_wakeup_delay_us;
unsigned tx_activity_flag;
unsigned powerdown_enabled;
+ struct list_head power_queue;
};
@@ -317,6 +318,7 @@
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
+static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
const union notifier_metadata *metadata);
@@ -438,6 +440,18 @@
list_del(&ch->tx_ready_list);
INIT_LIST_HEAD(&ch->tx_ready_list);
}
+
+ /* Purge Power Queue */
+ while (!list_empty(&smux.power_queue)) {
+ struct smux_pkt_t *pkt;
+
+ pkt = list_first_entry(&smux.power_queue,
+ struct smux_pkt_t,
+ list);
+ list_del(&pkt->list);
+ SMUX_DBG("%s: emptying power queue pkt=%p\n",
+ __func__, pkt);
+ smux_free_pkt(pkt);
+ }
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
/* Close all ports */
@@ -981,19 +995,20 @@
*/
static void smux_send_byte(char ch)
{
- struct smux_pkt_t pkt;
+ struct smux_pkt_t *pkt;
- smux_init_pkt(&pkt);
+ pkt = smux_alloc_pkt();
+ if (!pkt) {
+ pr_err("%s: alloc failure for byte %x\n", __func__, ch);
+ return;
+ }
+ pkt->hdr.cmd = SMUX_CMD_BYTE;
+ pkt->hdr.flags = ch;
+ pkt->hdr.lcid = SMUX_BROADCAST_LCID;
- pkt.hdr.cmd = SMUX_CMD_BYTE;
- pkt.hdr.flags = ch;
- pkt.hdr.lcid = 0;
- pkt.hdr.flags = ch;
- SMUX_LOG_PKT_TX(&pkt);
- if (!smux_byte_loopback)
- smux_tx_tty(&pkt);
- else
- smux_tx_loopback(&pkt);
+ list_add_tail(&pkt->list, &smux.power_queue);
+ queue_work(smux_tx_wq, &smux_tx_work);
}
/**
@@ -1481,8 +1496,10 @@
union notifier_metadata metadata;
unsigned long flags;
- if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+ if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
+ pr_err("%s: invalid packet or channel id\n", __func__);
return -ENXIO;
+ }
lcid = pkt->hdr.lcid;
ch = &smux_lch[lcid];
@@ -1576,7 +1593,6 @@
*/
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
- int tx_ready = 0;
struct smux_pkt_t *ack_pkt;
unsigned long flags;
@@ -1594,7 +1610,17 @@
__func__, smux.power_state);
}
} else {
- /* remote sleep request */
+ /*
+ * Remote sleep request
+ *
+ * Even if we have data pending, we need to transition to the
+ * POWER_OFF state and then perform a wakeup since the remote
+ * side has requested a power-down.
+ *
+ * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
+ * the TX thread will set the state to SMUX_PWR_OFF_FLUSH
+ * once it sends the power-down ACK.
+ */
if (smux.power_state == SMUX_PWR_ON
|| smux.power_state == SMUX_PWR_TURNING_OFF) {
ack_pkt = smux_alloc_pkt();
@@ -1603,18 +1629,15 @@
smux.power_state,
SMUX_PWR_TURNING_OFF_FLUSH);
- /* send power-down request */
+ smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
+
+ /* send power-down ack */
ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
- ack_pkt->hdr.lcid = pkt->hdr.lcid;
- smux_tx_queue(ack_pkt,
- &smux_lch[ack_pkt->hdr.lcid], 0);
- tx_ready = 1;
- smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
- queue_delayed_work(smux_tx_wq,
- &smux_delayed_inactivity_work,
- msecs_to_jiffies(
- SMUX_INACTIVITY_TIMEOUT_MS));
+ ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
+ list_add_tail(&ack_pkt->list,
+ &smux.power_queue);
+ queue_work(smux_tx_wq, &smux_tx_work);
}
} else {
pr_err("%s: sleep request invalid in state %d\n",
@@ -1623,9 +1646,6 @@
}
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- if (tx_ready)
- list_channel(&smux_lch[ack_pkt->hdr.lcid]);
-
return 0;
}
@@ -2006,6 +2026,23 @@
}
/**
+ * Flush pending TTY TX data.
+ */
+static void smux_flush_tty(void)
+{
+ if (!smux.tty) {
+ pr_err("%s: ldisc not loaded\n", __func__);
+ return;
+ }
+
+ tty_wait_until_sent(smux.tty,
+ msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
+
+ if (tty_chars_in_buffer(smux.tty) > 0)
+ pr_err("%s: unable to flush UART queue\n", __func__);
+}
+
+/**
* Purge TX queue for logical channel.
*
* @ch Logical channel pointer
@@ -2151,7 +2188,6 @@
*/
static void smux_inactivity_worker(struct work_struct *work)
{
- int tx_ready = 0;
struct smux_pkt_t *pkt;
unsigned long flags;
@@ -2173,11 +2209,13 @@
/* send power-down request */
pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
pkt->hdr.flags = 0;
- pkt->hdr.lcid = 0;
- smux_tx_queue(pkt,
- &smux_lch[SMUX_TEST_LCID],
- 0);
- tx_ready = 1;
+ pkt->hdr.lcid = SMUX_BROADCAST_LCID;
+ list_add_tail(&pkt->list,
+ &smux.power_queue);
+ queue_work(smux_tx_wq, &smux_tx_work);
+ } else {
+ pr_err("%s: packet alloc failed\n",
+ __func__);
}
}
} else {
@@ -2188,21 +2226,26 @@
smux.tx_activity_flag = 0;
smux.rx_activity_flag = 0;
- spin_unlock(&smux.tx_lock_lha2);
- spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
-
- if (tx_ready)
- list_channel(&smux_lch[SMUX_TEST_LCID]);
-
- if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
- (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
+ if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
/* ready to power-down the UART */
SMUX_DBG("%s: Power %d->%d\n", __func__,
smux.power_state, SMUX_PWR_OFF);
+ smux.power_state = SMUX_PWR_OFF;
+
+ /* if data is pending, schedule a new wakeup */
+ if (!list_empty(&smux.lch_tx_ready_list) ||
+ !list_empty(&smux.power_queue))
+ queue_work(smux_tx_wq, &smux_tx_work);
+
+ spin_unlock(&smux.tx_lock_lha2);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+
+ /* flush UART output queue and power down */
+ smux_flush_tty();
smux_uart_power_off();
- spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- smux.power_state = SMUX_PWR_OFF;
- spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ } else {
+ spin_unlock(&smux.tx_lock_lha2);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
}
/* reschedule inactivity worker */
@@ -2404,23 +2447,13 @@
pkt = NULL;
low_wm_notif = 0;
- /* get the next ready channel */
spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- if (list_empty(&smux.lch_tx_ready_list)) {
- /* no ready channels */
- SMUX_DBG("%s: no more ready channels, exiting\n",
- __func__);
- spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- break;
- }
- smux.tx_activity_flag = 1;
- if (smux.power_state != SMUX_PWR_ON
- && smux.power_state != SMUX_PWR_TURNING_OFF
- && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
- /* Link isn't ready to transmit */
- if (smux.power_state == SMUX_PWR_OFF) {
- /* link is off, trigger wakeup */
+ /* handle wakeup if needed */
+ if (smux.power_state == SMUX_PWR_OFF) {
+ if (!list_empty(&smux.lch_tx_ready_list) ||
+ !list_empty(&smux.power_queue)) {
+ /* data to transmit, do wakeup */
smux.pwr_wakeup_delay_us = 1;
SMUX_DBG("%s: Power %d->%d\n", __func__,
smux.power_state,
@@ -2431,15 +2464,65 @@
smux_uart_power_on();
queue_work(smux_tx_wq, &smux_wakeup_work);
} else {
- SMUX_DBG("%s: can not tx with power state %d\n",
- __func__,
- smux.power_state);
+ /* no activity -- stay asleep */
spin_unlock_irqrestore(&smux.tx_lock_lha2,
flags);
}
break;
}
+ /* process any pending power packets */
+ if (!list_empty(&smux.power_queue)) {
+ pkt = list_first_entry(&smux.power_queue,
+ struct smux_pkt_t, list);
+ list_del(&pkt->list);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+ /* send the packet */
+ SMUX_LOG_PKT_TX(pkt);
+ if (!smux_byte_loopback) {
+ smux_tx_tty(pkt);
+ smux_flush_tty();
+ } else {
+ smux_tx_loopback(pkt);
+ }
+
+ /* Adjust power state if this is a flush command */
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
+ pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
+ (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_OFF_FLUSH);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ }
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+ smux_free_pkt(pkt);
+ continue;
+ }
+
+ /* get the next ready channel */
+ if (list_empty(&smux.lch_tx_ready_list)) {
+ /* no ready channels */
+ SMUX_DBG("%s: no more ready channels, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ }
+ smux.tx_activity_flag = 1;
+
+ if (smux.power_state != SMUX_PWR_ON) {
+ /* channel not ready to transmit */
+ SMUX_DBG("%s: can not tx with power state %d\n",
+ __func__,
+ smux.power_state);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ }
+
/* get the next packet to send and rotate channel list */
ch = list_first_entry(&smux.lch_tx_ready_list,
struct smux_lch_t,
@@ -3141,13 +3224,9 @@
/* Schedule UART power-up if it's down */
spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- if (smux.power_state == SMUX_PWR_OFF ||
- smux.power_state == SMUX_PWR_OFF_FLUSH) {
- smux.power_state = SMUX_PWR_OFF;
+ if (smux.power_state == SMUX_PWR_OFF)
power_up_uart = 1;
- } else {
- smux.power_state = SMUX_PWR_OFF;
- }
+ smux.power_state = SMUX_PWR_OFF;
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
if (power_up_uart)
@@ -3272,6 +3351,7 @@
smux.power_state = SMUX_PWR_OFF;
smux.pwr_wakeup_delay_us = 1;
smux.powerdown_enabled = 0;
+ INIT_LIST_HEAD(&smux.power_queue);
smux.rx_activity_flag = 0;
smux.tx_activity_flag = 0;
smux.recv_len = 0;
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index 2c8819c..f644ff0 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -16,6 +16,7 @@
#define SMUX_PRIVATE_H
#define SMUX_MAX_PKT_SIZE 8192
+#define SMUX_BROADCAST_LCID 0xFF
/* SMUX Protocol Characters */
#define SMUX_MAGIC 0x33FC