Merge changes I05c08088,I0fea4c5f into msm-3.0

* changes:
  tty: n_smux: Add Dedicated Power Control Queue
  tty: n_smux: Add support for SMUX subsystem restart
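
These changes route all power-control traffic (wakeup bytes, power-down
requests and ACKs) through a dedicated power queue that the TX worker
drains ahead of channel data, and register a subsystem restart (SSR)
notifier for the "qsc" subsystem that purges all logical channels and
powers down the UART once the remote side has shut down.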
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index 5b5de03..568f010 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -25,6 +25,8 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <mach/subsystem_notif.h>
+#include <mach/subsystem_restart.h>
 #include <mach/msm_serial_hs.h>
 #include "smux_private.h"
 #include "smux_loopback.h"
@@ -239,7 +241,7 @@
  * allowed.
  */
 struct smux_ldisc_t {
-	spinlock_t lock_lha0;
+	struct mutex mutex_lha0;
 
 	int is_initialized;
 	int in_reset;
@@ -263,6 +265,7 @@
 	unsigned pwr_wakeup_delay_us;
 	unsigned tx_activity_flag;
 	unsigned powerdown_enabled;
+	struct list_head power_queue;
 };
 
 
@@ -315,6 +318,13 @@
 static void list_channel(struct smux_lch_t *ch);
 static int smux_send_status_cmd(struct smux_lch_t *ch);
 static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
+static void smux_flush_tty(void);
+static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
+static int schedule_notify(uint8_t lcid, int event,
+			const union notifier_metadata *metadata);
+static int ssr_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data);
 
 /**
  * Convert TTY Error Flags to string for logging purposes.
@@ -409,6 +419,82 @@
 	return 0;
 }
 
+/**
+ * Empty and clean up all SMUX logical channels for subsystem restart or line
+ * discipline disconnect.
+ */
+static void smux_lch_purge(void)
+{
+	struct smux_lch_t *ch;
+	unsigned long flags;
+	int i;
+
+	/* Empty TX ready list */
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	while (!list_empty(&smux.lch_tx_ready_list)) {
+		SMUX_DBG("%s: emptying ready list %p\n",
+				__func__, smux.lch_tx_ready_list.next);
+		ch = list_first_entry(&smux.lch_tx_ready_list,
+						struct smux_lch_t,
+						tx_ready_list);
+		list_del(&ch->tx_ready_list);
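+		/* re-init node so the channel can be listed again later */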
+		INIT_LIST_HEAD(&ch->tx_ready_list);
+	}
+
+	/* Purge Power Queue */
+	while (!list_empty(&smux.power_queue)) {
+		struct smux_pkt_t *pkt;
+
+		pkt = list_first_entry(&smux.power_queue,
+						struct smux_pkt_t,
+						list);
+		list_del(&pkt->list);
+		SMUX_DBG("%s: emptying power queue pkt=%p\n",
+				__func__, pkt);
+		smux_free_pkt(pkt);
+	}
+	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+	/* Close all ports */
+	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
+		ch = &smux_lch[i];
+		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
+
+		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+		/* Purge TX queue */
+		spin_lock(&ch->tx_lock_lhb2);
+		smux_purge_ch_tx_queue(ch);
+		spin_unlock(&ch->tx_lock_lhb2);
+
+		/* Notify user of disconnect and reset channel state */
+		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
+			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+			union notifier_metadata meta;
+
+			meta.disconnected.is_ssr = smux.in_reset;
+			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
+		}
+
+		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+		ch->local_mode = SMUX_LCH_MODE_NORMAL;
+		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
+		ch->tx_flow_control = 0;
+
+		/* Purge RX retry queue */
+		if (ch->rx_retry_queue_cnt)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+
+		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	}
+
+	/* Flush TX/RX workqueues */
+	SMUX_DBG("%s: flushing tx wq\n", __func__);
+	flush_workqueue(smux_tx_wq);
+	SMUX_DBG("%s: flushing rx wq\n", __func__);
+	flush_workqueue(smux_rx_wq);
+}
+
 int smux_assert_lch_id(uint32_t lcid)
 {
 	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
@@ -830,7 +916,7 @@
 	if (!data)
 		return 0;
 
-	while (len > 0) {
+	while (len > 0 && !smux.in_reset) {
 		data_written = smux.tty->ops->write(smux.tty, data, len);
 		if (data_written >= 0) {
 			len -= data_written;
@@ -844,8 +930,6 @@
 		if (len)
 			tty_wait_until_sent(smux.tty,
 				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
-
-		/* FUTURE - add SSR logic */
 	}
 	return 0;
 }
@@ -911,19 +995,20 @@
  */
 static void smux_send_byte(char ch)
 {
-	struct smux_pkt_t pkt;
+	struct smux_pkt_t *pkt;
 
-	smux_init_pkt(&pkt);
+	pkt = smux_alloc_pkt();
+	if (!pkt) {
+		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
+		return;
+	}
+	pkt->hdr.cmd = SMUX_CMD_BYTE;
+	pkt->hdr.flags = ch;
+	pkt->hdr.lcid = SMUX_BROADCAST_LCID;
 
-	pkt.hdr.cmd = SMUX_CMD_BYTE;
-	pkt.hdr.flags = ch;
-	pkt.hdr.lcid = 0;
-	pkt.hdr.flags = ch;
-	SMUX_LOG_PKT_TX(&pkt);
-	if (!smux_byte_loopback)
-		smux_tx_tty(&pkt);
-	else
-		smux_tx_loopback(&pkt);
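+	/* queue on the dedicated power queue and kick the TX worker */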
+	list_add_tail(&pkt->list, &smux.power_queue);
+	queue_work(smux_tx_wq, &smux_tx_work);
 }
 
 /**
@@ -1411,8 +1496,10 @@
 	union notifier_metadata metadata;
 	unsigned long flags;
 
-	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
+		pr_err("%s: invalid packet or channel id\n", __func__);
 		return -ENXIO;
+	}
 
 	lcid = pkt->hdr.lcid;
 	ch = &smux_lch[lcid];
@@ -1506,7 +1593,6 @@
  */
 static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
 {
-	int tx_ready = 0;
 	struct smux_pkt_t *ack_pkt;
 	unsigned long flags;
 
@@ -1524,7 +1610,17 @@
 					__func__, smux.power_state);
 		}
 	} else {
-		/* remote sleep request */
+		/*
+		 * Remote sleep request
+		 *
+		 * Even if we have data pending, we need to transition to the
+		 * POWER_OFF state and then perform a wakeup since the remote
+		 * side has requested a power-down.
+		 *
+		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
+		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
+		 * when it sends the packet.
+		 */
 		if (smux.power_state == SMUX_PWR_ON
 			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
 			ack_pkt = smux_alloc_pkt();
@@ -1533,18 +1629,15 @@
 						smux.power_state,
 						SMUX_PWR_TURNING_OFF_FLUSH);
 
-				/* send power-down request */
+				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
+
+				/* send power-down ack */
 				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
 				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
-				ack_pkt->hdr.lcid = pkt->hdr.lcid;
-				smux_tx_queue(ack_pkt,
-					      &smux_lch[ack_pkt->hdr.lcid], 0);
-				tx_ready = 1;
-				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
-				queue_delayed_work(smux_tx_wq,
-					&smux_delayed_inactivity_work,
-					msecs_to_jiffies(
-						SMUX_INACTIVITY_TIMEOUT_MS));
+				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
+				list_add_tail(&ack_pkt->list,
+						&smux.power_queue);
+				queue_work(smux_tx_wq, &smux_tx_work);
 			}
 		} else {
 			pr_err("%s: sleep request invalid in state %d\n",
@@ -1553,9 +1646,6 @@
 	}
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
-	if (tx_ready)
-		list_channel(&smux_lch[ack_pkt->hdr.lcid]);
-
 	return 0;
 }
 
@@ -1936,6 +2026,67 @@
 }
 
 /**
+ * Flush pending TTY TX data.
+ */
+static void smux_flush_tty(void)
+{
+	if (!smux.tty) {
+		pr_err("%s: ldisc not loaded\n", __func__);
+		return;
+	}
+
+	tty_wait_until_sent(smux.tty,
+			msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
+
+	if (tty_chars_in_buffer(smux.tty) > 0)
+		pr_err("%s: unable to flush UART queue\n", __func__);
+}
+
+/**
+ * Purge TX queue for logical channel.
+ *
+ * @ch     Logical channel pointer
+ *
+ * Must be called with the following spinlocks locked:
+ *  state_lock_lhb1
+ *  tx_lock_lhb2
+ */
+static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
+{
+	struct smux_pkt_t *pkt;
+	int send_disconnect = 0;
+
+	while (!list_empty(&ch->tx_queue)) {
+		pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
+							list);
+		list_del(&pkt->list);
+
+		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
+			/* Open was never sent, just force to closed state */
+			ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+			send_disconnect = 1;
+		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+			/* Notify client of failed write */
+			union notifier_metadata meta_write;
+
+			meta_write.write.pkt_priv = pkt->priv;
+			meta_write.write.buffer = pkt->payload;
+			meta_write.write.len = pkt->hdr.payload_len;
+			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
+		}
+		smux_free_pkt(pkt);
+	}
+
+	if (send_disconnect) {
+		union notifier_metadata meta_disconnected;
+
+		meta_disconnected.disconnected.is_ssr = smux.in_reset;
+		schedule_notify(ch->lcid, SMUX_DISCONNECTED,
+			&meta_disconnected);
+	}
+}
+
+/**
  * Power-up the UART.
  */
 static void smux_uart_power_on(void)
@@ -1981,7 +2132,7 @@
 	unsigned wakeup_delay;
 	int complete = 0;
 
-	for (;;) {
+	while (!smux.in_reset) {
 		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 		if (smux.power_state == SMUX_PWR_ON) {
 			/* wakeup complete */
@@ -2037,7 +2188,6 @@
  */
 static void smux_inactivity_worker(struct work_struct *work)
 {
-	int tx_ready = 0;
 	struct smux_pkt_t *pkt;
 	unsigned long flags;
 
@@ -2059,11 +2209,13 @@
 					/* send power-down request */
 					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
 					pkt->hdr.flags = 0;
-					pkt->hdr.lcid = 0;
-					smux_tx_queue(pkt,
-						&smux_lch[SMUX_TEST_LCID],
-						0);
-					tx_ready = 1;
+					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
+					list_add_tail(&pkt->list,
+							&smux.power_queue);
+					queue_work(smux_tx_wq, &smux_tx_work);
+				} else {
+					pr_err("%s: packet alloc failed\n",
+							__func__);
 				}
 			}
 		} else {
@@ -2074,21 +2226,26 @@
 	smux.tx_activity_flag = 0;
 	smux.rx_activity_flag = 0;
 
-	spin_unlock(&smux.tx_lock_lha2);
-	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
-
-	if (tx_ready)
-		list_channel(&smux_lch[SMUX_TEST_LCID]);
-
-	if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
-	    (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
+	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
 		/* ready to power-down the UART */
 		SMUX_DBG("%s: Power %d->%d\n", __func__,
 				smux.power_state, SMUX_PWR_OFF);
+		smux.power_state = SMUX_PWR_OFF;
+
+		/* if data is pending, schedule a new wakeup */
+		if (!list_empty(&smux.lch_tx_ready_list) ||
+		   !list_empty(&smux.power_queue))
+			queue_work(smux_tx_wq, &smux_tx_work);
+
+		spin_unlock(&smux.tx_lock_lha2);
+		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+
+		/* flush UART output queue and power down */
+		smux_flush_tty();
 		smux_uart_power_off();
-		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
-		smux.power_state = SMUX_PWR_OFF;
-		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+	} else {
+		spin_unlock(&smux.tx_lock_lha2);
+		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
 	}
 
 	/* reschedule inactivity worker */
@@ -2286,27 +2443,17 @@
 	 * inserting after the tail.  The locks can then be released
 	 * while the packet is processed.
 	 */
-	for (;;) {
+	while (!smux.in_reset) {
 		pkt = NULL;
 		low_wm_notif = 0;
 
-		/* get the next ready channel */
 		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
-		if (list_empty(&smux.lch_tx_ready_list)) {
-			/* no ready channels */
-			SMUX_DBG("%s: no more ready channels, exiting\n",
-					__func__);
-			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-			break;
-		}
-		smux.tx_activity_flag = 1;
 
-		if (smux.power_state != SMUX_PWR_ON
-			&& smux.power_state != SMUX_PWR_TURNING_OFF
-			&& smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
-			/* Link isn't ready to transmit */
-			if (smux.power_state == SMUX_PWR_OFF) {
-				/* link is off, trigger wakeup */
+		/* handle wakeup if needed */
+		if (smux.power_state == SMUX_PWR_OFF) {
+			if (!list_empty(&smux.lch_tx_ready_list) ||
+			   !list_empty(&smux.power_queue)) {
+				/* data to transmit, do wakeup */
 				smux.pwr_wakeup_delay_us = 1;
 				SMUX_DBG("%s: Power %d->%d\n", __func__,
 						smux.power_state,
@@ -2317,15 +2464,65 @@
 				smux_uart_power_on();
 				queue_work(smux_tx_wq, &smux_wakeup_work);
 			} else {
-				SMUX_DBG("%s: can not tx with power state %d\n",
-						__func__,
-						smux.power_state);
+				/* no activity -- stay asleep */
 				spin_unlock_irqrestore(&smux.tx_lock_lha2,
 						flags);
 			}
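+			/* exit; TX restarts when the wakeup completes or new data arrives */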
 			break;
 		}
 
+		/* process any pending power packets */
+		if (!list_empty(&smux.power_queue)) {
+			pkt = list_first_entry(&smux.power_queue,
+					struct smux_pkt_t, list);
+			list_del(&pkt->list);
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+			/* send the packet */
+			SMUX_LOG_PKT_TX(pkt);
+			if (!smux_byte_loopback) {
+				smux_tx_tty(pkt);
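+				/* wait for the power packet to go out on the wire */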
+				smux_flush_tty();
+			} else {
+				smux_tx_loopback(pkt);
+			}
+
+			/* Adjust power state if this is a flush command */
+			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
+				pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
+				(pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
+				SMUX_DBG("%s: Power %d->%d\n", __func__,
+						smux.power_state,
+						SMUX_PWR_OFF_FLUSH);
+				smux.power_state = SMUX_PWR_OFF_FLUSH;
+				queue_work(smux_tx_wq, &smux_inactivity_work);
+			}
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+			smux_free_pkt(pkt);
+			continue;
+		}
+
+		/* get the next ready channel */
+		if (list_empty(&smux.lch_tx_ready_list)) {
+			/* no ready channels */
+			SMUX_DBG("%s: no more ready channels, exiting\n",
+					__func__);
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+			break;
+		}
+		smux.tx_activity_flag = 1;
+
+		if (smux.power_state != SMUX_PWR_ON) {
+			/* channel not ready to transmit */
+			SMUX_DBG("%s: can not tx with power state %d\n",
+					__func__,
+					smux.power_state);
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+			break;
+		}
+
 		/* get the next packet to send and rotate channel list */
 		ch = list_first_entry(&smux.lch_tx_ready_list,
 						struct smux_lch_t,
@@ -2563,30 +2760,7 @@
 
 	/* Purge TX queue */
 	spin_lock(&ch->tx_lock_lhb2);
-	while (!list_empty(&ch->tx_queue)) {
-		pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
-							list);
-		list_del(&pkt->list);
-
-		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
-			/* Open was never sent, just force to closed state */
-			union notifier_metadata meta_disconnected;
-
-			ch->local_state = SMUX_LCH_LOCAL_CLOSED;
-			meta_disconnected.disconnected.is_ssr = 0;
-			schedule_notify(lcid, SMUX_DISCONNECTED,
-				&meta_disconnected);
-		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
-			/* Notify client of failed write */
-			union notifier_metadata meta_write;
-
-			meta_write.write.pkt_priv = pkt->priv;
-			meta_write.write.buffer = pkt->payload;
-			meta_write.write.len = pkt->hdr.payload_len;
-			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
-		}
-		smux_free_pkt(pkt);
-	}
+	smux_purge_ch_tx_queue(ch);
 	spin_unlock(&ch->tx_lock_lhb2);
 
 	/* Send Close Command */
@@ -2912,8 +3086,59 @@
 }
 
 /**********************************************************************/
+/* Subsystem Restart                                                  */
+/**********************************************************************/
+static struct notifier_block ssr_notifier = {
+	.notifier_call = ssr_notifier_cb,
+};
+
+/**
+ * Handle Subsystem Restart (SSR) notifications.
+ *
+ * @this Pointer to ssr_notifier
+ * @code SSR Code
+ * @data Data pointer (not used)
+ */
+static int ssr_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data)
+{
+	unsigned long flags;
+	int power_off_uart = 0;
+
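+	/* only act once the subsystem has completed shutdown */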
+	if (code != SUBSYS_AFTER_SHUTDOWN)
+		return NOTIFY_DONE;
+
+	/* Cleanup channels */
+	smux_lch_purge();
+
+	/* Power-down UART */
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	if (smux.power_state != SMUX_PWR_OFF) {
+		SMUX_DBG("%s: SSR - turning off UART\n", __func__);
+		smux.power_state = SMUX_PWR_OFF;
+		power_off_uart = 1;
+	}
+	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+	if (power_off_uart)
+		smux_uart_power_off();
+
+	return NOTIFY_DONE;
+}
+
+/**********************************************************************/
 /* Line Discipline Interface                                          */
 /**********************************************************************/
+static void smux_pdev_release(struct device *dev)
+{
+	struct platform_device *pdev;
+
+	pdev = container_of(dev, struct platform_device, dev);
+	SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
+	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
+}
+
 static int smuxld_open(struct tty_struct *tty)
 {
 	int i;
@@ -2923,66 +3148,94 @@
 	if (!smux.is_initialized)
 		return -ENODEV;
 
-	spin_lock_irqsave(&smux.lock_lha0, flags);
+	mutex_lock(&smux.mutex_lha0);
 	if (smux.ld_open_count) {
 		pr_err("%s: %p multiple instances not supported\n",
 			__func__, tty);
-		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		mutex_unlock(&smux.mutex_lha0);
 		return -EEXIST;
 	}
 
-	++smux.ld_open_count;
 	if (tty->ops->write == NULL) {
-		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		pr_err("%s: tty->ops->write already NULL\n", __func__);
+		mutex_unlock(&smux.mutex_lha0);
 		return -EINVAL;
 	}
 
 	/* connect to TTY */
+	++smux.ld_open_count;
+	smux.in_reset = 0;
 	smux.tty = tty;
 	tty->disc_data = &smux;
 	tty->receive_room = TTY_RECEIVE_ROOM;
 	tty_driver_flush_buffer(tty);
 
 	/* power-down the UART if we are idle */
-	spin_lock(&smux.tx_lock_lha2);
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 	if (smux.power_state == SMUX_PWR_OFF) {
 		SMUX_DBG("%s: powering off uart\n", __func__);
 		smux.power_state = SMUX_PWR_OFF_FLUSH;
-		spin_unlock(&smux.tx_lock_lha2);
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 		queue_work(smux_tx_wq, &smux_inactivity_work);
 	} else {
-		spin_unlock(&smux.tx_lock_lha2);
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 	}
-	spin_unlock_irqrestore(&smux.lock_lha0, flags);
 
 	/* register platform devices */
 	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
+		SMUX_DBG("%s: register pdev '%s'\n",
+				__func__, smux_devs[i].name);
+		smux_devs[i].dev.release = smux_pdev_release;
 		tmp = platform_device_register(&smux_devs[i]);
 		if (tmp)
 			pr_err("%s: error %d registering device %s\n",
 				   __func__, tmp, smux_devs[i].name);
 	}
+	mutex_unlock(&smux.mutex_lha0);
 	return 0;
 }
 
 static void smuxld_close(struct tty_struct *tty)
 {
 	unsigned long flags;
+	int power_up_uart = 0;
 	int i;
 
-	spin_lock_irqsave(&smux.lock_lha0, flags);
+	SMUX_DBG("%s: ldisc unload\n", __func__);
+	mutex_lock(&smux.mutex_lha0);
 	if (smux.ld_open_count <= 0) {
 		pr_err("%s: invalid ld count %d\n", __func__,
 			smux.ld_open_count);
-		spin_unlock_irqrestore(&smux.lock_lha0, flags);
+		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
-	spin_unlock_irqrestore(&smux.lock_lha0, flags);
-
-	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
-		platform_device_unregister(&smux_devs[i]);
-
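+	/* treat unload as a reset so in-flight TX loops bail out */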
+	smux.in_reset = 1;
 	--smux.ld_open_count;
+
+	/* Cleanup channels */
+	smux_lch_purge();
+
+	/* Unregister platform devices */
+	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
+		SMUX_DBG("%s: unregister pdev '%s'\n",
+				__func__, smux_devs[i].name);
+		platform_device_unregister(&smux_devs[i]);
+	}
+
+	/* Schedule UART power-up if it's down */
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	if (smux.power_state == SMUX_PWR_OFF)
+		power_up_uart = 1;
+	smux.power_state = SMUX_PWR_OFF;
+	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+	if (power_up_uart)
+		smux_uart_power_on();
+
+	/* Disconnect from TTY */
+	smux.tty = NULL;
+	mutex_unlock(&smux.mutex_lha0);
+	SMUX_DBG("%s: ldisc complete\n", __func__);
 }
 
 /**
@@ -3091,13 +3344,14 @@
 {
 	int ret;
 
-	spin_lock_init(&smux.lock_lha0);
+	mutex_init(&smux.mutex_lha0);
 
 	spin_lock_init(&smux.rx_lock_lha1);
 	smux.rx_state = SMUX_RX_IDLE;
 	smux.power_state = SMUX_PWR_OFF;
 	smux.pwr_wakeup_delay_us = 1;
 	smux.powerdown_enabled = 0;
+	INIT_LIST_HEAD(&smux.power_queue);
 	smux.rx_activity_flag = 0;
 	smux.tx_activity_flag = 0;
 	smux.recv_len = 0;
@@ -3117,6 +3371,8 @@
 		return ret;
 	}
 
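+	/* register for restart notifications from the "qsc" subsystem */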
+	subsys_notif_register_notifier("qsc", &ssr_notifier);
+
 	ret = lch_init();
 	if (ret != 0) {
 		pr_err("%s: lch_init failed\n", __func__);
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index 2c8819c..f644ff0 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -16,6 +16,7 @@
 #define SMUX_PRIVATE_H
 
 #define SMUX_MAX_PKT_SIZE   8192
+#define SMUX_BROADCAST_LCID 0xFF
 
 /* SMUX Protocol Characters */
 #define SMUX_MAGIC          0x33FC