Merge "msm: ipa: stop remote IPA channels if tethering is not enabled"
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index dea94f3..ea8f1a0 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -406,8 +406,10 @@
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SM_GPUCC_BENGAL=y
 CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_SM_DEBUGCC_BENGAL=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
@@ -424,7 +426,6 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_COMMAND_DB=y
-CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QPNP_PBS=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index 566e223..794437c 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -424,6 +424,7 @@
 CONFIG_RNDIS_IPA=y
 CONFIG_IPA_UT=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_SM_GPUCC_BENGAL=y
 CONFIG_SM_DISPCC_BENGAL=y
 CONFIG_SM_DEBUGCC_BENGAL=y
@@ -443,7 +444,6 @@
 CONFIG_RPMSG_QCOM_GLINK_SMEM=y
 CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_COMMAND_DB=y
-CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_MDT_LOADER=y
 CONFIG_QPNP_PBS=y
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 67e12b5..69dcec7 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -243,7 +243,7 @@
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 	MHI_LOG("Exited with ret:%d\n", ret);
 
-	return ret;
+	return (ret < 0) ? -EBUSY : 0;
 }
 
 static int mhi_runtime_idle(struct device *dev)
@@ -302,7 +302,7 @@
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 	MHI_LOG("Exited with :%d\n", ret);
 
-	return ret;
+	return (ret < 0) ? -EBUSY : 0;
 }
 
 static int mhi_system_resume(struct device *dev)
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 35a084b..0f94d78 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -62,6 +62,7 @@
 	[MHI_PM_BIT_M3] = "M3",
 	[MHI_PM_BIT_M3_EXIT] = "M3->M0",
 	[MHI_PM_BIT_FW_DL_ERR] = "FW DL Error",
+	[MHI_PM_BIT_DEVICE_ERR_DETECT] = "Device Error Detect",
 	[MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect",
 	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
 	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index bb2379e..96c6ff6 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -455,6 +455,7 @@
 	MHI_PM_BIT_M3,
 	MHI_PM_BIT_M3_EXIT,
 	MHI_PM_BIT_FW_DL_ERR,
+	MHI_PM_BIT_DEVICE_ERR_DETECT,
 	MHI_PM_BIT_SYS_ERR_DETECT,
 	MHI_PM_BIT_SYS_ERR_PROCESS,
 	MHI_PM_BIT_SHUTDOWN_PROCESS,
@@ -474,6 +475,8 @@
 	MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT),
 	/* firmware download failure state */
 	MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR),
+	/* error or shutdown detected or processing state */
+	MHI_PM_DEVICE_ERR_DETECT = BIT(MHI_PM_BIT_DEVICE_ERR_DETECT),
 	MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT),
 	MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS),
 	MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
@@ -484,8 +487,9 @@
 
 #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
 		MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT | \
+		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | \
+		MHI_PM_FW_DL_ERR)))
 #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
 #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state >= MHI_PM_LD_ERR_FATAL_DETECT)
 #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index caa1d79f..51d80de 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1588,6 +1588,9 @@
 			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
 					     MHI_CB_EE_RDDM);
 			wake_up_all(&mhi_cntrl->state_event);
+
+			/* notify critical clients with early notifications */
+			mhi_control_error(mhi_cntrl);
 		}
 		goto exit_intvec;
 	}
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index b296f07..1da081f 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -33,7 +33,8 @@
  *     M0 <--> M0
  *     M0 -> FW_DL_ERR
  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: DEVICE_ERR_DETECT -> SYS_ERR_DETECT
+ *     SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
  * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
  *     SHUTDOWN_PROCESS -> DISABLE
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -49,45 +50,54 @@
 	{
 		MHI_PM_POR,
 		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
-		MHI_PM_SHUTDOWN_NO_ACCESS
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_FW_DL_ERR | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M0,
 		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
-		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
-		MHI_PM_SHUTDOWN_NO_ACCESS
+		MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_FW_DL_ERR | MHI_PM_SHUTDOWN_NO_ACCESS
 	},
 	{
 		MHI_PM_M2,
-		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
-	},
-	{
-		MHI_PM_M3_ENTER,
-		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
-	},
-	{
-		MHI_PM_M3,
-		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
-	},
-	{
-		MHI_PM_M3_EXIT,
-		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
-	},
-	{
-		MHI_PM_FW_DL_ERR,
-		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_M0 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
 		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
 		MHI_PM_SHUTDOWN_NO_ACCESS
 	},
+	{
+		MHI_PM_M3_ENTER,
+		MHI_PM_M3 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_M3,
+		MHI_PM_M3_EXIT | MHI_PM_DEVICE_ERR_DETECT |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_M3_EXIT,
+		MHI_PM_M0 | MHI_PM_DEVICE_ERR_DETECT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+		MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
+		MHI_PM_FW_DL_ERR,
+		MHI_PM_FW_DL_ERR | MHI_PM_DEVICE_ERR_DETECT |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
 	/* L1 States */
 	{
+		MHI_PM_DEVICE_ERR_DETECT,
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+	},
+	{
 		MHI_PM_SYS_ERR_DETECT,
 		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
 		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
@@ -935,19 +945,24 @@
 /* Transition MHI into error state and notify critical clients */
 void mhi_control_error(struct mhi_controller *mhi_cntrl)
 {
-	enum MHI_PM_STATE cur_state;
+	enum MHI_PM_STATE cur_state, transition_state;
 
 	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
 
+	/* link is not down if device is in RDDM */
+	transition_state = (mhi_cntrl->ee == MHI_EE_RDDM) ?
+		MHI_PM_DEVICE_ERR_DETECT : MHI_PM_LD_ERR_FATAL_DETECT;
+
 	write_lock_irq(&mhi_cntrl->pm_lock);
-	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_LD_ERR_FATAL_DETECT);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) {
+	/* proceed if we move to device error or are already in error state */
+	if (!MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
 		MHI_ERR("Failed to transition to state:%s from:%s\n",
-			to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+			to_mhi_pm_state_str(transition_state),
 			to_mhi_pm_state_str(cur_state));
 		goto exit_control_error;
 	}
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index d9de31a..4088b24 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -463,10 +463,10 @@
 			code = MHI_EV_CC_SUCCESS;
 
 iommu_map_cmd_completion:
-			MHI_SAT_LOG("IOMMU MAP 0x%llx CMD processing %s\n",
-				   MHI_TRE_GET_PTR(pkt),
-				   (code == MHI_EV_CC_SUCCESS) ? "successful" :
-				   "failed");
+			MHI_SAT_LOG("IOMMU MAP 0x%llx len:%d CMD %s:%llx\n",
+				    MHI_TRE_GET_PTR(pkt), MHI_TRE_GET_SIZE(pkt),
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failed", iova);
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(iova);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -504,9 +504,9 @@
 			if (!ret)
 				code = MHI_EV_CC_SUCCESS;
 
-			MHI_SAT_LOG("CTXT UPDATE CMD %s:%d processing %s\n",
-				buf.name, id, (code == MHI_EV_CC_SUCCESS) ?
-				"successful" : "failed");
+			MHI_SAT_LOG("CTXT UPDATE CMD %s:%d %s\n", buf.name, id,
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failed");
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -533,9 +533,9 @@
 				code = MHI_EV_CC_SUCCESS;
 			}
 
-			MHI_SAT_LOG("START CHANNEL %d CMD processing %s\n",
-				id, (code == MHI_EV_CC_SUCCESS) ? "successful" :
-				"failure");
+			MHI_SAT_LOG("START CHANNEL %d CMD %s\n", id,
+				    (code == MHI_EV_CC_SUCCESS) ? "successful" :
+				    "failure");
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code);
@@ -550,17 +550,15 @@
 						   SAT_CTXT_TYPE_CHAN);
 
 			MHI_SAT_ASSERT(!sat_dev,
-				"No device with given channel ID\n");
+					"No device with given channel ID\n");
 
 			MHI_SAT_ASSERT(!sat_dev->chan_started,
-				"Resetting unstarted channel!");
+					"Resetting unstarted channel!");
 
 			mhi_unprepare_from_transfer(sat_dev->mhi_dev);
 			sat_dev->chan_started = false;
 
-			MHI_SAT_LOG(
-				"RESET CHANNEL %d CMD processing successful\n",
-				id);
+			MHI_SAT_LOG("RESET CHANNEL %d CMD successful\n", id);
 
 			pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0);
 			pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 8762733..defe8bf 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -280,6 +280,7 @@
 	uint32_t earlyWakeTime;
 	/* work done status flag */
 	bool isWorkDone;
+	bool pm_awake_voted;
 };
 
 struct fastrpc_ctx_lst {
@@ -526,6 +527,9 @@
 static int hlosvm[1] = {VMID_HLOS};
 static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
+static void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted);
+static void fastrpc_pm_relax(bool *pm_awake_voted);
+
 static inline int64_t getnstimediff(struct timespec *start)
 {
 	int64_t ns;
@@ -1355,6 +1359,7 @@
 	ctx->magic = FASTRPC_CTX_MAGIC;
 	ctx->rspFlags = NORMAL_RESPONSE;
 	ctx->isWorkDone = false;
+	ctx->pm_awake_voted = false;
 
 	spin_lock(&fl->hlock);
 	hlist_add_head(&ctx->hn, &clst->pending);
@@ -1450,6 +1455,7 @@
 		break;
 	}
 	ctx->rspFlags = (enum fastrpc_response_flags)rspFlags;
+	fastrpc_pm_awake(ctx->fl->wake_enable, &ctx->pm_awake_voted);
 	complete(&ctx->work);
 }
 
@@ -2050,7 +2056,7 @@
 	me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
 }
 
-static inline void fastrpc_pm_awake(int fl_wake_enable, int *wake_enable)
+static inline void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted)
 {
 	struct fastrpc_apps *me = &gfa;
 
@@ -2062,14 +2068,14 @@
 		__pm_stay_awake(me->wake_source);
 	me->wake_count++;
 	spin_unlock(&me->hlock);
-	*wake_enable = 1;
+	*pm_awake_voted = true;
 }
 
-static inline void fastrpc_pm_relax(int *wake_enable)
+static inline void fastrpc_pm_relax(bool *pm_awake_voted)
 {
 	struct fastrpc_apps *me = &gfa;
 
-	if (!(*wake_enable))
+	if (!(*pm_awake_voted))
 		return;
 
 	spin_lock(&me->hlock);
@@ -2078,7 +2084,7 @@
 	if (!me->wake_count)
 		__pm_relax(me->wake_source);
 	spin_unlock(&me->hlock);
-	*wake_enable = 0;
+	*pm_awake_voted = false;
 }
 
 static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx,
@@ -2188,13 +2194,12 @@
 {
 	struct smq_invoke_ctx *ctx = NULL;
 	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
-	int cid = fl->cid;
-	int interrupted = 0;
-	int err = 0, wake_enable = 0;
+	int err = 0, interrupted = 0, cid = fl->cid;
 	struct timespec invoket = {0};
 	int64_t *perf_counter = NULL;
+	bool pm_awake_voted = false;
 
-	fastrpc_pm_awake(fl->wake_enable, &wake_enable);
+	fastrpc_pm_awake(fl->wake_enable, &pm_awake_voted);
 	if (fl->profile) {
 		perf_counter = getperfcounter(fl, PERF_COUNT);
 		getnstimeofday(&invoket);
@@ -2263,10 +2268,9 @@
 	if (err)
 		goto bail;
  wait:
-	fastrpc_pm_relax(&wake_enable);
+	fastrpc_pm_relax(&pm_awake_voted);
 	fastrpc_wait_for_completion(ctx, &interrupted, kernel);
-	if (interrupted != -ERESTARTSYS)
-		fastrpc_pm_awake(fl->wake_enable, &wake_enable);
+	pm_awake_voted = ctx->pm_awake_voted;
 	VERIFY(err, 0 == (err = interrupted));
 	if (err)
 		goto bail;
@@ -2305,7 +2309,7 @@
 	if (fl->profile && !interrupted)
 		fastrpc_update_invoke_count(invoke->handle, perf_counter,
 						&invoket);
-	fastrpc_pm_relax(&wake_enable);
+	fastrpc_pm_relax(&pm_awake_voted);
 	return err;
 }
 
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 850c02a..d1bdfe3 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2019, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,10 +23,15 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/soc/qcom/smd-rpm.h>
+#include <soc/qcom/rpm-smd.h>
+#include <linux/clk.h>
 
 #include <dt-bindings/clock/qcom,rpmcc.h>
 #include <dt-bindings/mfd/qcom-rpm.h>
 
+#include "clk-voter.h"
+#include "clk-debug.h"
+
 #define QCOM_RPM_KEY_SOFTWARE_ENABLE			0x6e657773
 #define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
 #define QCOM_RPM_SMD_KEY_RATE				0x007a484b
@@ -37,6 +42,8 @@
 #define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id,  \
 			     key)					      \
 	static struct clk_smd_rpm _platform##_##_active;		      \
+	static unsigned long _name##_##last_active_set_vote;		      \
+	static unsigned long _name##_##last_sleep_set_vote;		      \
 	static struct clk_smd_rpm _platform##_##_name = {		      \
 		.rpm_res_type = (type),					      \
 		.rpm_clk_id = (r_id),					      \
@@ -44,9 +51,12 @@
 		.rpm_key = (key),					      \
 		.peer = &_platform##_##_active,				      \
 		.rate = INT_MAX,					      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_ops,			      \
 			.name = #_name,					      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },       \
 			.num_parents = 1,				      \
 		},							      \
@@ -59,9 +69,12 @@
 		.rpm_key = (key),					      \
 		.peer = &_platform##_##_name,				      \
 		.rate = INT_MAX,					      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_ops,			      \
 			.name = #_active,				      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -70,6 +83,8 @@
 #define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id,    \
 				    stat_id, r, key)			      \
 	static struct clk_smd_rpm _platform##_##_active;		      \
+	static unsigned long _name##_##last_active_set_vote;		      \
+	static unsigned long _name##_##last_sleep_set_vote;		      \
 	static struct clk_smd_rpm _platform##_##_name = {		      \
 		.rpm_res_type = (type),					      \
 		.rpm_clk_id = (r_id),					      \
@@ -78,9 +93,12 @@
 		.branch = true,						      \
 		.peer = &_platform##_##_active,				      \
 		.rate = (r),						      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_branch_ops,			      \
 			.name = #_name,					      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -94,9 +112,12 @@
 		.branch = true,						      \
 		.peer = &_platform##_##_name,				      \
 		.rate = (r),						      \
+		.last_active_set_vote = &_name##_##last_active_set_vote,      \
+		.last_sleep_set_vote = &_name##_##last_sleep_set_vote,	      \
 		.hw.init = &(struct clk_init_data){			      \
 			.ops = &clk_smd_rpm_branch_ops,			      \
 			.name = #_active,				      \
+			.flags = CLK_ENABLE_HAND_OFF,			      \
 			.parent_names = (const char *[]){ "xo_board" },	      \
 			.num_parents = 1,				      \
 		},							      \
@@ -137,7 +158,8 @@
 	struct clk_smd_rpm *peer;
 	struct clk_hw hw;
 	unsigned long rate;
-	struct qcom_smd_rpm *rpm;
+	unsigned long *last_active_set_vote;
+	unsigned long *last_sleep_set_vote;
 };
 
 struct clk_smd_rpm_req {
@@ -148,72 +170,76 @@
 
 struct rpm_cc {
 	struct qcom_rpm *rpm;
-	struct clk_smd_rpm **clks;
-	size_t num_clks;
+	struct clk_onecell_data data;
+	struct clk *clks[];
 };
 
 struct rpm_smd_clk_desc {
-	struct clk_smd_rpm **clks;
+	struct clk_hw **clks;
+	size_t num_rpm_clks;
 	size_t num_clks;
 };
 
 static DEFINE_MUTEX(rpm_smd_clk_lock);
 
-static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+static int clk_smd_rpm_prepare(struct clk_hw *hw);
+
+static int clk_smd_rpm_handoff(struct clk_hw *hw)
 {
-	int ret;
-	struct clk_smd_rpm_req req = {
-		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(r->branch ? 1 : INT_MAX),
-	};
-
-	ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				 r->rpm_res_type, r->rpm_clk_id, &req,
-				 sizeof(req));
-	if (ret)
-		return ret;
-	ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				 r->rpm_res_type, r->rpm_clk_id, &req,
-				 sizeof(req));
-	if (ret)
-		return ret;
-
-	return 0;
+	return clk_smd_rpm_prepare(hw);
 }
 
 static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
-				       unsigned long rate)
+					uint32_t rate)
 {
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+		.data = (void *)&rate,
+		.length = sizeof(rate),
 	};
 
-	return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				  r->rpm_res_type, r->rpm_clk_id, &req,
-				  sizeof(req));
+	if (*r->last_active_set_vote == rate)
+		return ret;
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type,
+			r->rpm_clk_id, &req, 1);
+	if (ret)
+		return ret;
+
+	*r->last_active_set_vote = rate;
+
+	return ret;
 }
 
 static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
-				      unsigned long rate)
+					uint32_t rate)
 {
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(r->rpm_key),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+		.data = (void *)&rate,
+		.length = sizeof(rate),
 	};
 
-	return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				  r->rpm_res_type, r->rpm_clk_id, &req,
-				  sizeof(req));
+	if (*r->last_sleep_set_vote == rate)
+		return ret;
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, r->rpm_res_type,
+			r->rpm_clk_id, &req, 1);
+	if (ret)
+		return ret;
+
+	*r->last_sleep_set_vote = rate;
+
+	return ret;
 }
 
 static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
 			    unsigned long *active, unsigned long *sleep)
 {
-	*active = rate;
+	/* Convert the rate (hz) to khz */
+	*active = DIV_ROUND_UP(rate, 1000);
 
 	/*
 	 * Active-only clocks don't care what the rate is during sleep. So,
@@ -231,17 +257,17 @@
 	struct clk_smd_rpm *peer = r->peer;
 	unsigned long this_rate = 0, this_sleep_rate = 0;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	int ret = 0;
 
 	mutex_lock(&rpm_smd_clk_lock);
 
-	/* Don't send requests to the RPM if the rate has not been set. */
-	if (!r->rate)
-		goto out;
-
 	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
 
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_rate == 0)
+		goto out;
+
 	/* Take peer clock's rate into account only if it's enabled. */
 	if (peer->enabled)
 		to_active_sleep(peer, peer->rate,
@@ -279,13 +305,13 @@
 	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
 	struct clk_smd_rpm *peer = r->peer;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	int ret;
 
 	mutex_lock(&rpm_smd_clk_lock);
 
 	if (!r->rate)
-		goto out;
+		goto enable;
 
 	/* Take peer clock's rate into account only if it's enabled. */
 	if (peer->enabled)
@@ -302,6 +328,7 @@
 	if (ret)
 		goto out;
 
+enable:
 	r->enabled = false;
 
 out:
@@ -313,7 +340,7 @@
 {
 	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
 	struct clk_smd_rpm *peer = r->peer;
-	unsigned long active_rate, sleep_rate;
+	uint32_t active_rate, sleep_rate;
 	unsigned long this_rate = 0, this_sleep_rate = 0;
 	unsigned long peer_rate = 0, peer_sleep_rate = 0;
 	int ret = 0;
@@ -372,33 +399,62 @@
 	return r->rate;
 }
 
-static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+static int clk_smd_rpm_enable_scaling(void)
 {
-	int ret;
-	struct clk_smd_rpm_req req = {
+	int ret = 0;
+	uint32_t value = cpu_to_le32(1);
+	struct msm_rpm_kvp req = {
 		.key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
-		.nbytes = cpu_to_le32(sizeof(u32)),
-		.value = cpu_to_le32(1),
+		.data = (void *)&value,
+		.length = sizeof(value),
 	};
 
-	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
-				 QCOM_SMD_RPM_MISC_CLK,
-				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE,
+			QCOM_SMD_RPM_MISC_CLK,
+			QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
 	if (ret) {
 		pr_err("RPM clock scaling (sleep set) not enabled!\n");
 		return ret;
 	}
 
-	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
-				 QCOM_SMD_RPM_MISC_CLK,
-				 QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE,
+			QCOM_SMD_RPM_MISC_CLK,
+			QCOM_RPM_SCALING_ENABLE_ID, &req, 1);
 	if (ret) {
 		pr_err("RPM clock scaling (active set) not enabled!\n");
 		return ret;
 	}
 
 	pr_debug("%s: RPM clock scaling is enabled\n", __func__);
-	return 0;
+	return ret;
+}
+
+static int clk_vote_bimc(struct clk_hw *hw, uint32_t rate)
+{
+	int ret = 0;
+	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+	struct msm_rpm_kvp req = {
+		.key = r->rpm_key,
+		.data = (void *)&rate,
+		.length = sizeof(rate),
+	};
+
+	ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE,
+		r->rpm_res_type, r->rpm_clk_id, &req, 1);
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int clk_smd_rpm_is_enabled(struct clk_hw *hw)
+{
+	struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+	return r->enabled;
 }
 
 static const struct clk_ops clk_smd_rpm_ops = {
@@ -407,11 +463,17 @@
 	.set_rate	= clk_smd_rpm_set_rate,
 	.round_rate	= clk_smd_rpm_round_rate,
 	.recalc_rate	= clk_smd_rpm_recalc_rate,
+	.is_enabled	= clk_smd_rpm_is_enabled,
+	.debug_init	= clk_debug_measure_add,
 };
 
 static const struct clk_ops clk_smd_rpm_branch_ops = {
 	.prepare	= clk_smd_rpm_prepare,
 	.unprepare	= clk_smd_rpm_unprepare,
+	.round_rate	= clk_smd_rpm_round_rate,
+	.recalc_rate	= clk_smd_rpm_recalc_rate,
+	.is_enabled	= clk_smd_rpm_is_enabled,
+	.debug_init	= clk_debug_measure_add,
 };
 
 /* msm8916 */
@@ -428,35 +490,36 @@
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
 
-static struct clk_smd_rpm *msm8916_clks[] = {
-	[RPM_SMD_PCNOC_CLK]		= &msm8916_pcnoc_clk,
-	[RPM_SMD_PCNOC_A_CLK]		= &msm8916_pcnoc_a_clk,
-	[RPM_SMD_SNOC_CLK]		= &msm8916_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK]		= &msm8916_snoc_a_clk,
-	[RPM_SMD_BIMC_CLK]		= &msm8916_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK]		= &msm8916_bimc_a_clk,
-	[RPM_SMD_QDSS_CLK]		= &msm8916_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK]		= &msm8916_qdss_a_clk,
-	[RPM_SMD_BB_CLK1]		= &msm8916_bb_clk1,
-	[RPM_SMD_BB_CLK1_A]		= &msm8916_bb_clk1_a,
-	[RPM_SMD_BB_CLK2]		= &msm8916_bb_clk2,
-	[RPM_SMD_BB_CLK2_A]		= &msm8916_bb_clk2_a,
-	[RPM_SMD_RF_CLK1]		= &msm8916_rf_clk1,
-	[RPM_SMD_RF_CLK1_A]		= &msm8916_rf_clk1_a,
-	[RPM_SMD_RF_CLK2]		= &msm8916_rf_clk2,
-	[RPM_SMD_RF_CLK2_A]		= &msm8916_rf_clk2_a,
-	[RPM_SMD_BB_CLK1_PIN]		= &msm8916_bb_clk1_pin,
-	[RPM_SMD_BB_CLK1_A_PIN]		= &msm8916_bb_clk1_a_pin,
-	[RPM_SMD_BB_CLK2_PIN]		= &msm8916_bb_clk2_pin,
-	[RPM_SMD_BB_CLK2_A_PIN]		= &msm8916_bb_clk2_a_pin,
-	[RPM_SMD_RF_CLK1_PIN]		= &msm8916_rf_clk1_pin,
-	[RPM_SMD_RF_CLK1_A_PIN]		= &msm8916_rf_clk1_a_pin,
-	[RPM_SMD_RF_CLK2_PIN]		= &msm8916_rf_clk2_pin,
-	[RPM_SMD_RF_CLK2_A_PIN]		= &msm8916_rf_clk2_a_pin,
+static struct clk_hw *msm8916_clks[] = {
+	[RPM_SMD_PCNOC_CLK]		= &msm8916_pcnoc_clk.hw,
+	[RPM_SMD_PCNOC_A_CLK]		= &msm8916_pcnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK]		= &msm8916_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK]		= &msm8916_snoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK]		= &msm8916_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK]		= &msm8916_bimc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK]		= &msm8916_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK]		= &msm8916_qdss_a_clk.hw,
+	[RPM_SMD_BB_CLK1]		= &msm8916_bb_clk1.hw,
+	[RPM_SMD_BB_CLK1_A]		= &msm8916_bb_clk1_a.hw,
+	[RPM_SMD_BB_CLK2]		= &msm8916_bb_clk2.hw,
+	[RPM_SMD_BB_CLK2_A]		= &msm8916_bb_clk2_a.hw,
+	[RPM_SMD_RF_CLK1]		= &msm8916_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A]		= &msm8916_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2]		= &msm8916_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A]		= &msm8916_rf_clk2_a.hw,
+	[RPM_SMD_BB_CLK1_PIN]		= &msm8916_bb_clk1_pin.hw,
+	[RPM_SMD_BB_CLK1_A_PIN]		= &msm8916_bb_clk1_a_pin.hw,
+	[RPM_SMD_BB_CLK2_PIN]		= &msm8916_bb_clk2_pin.hw,
+	[RPM_SMD_BB_CLK2_A_PIN]		= &msm8916_bb_clk2_a_pin.hw,
+	[RPM_SMD_RF_CLK1_PIN]		= &msm8916_rf_clk1_pin.hw,
+	[RPM_SMD_RF_CLK1_A_PIN]		= &msm8916_rf_clk1_a_pin.hw,
+	[RPM_SMD_RF_CLK2_PIN]		= &msm8916_rf_clk2_pin.hw,
+	[RPM_SMD_RF_CLK2_A_PIN]		= &msm8916_rf_clk2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
 	.clks = msm8916_clks,
+	.num_rpm_clks = RPM_SMD_RF_CLK2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8916_clks),
 };
 
@@ -483,51 +546,52 @@
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6);
 
-static struct clk_smd_rpm *msm8974_clks[] = {
-	[RPM_SMD_PNOC_CLK]		= &msm8974_pnoc_clk,
-	[RPM_SMD_PNOC_A_CLK]		= &msm8974_pnoc_a_clk,
-	[RPM_SMD_SNOC_CLK]		= &msm8974_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK]		= &msm8974_snoc_a_clk,
-	[RPM_SMD_CNOC_CLK]		= &msm8974_cnoc_clk,
-	[RPM_SMD_CNOC_A_CLK]		= &msm8974_cnoc_a_clk,
-	[RPM_SMD_MMSSNOC_AHB_CLK]	= &msm8974_mmssnoc_ahb_clk,
-	[RPM_SMD_MMSSNOC_AHB_A_CLK]	= &msm8974_mmssnoc_ahb_a_clk,
-	[RPM_SMD_BIMC_CLK]		= &msm8974_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK]		= &msm8974_bimc_a_clk,
-	[RPM_SMD_OCMEMGX_CLK]		= &msm8974_ocmemgx_clk,
-	[RPM_SMD_OCMEMGX_A_CLK]		= &msm8974_ocmemgx_a_clk,
-	[RPM_SMD_QDSS_CLK]		= &msm8974_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK]		= &msm8974_qdss_a_clk,
-	[RPM_SMD_CXO_D0]		= &msm8974_cxo_d0,
-	[RPM_SMD_CXO_D0_A]		= &msm8974_cxo_d0_a,
-	[RPM_SMD_CXO_D1]		= &msm8974_cxo_d1,
-	[RPM_SMD_CXO_D1_A]		= &msm8974_cxo_d1_a,
-	[RPM_SMD_CXO_A0]		= &msm8974_cxo_a0,
-	[RPM_SMD_CXO_A0_A]		= &msm8974_cxo_a0_a,
-	[RPM_SMD_CXO_A1]		= &msm8974_cxo_a1,
-	[RPM_SMD_CXO_A1_A]		= &msm8974_cxo_a1_a,
-	[RPM_SMD_CXO_A2]		= &msm8974_cxo_a2,
-	[RPM_SMD_CXO_A2_A]		= &msm8974_cxo_a2_a,
-	[RPM_SMD_DIFF_CLK]		= &msm8974_diff_clk,
-	[RPM_SMD_DIFF_A_CLK]		= &msm8974_diff_a_clk,
-	[RPM_SMD_DIV_CLK1]		= &msm8974_div_clk1,
-	[RPM_SMD_DIV_A_CLK1]		= &msm8974_div_a_clk1,
-	[RPM_SMD_DIV_CLK2]		= &msm8974_div_clk2,
-	[RPM_SMD_DIV_A_CLK2]		= &msm8974_div_a_clk2,
-	[RPM_SMD_CXO_D0_PIN]		= &msm8974_cxo_d0_pin,
-	[RPM_SMD_CXO_D0_A_PIN]		= &msm8974_cxo_d0_a_pin,
-	[RPM_SMD_CXO_D1_PIN]		= &msm8974_cxo_d1_pin,
-	[RPM_SMD_CXO_D1_A_PIN]		= &msm8974_cxo_d1_a_pin,
-	[RPM_SMD_CXO_A0_PIN]		= &msm8974_cxo_a0_pin,
-	[RPM_SMD_CXO_A0_A_PIN]		= &msm8974_cxo_a0_a_pin,
-	[RPM_SMD_CXO_A1_PIN]		= &msm8974_cxo_a1_pin,
-	[RPM_SMD_CXO_A1_A_PIN]		= &msm8974_cxo_a1_a_pin,
-	[RPM_SMD_CXO_A2_PIN]		= &msm8974_cxo_a2_pin,
-	[RPM_SMD_CXO_A2_A_PIN]		= &msm8974_cxo_a2_a_pin,
+static struct clk_hw *msm8974_clks[] = {
+	[RPM_SMD_PNOC_CLK]		= &msm8974_pnoc_clk.hw,
+	[RPM_SMD_PNOC_A_CLK]		= &msm8974_pnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK]		= &msm8974_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK]		= &msm8974_snoc_a_clk.hw,
+	[RPM_SMD_CNOC_CLK]		= &msm8974_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK]		= &msm8974_cnoc_a_clk.hw,
+	[RPM_SMD_MMSSNOC_AHB_CLK]	= &msm8974_mmssnoc_ahb_clk.hw,
+	[RPM_SMD_MMSSNOC_AHB_A_CLK]	= &msm8974_mmssnoc_ahb_a_clk.hw,
+	[RPM_SMD_BIMC_CLK]		= &msm8974_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK]		= &msm8974_bimc_a_clk.hw,
+	[RPM_SMD_OCMEMGX_CLK]		= &msm8974_ocmemgx_clk.hw,
+	[RPM_SMD_OCMEMGX_A_CLK]		= &msm8974_ocmemgx_a_clk.hw,
+	[RPM_SMD_QDSS_CLK]		= &msm8974_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK]		= &msm8974_qdss_a_clk.hw,
+	[RPM_SMD_CXO_D0]		= &msm8974_cxo_d0.hw,
+	[RPM_SMD_CXO_D0_A]		= &msm8974_cxo_d0_a.hw,
+	[RPM_SMD_CXO_D1]		= &msm8974_cxo_d1.hw,
+	[RPM_SMD_CXO_D1_A]		= &msm8974_cxo_d1_a.hw,
+	[RPM_SMD_CXO_A0]		= &msm8974_cxo_a0.hw,
+	[RPM_SMD_CXO_A0_A]		= &msm8974_cxo_a0_a.hw,
+	[RPM_SMD_CXO_A1]		= &msm8974_cxo_a1.hw,
+	[RPM_SMD_CXO_A1_A]		= &msm8974_cxo_a1_a.hw,
+	[RPM_SMD_CXO_A2]		= &msm8974_cxo_a2.hw,
+	[RPM_SMD_CXO_A2_A]		= &msm8974_cxo_a2_a.hw,
+	[RPM_SMD_DIFF_CLK]		= &msm8974_diff_clk.hw,
+	[RPM_SMD_DIFF_A_CLK]		= &msm8974_diff_a_clk.hw,
+	[RPM_SMD_DIV_CLK1]		= &msm8974_div_clk1.hw,
+	[RPM_SMD_DIV_A_CLK1]		= &msm8974_div_a_clk1.hw,
+	[RPM_SMD_DIV_CLK2]		= &msm8974_div_clk2.hw,
+	[RPM_SMD_DIV_A_CLK2]		= &msm8974_div_a_clk2.hw,
+	[RPM_SMD_CXO_D0_PIN]		= &msm8974_cxo_d0_pin.hw,
+	[RPM_SMD_CXO_D0_A_PIN]		= &msm8974_cxo_d0_a_pin.hw,
+	[RPM_SMD_CXO_D1_PIN]		= &msm8974_cxo_d1_pin.hw,
+	[RPM_SMD_CXO_D1_A_PIN]		= &msm8974_cxo_d1_a_pin.hw,
+	[RPM_SMD_CXO_A0_PIN]		= &msm8974_cxo_a0_pin.hw,
+	[RPM_SMD_CXO_A0_A_PIN]		= &msm8974_cxo_a0_a_pin.hw,
+	[RPM_SMD_CXO_A1_PIN]		= &msm8974_cxo_a1_pin.hw,
+	[RPM_SMD_CXO_A1_A_PIN]		= &msm8974_cxo_a1_a_pin.hw,
+	[RPM_SMD_CXO_A2_PIN]		= &msm8974_cxo_a2_pin.hw,
+	[RPM_SMD_CXO_A2_A_PIN]		= &msm8974_cxo_a2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
 	.clks = msm8974_clks,
+	.num_rpm_clks = RPM_SMD_CXO_A2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8974_clks),
 };
 
@@ -559,150 +623,369 @@
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
 DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
 
-static struct clk_smd_rpm *msm8996_clks[] = {
-	[RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
-	[RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
-	[RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
-	[RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
-	[RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk,
-	[RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
-	[RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
-	[RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
-	[RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
-	[RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
-	[RPM_SMD_IPA_CLK] = &msm8996_ipa_clk,
-	[RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk,
-	[RPM_SMD_CE1_CLK] = &msm8996_ce1_clk,
-	[RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk,
-	[RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
-	[RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
-	[RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
-	[RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
-	[RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
-	[RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
-	[RPM_SMD_BB_CLK1] = &msm8996_bb_clk1,
-	[RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a,
-	[RPM_SMD_BB_CLK2] = &msm8996_bb_clk2,
-	[RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a,
-	[RPM_SMD_RF_CLK1] = &msm8996_rf_clk1,
-	[RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a,
-	[RPM_SMD_RF_CLK2] = &msm8996_rf_clk2,
-	[RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a,
-	[RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk,
-	[RPM_SMD_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
-	[RPM_SMD_DIV_CLK1] = &msm8996_div_clk1,
-	[RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a,
-	[RPM_SMD_DIV_CLK2] = &msm8996_div_clk2,
-	[RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a,
-	[RPM_SMD_DIV_CLK3] = &msm8996_div_clk3,
-	[RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a,
-	[RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
-	[RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
-	[RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
-	[RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
-	[RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
-	[RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
-	[RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
-	[RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+static struct clk_hw *msm8996_clks[] = {
+	[RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk.hw,
+	[RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk.hw,
+	[RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk.hw,
+	[RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk.hw,
+	[RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk.hw,
+	[RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk.hw,
+	[RPM_SMD_IPA_CLK] = &msm8996_ipa_clk.hw,
+	[RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk.hw,
+	[RPM_SMD_CE1_CLK] = &msm8996_ce1_clk.hw,
+	[RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk.hw,
+	[RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk.hw,
+	[RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk.hw,
+	[RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk.hw,
+	[RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk.hw,
+	[RPM_SMD_BB_CLK1] = &msm8996_bb_clk1.hw,
+	[RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a.hw,
+	[RPM_SMD_BB_CLK2] = &msm8996_bb_clk2.hw,
+	[RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a.hw,
+	[RPM_SMD_RF_CLK1] = &msm8996_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2] = &msm8996_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a.hw,
+	[RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk.hw,
+	[RPM_SMD_LN_BB_CLK_A] = &msm8996_ln_bb_a_clk.hw,
+	[RPM_SMD_DIV_CLK1] = &msm8996_div_clk1.hw,
+	[RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a.hw,
+	[RPM_SMD_DIV_CLK2] = &msm8996_div_clk2.hw,
+	[RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a.hw,
+	[RPM_SMD_DIV_CLK3] = &msm8996_div_clk3.hw,
+	[RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a.hw,
+	[RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin.hw,
+	[RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin.hw,
+	[RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin.hw,
+	[RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin.hw,
+	[RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin.hw,
+	[RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin.hw,
+	[RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin.hw,
+	[RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin.hw,
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
 	.clks = msm8996_clks,
+	.num_rpm_clks = RPM_SMD_RF_CLK2_A_PIN,
 	.num_clks = ARRAY_SIZE(msm8996_clks),
 };
 
+/* bengal */
+DEFINE_CLK_SMD_RPM_BRANCH(bengal, bi_tcxo, bi_tcxo_ao,
+					QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+DEFINE_CLK_SMD_RPM(bengal, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(bengal, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM_BRANCH(bengal, qdss_clk, qdss_a_clk,
+					QCOM_SMD_RPM_MISC_CLK, 1, 19200000);
+DEFINE_CLK_SMD_RPM(bengal, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, qup_clk, qup_a_clk, QCOM_SMD_RPM_QUP_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 1);
+DEFINE_CLK_SMD_RPM(bengal, snoc_periph_clk, snoc_periph_a_clk,
+						QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(bengal, snoc_lpass_clk, snoc_lpass_a_clk,
+						QCOM_SMD_RPM_BUS_CLK, 5);
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bengal, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bengal, rf_clk2, rf_clk2_a, 5);
+
+/* Voter clocks */
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, snoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, bimc_clk, LONG_MAX);
+
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, snoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, bimc_a_clk, LONG_MAX);
+
+static DEFINE_CLK_VOTER(mcd_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(scm_ce1_clk, ce1_clk, 85710000);
+
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, cnoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, cnoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_keepalive_a_clk, cnoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_keepalive_a_clk, snoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(vfe_mmrt_msmbus_clk, mmrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(vfe_mmrt_msmbus_a_clk, mmrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(mdp_mmrt_msmbus_clk, mmrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(mdp_mmrt_msmbus_a_clk, mmrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cpp_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cpp_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(jpeg_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(jpeg_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(venus_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(venus_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(arm9_mmnrt_msmbus_clk, mmnrt_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(arm9_mmnrt_msmbus_a_clk, mmnrt_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(qup0_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup0_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup1_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(qup1_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(dap_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(dap_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(crypto_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(crypto_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_slv_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc1_slv_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_slv_msmbus_snoc_periph_clk, snoc_periph_clk,
+								LONG_MAX);
+static DEFINE_CLK_VOTER(sdc2_slv_msmbus_snoc_periph_a_clk, snoc_periph_a_clk,
+								LONG_MAX);
+
+/* Branch Voter clocks */
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_otg_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_pronto_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_mss_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_wlan_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_lpass_clk, bi_tcxo);
+static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_pil_cdsp_clk, bi_tcxo);
+
+static struct clk_hw *bengal_clks[] = {
+	[RPM_SMD_XO_CLK_SRC] = &bengal_bi_tcxo.hw,
+	[RPM_SMD_XO_A_CLK_SRC] = &bengal_bi_tcxo_ao.hw,
+	[RPM_SMD_SNOC_CLK] = &bengal_snoc_clk.hw,
+	[RPM_SMD_SNOC_A_CLK] = &bengal_snoc_a_clk.hw,
+	[RPM_SMD_BIMC_CLK] = &bengal_bimc_clk.hw,
+	[RPM_SMD_BIMC_A_CLK] = &bengal_bimc_a_clk.hw,
+	[RPM_SMD_QDSS_CLK] = &bengal_qdss_clk.hw,
+	[RPM_SMD_QDSS_A_CLK] = &bengal_qdss_a_clk.hw,
+	[RPM_SMD_RF_CLK1] = &bengal_rf_clk1.hw,
+	[RPM_SMD_RF_CLK1_A] = &bengal_rf_clk1_a.hw,
+	[RPM_SMD_RF_CLK2] = &bengal_rf_clk2.hw,
+	[RPM_SMD_RF_CLK2_A] = &bengal_rf_clk2_a.hw,
+	[RPM_SMD_CNOC_CLK] = &bengal_cnoc_clk.hw,
+	[RPM_SMD_CNOC_A_CLK] = &bengal_cnoc_a_clk.hw,
+	[RPM_SMD_IPA_CLK] = &bengal_ipa_clk.hw,
+	[RPM_SMD_IPA_A_CLK] = &bengal_ipa_a_clk.hw,
+	[RPM_SMD_QUP_CLK] = &bengal_qup_clk.hw,
+	[RPM_SMD_QUP_A_CLK] = &bengal_qup_a_clk.hw,
+	[RPM_SMD_MMRT_CLK] = &bengal_mmrt_clk.hw,
+	[RPM_SMD_MMRT_A_CLK] = &bengal_mmrt_a_clk.hw,
+	[RPM_SMD_MMNRT_CLK] = &bengal_mmnrt_clk.hw,
+	[RPM_SMD_MMNRT_A_CLK] = &bengal_mmnrt_a_clk.hw,
+	[RPM_SMD_SNOC_PERIPH_CLK] = &bengal_snoc_periph_clk.hw,
+	[RPM_SMD_SNOC_PERIPH_A_CLK] = &bengal_snoc_periph_a_clk.hw,
+	[RPM_SMD_SNOC_LPASS_CLK] = &bengal_snoc_lpass_clk.hw,
+	[RPM_SMD_SNOC_LPASS_A_CLK] = &bengal_snoc_lpass_a_clk.hw,
+	[RPM_SMD_CE1_CLK] = &bengal_ce1_clk.hw,
+	[RPM_SMD_CE1_A_CLK] = &bengal_ce1_a_clk.hw,
+	[CNOC_MSMBUS_CLK] = &cnoc_msmbus_clk.hw,
+	[CNOC_MSMBUS_A_CLK] = &cnoc_msmbus_a_clk.hw,
+	[SNOC_KEEPALIVE_A_CLK] = &snoc_keepalive_a_clk.hw,
+	[CNOC_KEEPALIVE_A_CLK] = &cnoc_keepalive_a_clk.hw,
+	[SNOC_MSMBUS_CLK] = &snoc_msmbus_clk.hw,
+	[SNOC_MSMBUS_A_CLK] = &snoc_msmbus_a_clk.hw,
+	[BIMC_MSMBUS_CLK] = &bimc_msmbus_clk.hw,
+	[BIMC_MSMBUS_A_CLK] = &bimc_msmbus_a_clk.hw,
+	[CPP_MMNRT_MSMBUS_CLK] = &cpp_mmnrt_msmbus_clk.hw,
+	[CPP_MMNRT_MSMBUS_A_CLK] = &cpp_mmnrt_msmbus_a_clk.hw,
+	[JPEG_MMNRT_MSMBUS_CLK] = &jpeg_mmnrt_msmbus_clk.hw,
+	[JPEG_MMNRT_MSMBUS_A_CLK] = &jpeg_mmnrt_msmbus_a_clk.hw,
+	[VENUS_MMNRT_MSMBUS_CLK] = &venus_mmnrt_msmbus_clk.hw,
+	[VENUS_MMNRT_MSMBUS_A_CLK] = &venus_mmnrt_msmbus_a_clk.hw,
+	[ARM9_MMNRT_MSMBUS_CLK] = &arm9_mmnrt_msmbus_clk.hw,
+	[ARM9_MMNRT_MSMBUS_A_CLK] = &arm9_mmnrt_msmbus_a_clk.hw,
+	[VFE_MMRT_MSMBUS_CLK] = &vfe_mmrt_msmbus_clk.hw,
+	[VFE_MMRT_MSMBUS_A_CLK] = &vfe_mmrt_msmbus_a_clk.hw,
+	[MDP_MMRT_MSMBUS_CLK] = &mdp_mmrt_msmbus_clk.hw,
+	[MDP_MMRT_MSMBUS_A_CLK] = &mdp_mmrt_msmbus_a_clk.hw,
+	[QUP0_MSMBUS_SNOC_PERIPH_CLK] = &qup0_msmbus_snoc_periph_clk.hw,
+	[QUP0_MSMBUS_SNOC_PERIPH_A_CLK] = &qup0_msmbus_snoc_periph_a_clk.hw,
+	[QUP1_MSMBUS_SNOC_PERIPH_CLK] = &qup1_msmbus_snoc_periph_clk.hw,
+	[QUP1_MSMBUS_SNOC_PERIPH_A_CLK] = &qup1_msmbus_snoc_periph_a_clk.hw,
+	[DAP_MSMBUS_SNOC_PERIPH_CLK] = &dap_msmbus_snoc_periph_clk.hw,
+	[DAP_MSMBUS_SNOC_PERIPH_A_CLK] = &dap_msmbus_snoc_periph_a_clk.hw,
+	[SDC1_MSMBUS_SNOC_PERIPH_CLK] = &sdc1_msmbus_snoc_periph_clk.hw,
+	[SDC1_MSMBUS_SNOC_PERIPH_A_CLK] = &sdc1_msmbus_snoc_periph_a_clk.hw,
+	[SDC2_MSMBUS_SNOC_PERIPH_CLK] = &sdc2_msmbus_snoc_periph_clk.hw,
+	[SDC2_MSMBUS_SNOC_PERIPH_A_CLK] = &sdc2_msmbus_snoc_periph_a_clk.hw,
+	[CRYPTO_MSMBUS_SNOC_PERIPH_CLK] = &crypto_msmbus_snoc_periph_clk.hw,
+	[CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&crypto_msmbus_snoc_periph_a_clk.hw,
+	[SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK] =
+				&sdc1_slv_msmbus_snoc_periph_clk.hw,
+	[SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&sdc1_slv_msmbus_snoc_periph_a_clk.hw,
+	[SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK] =
+				&sdc2_slv_msmbus_snoc_periph_clk.hw,
+	[SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK] =
+				&sdc2_slv_msmbus_snoc_periph_a_clk.hw,
+	[MCD_CE1_CLK] = &mcd_ce1_clk.hw,
+	[QCEDEV_CE1_CLK] = &qcedev_ce1_clk.hw,
+	[QCRYPTO_CE1_CLK] = &qcrypto_ce1_clk.hw,
+	[QSEECOM_CE1_CLK] = &qseecom_ce1_clk.hw,
+	[SCM_CE1_CLK] = &scm_ce1_clk.hw,
+	[CXO_SMD_OTG_CLK] = &bi_tcxo_otg_clk.hw,
+	[CXO_SMD_PIL_PRONTO_CLK] = &bi_tcxo_pil_pronto_clk.hw,
+	[CXO_SMD_PIL_MSS_CLK] = &bi_tcxo_pil_mss_clk.hw,
+	[CXO_SMD_WLAN_CLK] = &bi_tcxo_wlan_clk.hw,
+	[CXO_SMD_PIL_LPASS_CLK] = &bi_tcxo_pil_lpass_clk.hw,
+	[CXO_SMD_PIL_CDSP_CLK] = &bi_tcxo_pil_cdsp_clk.hw,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_bengal = {
+	.clks = bengal_clks,
+	.num_rpm_clks = RPM_SMD_CE1_A_CLK,
+	.num_clks = ARRAY_SIZE(bengal_clks),
+};
+
 static const struct of_device_id rpm_smd_clk_match_table[] = {
 	{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
 	{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
 	{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
+	{ .compatible = "qcom,rpmcc-bengal", .data = &rpm_clk_bengal},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
 
-static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
-					     void *data)
-{
-	struct rpm_cc *rcc = data;
-	unsigned int idx = clkspec->args[0];
-
-	if (idx >= rcc->num_clks) {
-		pr_err("%s: invalid index %u\n", __func__, idx);
-		return ERR_PTR(-EINVAL);
-	}
-
-	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
-}
-
 static int rpm_smd_clk_probe(struct platform_device *pdev)
 {
+	struct clk **clks;
+	struct clk *clk;
 	struct rpm_cc *rcc;
-	int ret;
+	struct clk_onecell_data *data;
+	int ret, is_bengal;
 	size_t num_clks, i;
-	struct qcom_smd_rpm *rpm;
-	struct clk_smd_rpm **rpm_smd_clks;
+	struct clk_hw **hw_clks;
 	const struct rpm_smd_clk_desc *desc;
 
-	rpm = dev_get_drvdata(pdev->dev.parent);
-	if (!rpm) {
-		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
-		return -ENODEV;
+
+	is_bengal = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,rpmcc-bengal");
+	if (is_bengal) {
+		ret = clk_vote_bimc(&bengal_bimc_clk.hw, INT_MAX);
+		if (ret < 0)
+			return ret;
 	}
 
 	desc = of_device_get_match_data(&pdev->dev);
 	if (!desc)
 		return -EINVAL;
 
-	rpm_smd_clks = desc->clks;
+	hw_clks = desc->clks;
 	num_clks = desc->num_clks;
 
-	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
+	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+			   GFP_KERNEL);
 	if (!rcc)
 		return -ENOMEM;
 
-	rcc->clks = rpm_smd_clks;
-	rcc->num_clks = num_clks;
+	clks = rcc->clks;
+	data = &rcc->data;
+	data->clks = clks;
+	data->clk_num = num_clks;
 
-	for (i = 0; i < num_clks; i++) {
-		if (!rpm_smd_clks[i])
+	for (i = 0; i <= desc->num_rpm_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
 			continue;
+		}
 
-		rpm_smd_clks[i]->rpm = rpm;
-
-		ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+		ret = clk_smd_rpm_handoff(hw_clks[i]);
 		if (ret)
 			goto err;
 	}
 
-	ret = clk_smd_rpm_enable_scaling(rpm);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < num_clks; i++) {
-		if (!rpm_smd_clks[i])
+	for (i = (desc->num_rpm_clks + 1); i < num_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
 			continue;
+		}
 
-		ret = devm_clk_hw_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+		ret = voter_clk_handoff(hw_clks[i]);
 		if (ret)
 			goto err;
 	}
 
-	ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
-				     rcc);
+	ret = clk_smd_rpm_enable_scaling();
 	if (ret)
 		goto err;
 
+	for (i = 0; i < num_clks; i++) {
+		if (!hw_clks[i]) {
+			clks[i] = ERR_PTR(-ENOENT);
+			continue;
+		}
+
+		clk = devm_clk_register(&pdev->dev, hw_clks[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			goto err;
+		}
+
+		clks[i] = clk;
+	}
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+				  data);
+	if (ret)
+		goto err;
+
+	if (is_bengal) {
+		/*
+		 * Keep an active vote on CXO in case no other driver
+		 * votes for it.
+		 */
+		clk_prepare_enable(bengal_bi_tcxo_ao.hw.clk);
+
+		/* Hold an active set vote for the cnoc_keepalive_a_clk */
+		clk_set_rate(cnoc_keepalive_a_clk.hw.clk, 19200000);
+		clk_prepare_enable(cnoc_keepalive_a_clk.hw.clk);
+
+		/* Hold an active set vote for the snoc_keepalive_a_clk */
+		clk_set_rate(snoc_keepalive_a_clk.hw.clk, 19200000);
+		clk_prepare_enable(snoc_keepalive_a_clk.hw.clk);
+	}
+
+	dev_info(&pdev->dev, "Registered RPM clocks\n");
+
 	return 0;
 err:
 	dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
 	return ret;
 }
 
+static int rpm_smd_clk_remove(struct platform_device *pdev)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+	return 0;
+}
+
 static struct platform_driver rpm_smd_clk_driver = {
 	.driver = {
 		.name = "qcom-clk-smd-rpm",
 		.of_match_table = rpm_smd_clk_match_table,
 	},
 	.probe = rpm_smd_clk_probe,
+	.remove = rpm_smd_clk_remove,
 };
 
 static int __init rpm_smd_clk_init(void)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index de15bf5..da9c986 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -43,6 +43,15 @@
 	  Say Y here to enable GPIO based extcon support. Note that GPIO
 	  extcon supports single state per extcon instance.
 
+config EXTCON_STORAGE_CD_GPIO
+	tristate "Storage card detect GPIO extcon support"
+	depends on GPIOLIB || COMPILE_TEST
+	help
+	  Say Y here to enable removable storage card detect GPIO based
+	  extcon support. It helps when different kinds of storage cards
+	  share one detect GPIO. Note that storage card detect GPIO extcon
+	  supports single state per extcon instance.
+
 config EXTCON_INTEL_INT3496
 	tristate "Intel INT3496 ACPI device extcon driver"
 	depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0888fde..3ecee74 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_EXTCON_ARIZONA)	+= extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)	+= extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
+obj-$(CONFIG_EXTCON_STORAGE_CD_GPIO)	+= extcon-storage-cd-gpio.o
 obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
 obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
diff --git a/drivers/extcon/extcon-storage-cd-gpio.c b/drivers/extcon/extcon-storage-cd-gpio.c
new file mode 100644
index 0000000..28189ec
--- /dev/null
+++ b/drivers/extcon/extcon-storage-cd-gpio.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+
+struct cd_gpio_extcon_data {
+	struct extcon_dev *edev;
+	int irq;
+	struct gpio_desc *gpiod;
+	unsigned int extcon_id;
+	unsigned long irq_flags;
+	struct pinctrl *pctrl;
+	struct pinctrl_state *pins_default;
+	unsigned int *supported_cable;
+};
+
+static irqreturn_t cd_gpio_threaded_irq_handler(int irq, void *dev_id)
+{
+	int state;
+	struct cd_gpio_extcon_data *data = dev_id;
+
+	state = gpiod_get_value_cansleep(data->gpiod);
+	extcon_set_state_sync(data->edev, data->extcon_id, state);
+
+	return IRQ_HANDLED;
+}
+
+static int extcon_parse_pinctrl_data(struct device *dev,
+				     struct cd_gpio_extcon_data *data)
+{
+	struct pinctrl *pctrl;
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	pctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pctrl)) {
+		ret = PTR_ERR(pctrl);
+		goto out;
+	}
+	data->pctrl = pctrl;
+
+	/* Look-up and keep the state handy to be used later */
+	data->pins_default = pinctrl_lookup_state(data->pctrl, "default");
+	if (IS_ERR(data->pins_default)) {
+		ret = PTR_ERR(data->pins_default);
+		dev_err(dev, "Can't get default pinctrl state, ret %d\n", ret);
+	}
+out:
+	return ret;
+}
+
+static int extcon_populate_data(struct device *dev,
+				struct cd_gpio_extcon_data *data)
+{
+	struct device_node *np = dev->of_node;
+	u32 val;
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "extcon-id", &data->extcon_id);
+	if (ret) {
+		dev_err(dev, "failed to read extcon-id property, %d\n", ret);
+		goto out;
+	}
+
+	ret = of_property_read_u32(np, "irq-flags", &val);
+	if (ret) {
+		dev_err(dev, "failed to read irq-flags property, %d\n", ret);
+		goto out;
+	}
+	data->irq_flags = val;
+
+	ret = extcon_parse_pinctrl_data(dev, data);
+	if (ret)
+		dev_err(dev, "failed to parse pinctrl data\n");
+
+out:
+	return ret;
+}
+
+static int cd_gpio_extcon_probe(struct platform_device *pdev)
+{
+	struct cd_gpio_extcon_data *data;
+	struct device *dev = &pdev->dev;
+	int state, ret;
+
+	data = devm_kzalloc(dev, sizeof(struct cd_gpio_extcon_data),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	if (!data->irq_flags) {
+		/* try populating cd gpio extcon data from device tree */
+		ret = extcon_populate_data(dev, data);
+		if (ret)
+			return ret;
+	}
+	if (!data->irq_flags || data->extcon_id >= EXTCON_NUM)
+		return -EINVAL;
+
+	ret = pinctrl_select_state(data->pctrl, data->pins_default);
+	if (ret < 0)
+		dev_err(dev, "pinctrl state select failed, ret %d\n", ret);
+
+	data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
+	if (IS_ERR(data->gpiod))
+		return PTR_ERR(data->gpiod);
+
+	data->irq = gpiod_to_irq(data->gpiod);
+	if (data->irq <= 0)
+		return data->irq;
+
+	data->supported_cable = devm_kzalloc(dev,
+					     sizeof(*data->supported_cable) * 2,
+					     GFP_KERNEL);
+	if (!data->supported_cable)
+		return -ENOMEM;
+
+	data->supported_cable[0] = data->extcon_id;
+	data->supported_cable[1] = EXTCON_NONE;
+	/* Allocate the memory of extcon device and register extcon device */
+	data->edev = devm_extcon_dev_allocate(dev, data->supported_cable);
+	if (IS_ERR(data->edev)) {
+		dev_err(dev, "failed to allocate extcon device\n");
+		return -ENOMEM;
+	}
+
+	ret = devm_extcon_dev_register(dev, data->edev);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_threaded_irq(dev, data->irq, NULL,
+				  cd_gpio_threaded_irq_handler,
+				  data->irq_flags | IRQF_ONESHOT,
+				  pdev->name, data);
+	if (ret < 0)
+		return ret;
+
+	ret = enable_irq_wake(data->irq);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, data);
+
+	/* Update initial state */
+	state = gpiod_get_value_cansleep(data->gpiod);
+	extcon_set_state(data->edev, data->extcon_id, state);
+
+	return 0;
+}
+
+static int cd_gpio_extcon_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cd_gpio_extcon_resume(struct device *dev)
+{
+	struct cd_gpio_extcon_data *data;
+	int state, ret = 0;
+
+	data = dev_get_drvdata(dev);
+	state = gpiod_get_value_cansleep(data->gpiod);
+	ret = extcon_set_state_sync(data->edev, data->extcon_id, state);
+	if (ret)
+		dev_err(dev, "%s: Failed to set extcon gpio state\n",
+				__func__);
+
+	return ret;
+}
+
+static const struct dev_pm_ops cd_gpio_extcon_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cd_gpio_extcon_resume)
+};
+
+#define EXTCON_GPIO_PMOPS (&cd_gpio_extcon_pm_ops)
+
+#else
+#define EXTCON_GPIO_PMOPS NULL
+#endif
+
+static const struct of_device_id extcon_cd_gpio_of_match[] = {
+	{ .compatible = "extcon-storage-cd-gpio"},
+	{},
+};
+
+static struct platform_driver cd_gpio_extcon_driver = {
+	.probe		= cd_gpio_extcon_probe,
+	.remove		= cd_gpio_extcon_remove,
+	.driver		= {
+		.name	= "extcon-storage-cd-gpio",
+		.pm	= EXTCON_GPIO_PMOPS,
+		.of_match_table = of_match_ptr(extcon_cd_gpio_of_match),
+	},
+};
+
+module_platform_driver(cd_gpio_extcon_driver);
+
+MODULE_DESCRIPTION("Storage card detect GPIO based extcon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 450a51b..063a0fc 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -793,8 +793,10 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
+	mutex_lock(&dev->mode_config.blob_lock);
 	list_for_each_entry(bt, &file_priv->blobs, head_file)
 		count++;
+	mutex_unlock(&dev->mode_config.blob_lock);
 
 	if (count >= MAX_BLOB_PROP_COUNT)
 		return -EINVAL;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1057b4c..df0f025 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1427,6 +1427,12 @@
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
 		device->mmu.features |= KGSL_MMU_IO_COHERENT;
 
+	/* Allocate the memstore for storing timestamps and other useful info */
+	status = kgsl_allocate_global(device, &device->memstore,
+		KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
+	if (status)
+		goto out;
+
 	status = adreno_ringbuffer_probe(adreno_dev);
 	if (status)
 		goto out;
@@ -1478,6 +1484,7 @@
 out:
 	if (status) {
 		adreno_ringbuffer_close(adreno_dev);
+		kgsl_free_global(device, &device->memstore);
 		kgsl_device_platform_remove(device);
 		device->pdev = NULL;
 	}
@@ -1558,6 +1565,8 @@
 	if (efuse_base != NULL)
 		iounmap(efuse_base);
 
+	kgsl_free_global(device, &device->memstore);
+
 	kgsl_device_platform_remove(device);
 
 	gmu_core_remove(device);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index b5fb54e..20c33f2 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -606,7 +606,6 @@
 	ADRENO_REG_CP_ME_RAM_DATA,
 	ADRENO_REG_CP_PFP_UCODE_DATA,
 	ADRENO_REG_CP_PFP_UCODE_ADDR,
-	ADRENO_REG_CP_WFI_PEND_CTR,
 	ADRENO_REG_CP_RB_BASE,
 	ADRENO_REG_CP_RB_BASE_HI,
 	ADRENO_REG_CP_RB_RPTR_ADDR_LO,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 680afa0..0f8f834 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1232,7 +1232,6 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, A3XX_CP_ME_RAM_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A3XX_CP_PFP_UCODE_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A3XX_CP_PFP_UCODE_ADDR),
-	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A3XX_CP_WFI_PEND_CTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A3XX_CP_RB_BASE),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A3XX_CP_RB_RPTR),
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 12f5c21..911ee41 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2498,7 +2498,6 @@
 
 /* Register offset defines for A5XX, in order of enum adreno_regs */
 static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
-	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index ed45f1a..cb45de5 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -151,8 +151,6 @@
 #define A6XX_CP_CTXRECORD_MAGIC_REF     0xAE399D6EUL
 /* Size of each CP preemption record */
 #define A6XX_CP_CTXRECORD_SIZE_IN_BYTES     (2112 * 1024)
-/* Size of the preemption counter block (in bytes) */
-#define A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE   (16 * 4)
 /* Size of the user context record block (in bytes) */
 #define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE (192 * 1024)
 /* Size of the performance counter save/restore block (in bytes) */
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 966c256..a8a1569 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -590,7 +590,7 @@
 }
 
 static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
-	struct adreno_ringbuffer *rb, uint64_t counteraddr)
+	struct adreno_ringbuffer *rb)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	int ret;
@@ -636,7 +636,7 @@
 	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
 		PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
 	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
-		PREEMPT_RECORD(counter), counteraddr);
+		PREEMPT_RECORD(counter), 0);
 
 	return 0;
 }
@@ -679,7 +679,6 @@
 	unsigned int i;
 
 	del_timer(&preempt->timer);
-	kgsl_free_global(device, &preempt->counters);
 	a6xx_preemption_iommu_close(adreno_dev);
 
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
@@ -706,7 +705,6 @@
 	struct adreno_ringbuffer *rb;
 	int ret;
 	unsigned int i;
-	uint64_t addr;
 
 	/* We are dependent on IOMMU to make preemption go on the CP side */
 	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
@@ -716,23 +714,11 @@
 
 	timer_setup(&preempt->timer, _a6xx_preemption_timer, 0);
 
-	/* Allocate mem for storing preemption counters */
-	ret = kgsl_allocate_global(device, &preempt->counters,
-		adreno_dev->num_ringbuffers *
-		A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
-		"preemption_counters");
-	if (ret)
-		goto err;
-
-	addr = preempt->counters.gpuaddr;
-
 	/* Allocate mem for storing preemption switch record */
 	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
-		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
+		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb);
 		if (ret)
 			goto err;
-
-		addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
 	}
 
 	ret = a6xx_preemption_iommu_init(adreno_dev);
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 6463881..a90e25a 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -11,42 +11,30 @@
 #include "adreno_pm4types.h"
 
 /*
- * _wait_reg() - make CP poll on a register
+ * a3xx_wait_reg() - make CP poll on a register
  * @cmds:	Pointer to memory where commands are to be added
  * @addr:	Register address to poll for
  * @val:	Value to poll for
  * @mask:	The value against which register value is masked
  * @interval:	wait interval
  */
-static unsigned int _wait_reg(struct adreno_device *adreno_dev,
+static unsigned int a3xx_wait_reg(struct adreno_device *adreno_dev,
 			unsigned int *cmds, unsigned int addr,
 			unsigned int val, unsigned int mask,
 			unsigned int interval)
 {
 	unsigned int *start = cmds;
 
-	if (adreno_is_a3xx(adreno_dev)) {
-		*cmds++ = cp_packet(adreno_dev, CP_WAIT_REG_EQ, 4);
-		*cmds++ = addr;
-		*cmds++ = val;
-		*cmds++ = mask;
-		*cmds++ = interval;
-	} else {
-		*cmds++ = cp_mem_packet(adreno_dev, CP_WAIT_REG_MEM, 5, 1);
-		*cmds++ = 0x3; /* Mem Space = Register,  Function = Equals */
-		cmds += cp_gpuaddr(adreno_dev, cmds, addr); /* Poll address */
-		*cmds++ = val; /* ref val */
-		*cmds++ = mask;
-		*cmds++ = interval;
-
-		/* WAIT_REG_MEM turns back on protected mode - push it off */
-		cmds += cp_protected_mode(adreno_dev, cmds, 0);
-	}
+	*cmds++ = cp_packet(adreno_dev, CP_WAIT_REG_EQ, 4);
+	*cmds++ = addr;
+	*cmds++ = val;
+	*cmds++ = mask;
+	*cmds++ = interval;
 
 	return cmds - start;
 }
 
-static unsigned int _vbif_lock(struct adreno_device *adreno_dev,
+static unsigned int a3xx_vbif_lock(struct adreno_device *adreno_dev,
 			unsigned int *cmds)
 {
 	unsigned int *start = cmds;
@@ -54,8 +42,7 @@
 	 * glue commands together until next
 	 * WAIT_FOR_ME
 	 */
-	cmds += _wait_reg(adreno_dev, cmds,
-			adreno_getreg(adreno_dev, ADRENO_REG_CP_WFI_PEND_CTR),
+	cmds += a3xx_wait_reg(adreno_dev, cmds, A3XX_CP_WFI_PEND_CTR,
 			1, 0xFFFFFFFF, 0xF);
 
 	/* MMU-500 VBIF stall */
@@ -67,14 +54,14 @@
 	*cmds++ = 0x1;
 
 	/* Wait for acknowledgment */
-	cmds += _wait_reg(adreno_dev, cmds,
+	cmds += a3xx_wait_reg(adreno_dev, cmds,
 			A3XX_VBIF_DDR_OUTPUT_RECOVERABLE_HALT_CTRL1,
 			1, 0xFFFFFFFF, 0xF);
 
 	return cmds - start;
 }
 
-static unsigned int _vbif_unlock(struct adreno_device *adreno_dev,
+static unsigned int a3xx_vbif_unlock(struct adreno_device *adreno_dev,
 				unsigned int *cmds)
 {
 	unsigned int *start = cmds;
@@ -95,7 +82,7 @@
 #define A3XX_GPU_OFFSET 0xa000
 
 /* This function is only needed for A3xx targets */
-static unsigned int _cp_smmu_reg(struct adreno_device *adreno_dev,
+static unsigned int a3xx_cp_smmu_reg(struct adreno_device *adreno_dev,
 				unsigned int *cmds,
 				enum kgsl_iommu_reg_map reg,
 				unsigned int num)
@@ -110,20 +97,20 @@
 }
 
 /* This function is only needed for A3xx targets */
-static unsigned int _tlbiall(struct adreno_device *adreno_dev,
+static unsigned int a3xx_tlbiall(struct adreno_device *adreno_dev,
 				unsigned int *cmds)
 {
 	unsigned int *start = cmds;
 	unsigned int tlbstatus = (A3XX_GPU_OFFSET +
 		kgsl_iommu_reg_list[KGSL_IOMMU_CTX_TLBSTATUS]) >> 2;
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBIALL, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBIALL, 1);
 	*cmds++ = 1;
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBSYNC, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TLBSYNC, 1);
 	*cmds++ = 0;
 
-	cmds += _wait_reg(adreno_dev, cmds, tlbstatus, 0,
+	cmds += a3xx_wait_reg(adreno_dev, cmds, tlbstatus, 0,
 			KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
 
 	return cmds - start;
@@ -205,17 +192,18 @@
 
 	cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
 
-	cmds += _vbif_lock(adreno_dev, cmds);
+	cmds += a3xx_vbif_lock(adreno_dev, cmds);
 
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_TTBR0, 2);
 	*cmds++ = lower_32_bits(ttbr0);
 	*cmds++ = upper_32_bits(ttbr0);
-	cmds += _cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR, 1);
+	cmds += a3xx_cp_smmu_reg(adreno_dev, cmds, KGSL_IOMMU_CTX_CONTEXTIDR,
+		1);
 	*cmds++ = contextidr;
 
-	cmds += _vbif_unlock(adreno_dev, cmds);
+	cmds += a3xx_vbif_unlock(adreno_dev, cmds);
 
-	cmds += _tlbiall(adreno_dev, cmds);
+	cmds += a3xx_tlbiall(adreno_dev, cmds);
 
 	/* wait for me to finish the TLBI */
 	cmds += cp_wait_for_me(adreno_dev, cmds);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 41047a0..57dca4a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -5084,12 +5084,6 @@
 	/* Initialize the memory pools */
 	kgsl_init_page_pools(device->pdev);
 
-	status = kgsl_allocate_global(device, &device->memstore,
-		KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
-
-	if (status != 0)
-		goto error_close_mmu;
-
 	/*
 	 * The default request type PM_QOS_REQ_ALL_CORES is
 	 * applicable to all CPU cores that are online and
@@ -5162,8 +5156,6 @@
 
 	idr_destroy(&device->context_idr);
 
-	kgsl_free_global(device, &device->memstore);
-
 	kgsl_mmu_close(device);
 
 	kgsl_pwrctrl_close(device);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 8813993..927418e 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -292,6 +292,7 @@
 
 	u32 snapshot_faultcount;	/* Total number of faults since boot */
 	bool force_panic;		/* Force panic after snapshot dump */
+	bool skip_ib_capture;		/* Skip IB capture after snapshot */
 	bool prioritize_unrecoverable;	/* Overwrite with new GMU snapshots */
 	bool set_isdb_breakpoint;	/* Set isdb registers before snapshot */
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 20e6024..b157a60 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -662,7 +662,7 @@
 				&kgsl_driver.stats.secure);
 
 			kgsl_cma_unlock_secure(memdesc);
-			attrs = (unsigned long)&memdesc->attrs;
+			attrs = memdesc->attrs;
 		} else
 			atomic_long_sub(memdesc->size,
 				&kgsl_driver.stats.coherent);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index b3a56a8..99bb67b 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -752,6 +752,9 @@
 	dev_err(device->dev, "%s snapshot created at pa %pa++0x%zx\n",
 			gmu_fault ? "GMU" : "GPU", &pa, snapshot->size);
 
+	if (device->skip_ib_capture)
+		BUG_ON(device->force_panic);
+
 	sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
 
 	/*
@@ -956,6 +959,22 @@
 	return count;
 }
 
+/* Show the break_ib request status */
+static ssize_t skip_ib_capture_show(struct kgsl_device *device, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", device->skip_ib_capture);
+}
+
+/* Store the panic request value to break_ib */
+static ssize_t skip_ib_capture_store(struct kgsl_device *device,
+						const char *buf, size_t count)
+{
+	int ret;
+
+	ret = kstrtobool(buf, &device->skip_ib_capture);
+	return ret ? ret : count;
+}
+
 /* Show the prioritize_unrecoverable status */
 static ssize_t prioritize_unrecoverable_show(
 		struct kgsl_device *device, char *buf)
@@ -1038,6 +1057,8 @@
 	snapshot_crashdumper_store);
 static SNAPSHOT_ATTR(snapshot_legacy, 0644, snapshot_legacy_show,
 	snapshot_legacy_store);
+static SNAPSHOT_ATTR(skip_ib_capture, 0644, skip_ib_capture_show,
+		skip_ib_capture_store);
 
 static ssize_t snapshot_sysfs_show(struct kobject *kobj,
 	struct attribute *attr, char *buf)
@@ -1083,6 +1104,7 @@
 	&attr_prioritize_unrecoverable.attr,
 	&attr_snapshot_crashdumper.attr,
 	&attr_snapshot_legacy.attr,
+	&attr_skip_ib_capture.attr,
 	NULL,
 };
 
@@ -1308,5 +1330,6 @@
 
 gmu_only:
 	complete_all(&snapshot->dump_gate);
-	BUG_ON(snapshot->device->force_panic);
+	BUG_ON(!snapshot->device->skip_ib_capture &&
+				snapshot->device->force_panic);
 }
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 6216417..af638f6 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -239,7 +239,7 @@
 
 	mutex_lock(&byte_cntr_data->byte_cntr_lock);
 
-	if (!byte_cntr_data->enable || !byte_cntr_data->block_size) {
+	if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
 		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 		return -EINVAL;
 	}
@@ -252,6 +252,7 @@
 
 	fp->private_data = byte_cntr_data;
 	nonseekable_open(in, fp);
+	byte_cntr_data->enable = true;
 	byte_cntr_data->read_active = true;
 	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
 	return 0;
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index be3f1ae..350c547 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -205,7 +205,7 @@
 	}
 
 	fwD.data = NULL;
-	ret = getFWdata(PATH_FILE_FW, &orig_data, &orig_size, 0);
+	ret = getFWdata_nocheck(PATH_FILE_FW, &orig_data, &orig_size, 0);
 	if (ret < OK) {
 		logError(1, "%s %s: impossible retrieve FW... ERROR %08X\n",
 			tag, __func__, ERROR_MEMH_READ);
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
index 3472532..9fc3926 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c
@@ -119,6 +119,38 @@
 	return OK;
 }
 
+int getFWdata_nocheck(const char *pathToFile, u8 **data, int *size, int from)
+{
+	const struct firmware *fw = NULL;
+	struct device *dev = getDev();
+	int res;
+
+	if (dev == NULL)
+		return ERROR_OP_NOT_ALLOW;
+
+	logError(0, "%s Read FW from BIN file!\n", tag);
+
+	res = firmware_request_nowarn(&fw, pathToFile, dev);
+	if (res) {
+		logError(1, "%s %s:No File found! ERROR %08X\n",
+			tag, __func__, ERROR_FILE_NOT_FOUND);
+		return ERROR_FILE_NOT_FOUND;
+	}
+
+	*size = fw->size;
+	*data = (u8 *)kmalloc_array((*size), sizeof(u8), GFP_KERNEL);
+	if (*data == NULL) {
+		logError(1, "%s %s: Impossible to allocate! ERROR %08X\n", tag, __func__, ERROR_ALLOC);
+		release_firmware(fw);
+		return ERROR_ALLOC;
+	}
+	memcpy(*data, (u8 *)fw->data, (*size));
+	release_firmware(fw);
+
+	logError(0, "%s %s:Finished!\n", tag, __func__);
+	return OK;
+}
+
 int getFWdata(const char *pathToFile, u8 **data, int *size, int from)
 {
 	const struct firmware *fw = NULL;
@@ -145,7 +177,7 @@
 	default:
 		logError(0, "%s Read FW from BIN file!\n", tag);
 
-		if (ftsInfo.u16_fwVer == FTS_LATEST_VERSION)
+		if (ftsInfo.u16_fwVer >= FTS_LATEST_VERSION)
 			return ERROR_FW_NO_UPDATE;
 
 		dev = getDev();
diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
index e712fe5..844c5da 100644
--- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
+++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.h
@@ -100,6 +100,7 @@
 int fillMemory(u32 address, u8 *data, int size);
 int getFirmwareVersion(u16 *fw_vers, u16 *config_id);
 int getFWdata(const char *pathToFile, u8 **data, int *size, int from);
+int getFWdata_nocheck(const char *pathToFile, u8 **data, int *size, int from);
 int parseBinFile(u8 *fw_data, int fw_size, struct Firmware *fw, int keep_cx);
 int readFwFile(const char *path, struct Firmware *fw, int keep_cx);
 int flash_burn(struct Firmware *fw, int force_burn, int keep_cx);
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index e34995b..324b78e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -17,7 +17,7 @@
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-debug.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/arm-smmu-debug.c b/drivers/iommu/arm-smmu-debug.c
new file mode 100644
index 0000000..40cd853
--- /dev/null
+++ b/drivers/iommu/arm-smmu-debug.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include "arm-smmu-regs.h"
+#include "arm-smmu-debug.h"
+
+u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+				bool write, u32 val)
+{
+	if (write) {
+		writel_relaxed(val, tbu_base + DEBUG_TESTBUS_SEL_TBU);
+		/* Make sure tbu select register is written to */
+		wmb();
+	} else {
+		return readl_relaxed(tbu_base + DEBUG_TESTBUS_SEL_TBU);
+	}
+	return 0;
+}
+
+u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base)
+{
+	return readl_relaxed(tbu_base + DEBUG_TESTBUS_TBU);
+}
+
+u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val)
+{
+	int offset;
+
+	if (testbus == CLK_TESTBUS) {
+		base = tcu_base;
+		offset = ARM_SMMU_TESTBUS_SEL_HLOS1_NS;
+	} else {
+		offset = ARM_SMMU_TESTBUS_SEL;
+	}
+
+	if (write) {
+		writel_relaxed(val, base + offset);
+		/* Make sure tcu select register is written to */
+		wmb();
+	} else {
+		return readl_relaxed(base + offset);
+	}
+
+	return 0;
+}
+
+u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base)
+{
+	return readl_relaxed(base + ARM_SMMU_TESTBUS);
+}
+
+static void arm_smmu_debug_dump_tbu_qns4_testbus(struct device *dev,
+					void __iomem *tbu_base)
+{
+	int i;
+	u32 reg;
+
+	for (i = 0 ; i < TBU_QNS4_BRIDGE_SIZE; ++i) {
+		reg = arm_smmu_debug_tbu_testbus_select(tbu_base, READ, 0);
+		reg = (reg & ~GENMASK(4, 0)) | i << 0;
+		arm_smmu_debug_tbu_testbus_select(tbu_base, WRITE, reg);
+		dev_info(dev, "testbus_sel: 0x%x Index: %d val: 0x%x\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base,
+						READ, 0), i,
+			arm_smmu_debug_tbu_testbus_output(tbu_base));
+	}
+}
+
+static void arm_smmu_debug_program_tbu_testbus(void __iomem *tbu_base,
+					int tbu_testbus)
+{
+	u32 reg;
+
+	reg = arm_smmu_debug_tbu_testbus_select(tbu_base, READ, 0);
+	reg = (reg & ~GENMASK(7, 0)) | tbu_testbus;
+	arm_smmu_debug_tbu_testbus_select(tbu_base, WRITE, reg);
+}
+
+void arm_smmu_debug_dump_tbu_testbus(struct device *dev, void __iomem *tbu_base,
+			int tbu_testbus_sel)
+{
+	if (tbu_testbus_sel & TBU_CLK_GATE_CONTROLLER_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TBU clk gate controller:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base,
+				TBU_CLK_GATE_CONTROLLER_TESTBUS);
+		dev_info(dev, "testbus_sel: 0x%x val: 0x%x\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base,
+						READ, 0),
+			arm_smmu_debug_tbu_testbus_output(tbu_base));
+	}
+
+	if (tbu_testbus_sel & TBU_QNS4_A2Q_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TBU qns4 a2q test bus:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base,
+				TBU_QNS4_A2Q_TESTBUS);
+		arm_smmu_debug_dump_tbu_qns4_testbus(dev, tbu_base);
+	}
+
+	if (tbu_testbus_sel & TBU_QNS4_Q2A_TESTBUS_SEL) {
+		dev_info(dev, "Dumping qns4 q2a test bus:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base,
+				TBU_QNS4_Q2A_TESTBUS);
+		arm_smmu_debug_dump_tbu_qns4_testbus(dev, tbu_base);
+	}
+
+	if (tbu_testbus_sel & TBU_MULTIMASTER_QCHANNEL_TESTBUS_SEL) {
+		dev_info(dev, "Dumping multi master qchannel:\n");
+		arm_smmu_debug_program_tbu_testbus(tbu_base,
+				TBU_MULTIMASTER_QCHANNEL_TESTBUS);
+		dev_info(dev, "testbus_sel: 0x%x val: 0x%x\n",
+			arm_smmu_debug_tbu_testbus_select(tbu_base,
+						READ, 0),
+			arm_smmu_debug_tbu_testbus_output(tbu_base));
+	}
+}
+
+static void arm_smmu_debug_program_tcu_testbus(struct device *dev,
+		void __iomem *base, void __iomem *tcu_base,
+		unsigned long mask, int start, int end, int shift,
+		bool print)
+{
+	u32 reg;
+	int i;
+
+	for (i = start; i < end; i++) {
+		reg = arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+				PTW_AND_CACHE_TESTBUS, READ, 0);
+		reg &= mask;
+		reg |= i << shift;
+		arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+				PTW_AND_CACHE_TESTBUS, WRITE, reg);
+		if (print)
+			dev_info(dev, "testbus_sel: 0x%x Index: %d val: 0x%x\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+				 tcu_base, PTW_AND_CACHE_TESTBUS, READ, 0),
+				 i, arm_smmu_debug_tcu_testbus_output(base));
+	}
+}
+
+void arm_smmu_debug_dump_tcu_testbus(struct device *dev, void __iomem *base,
+			void __iomem *tcu_base, int tcu_testbus_sel)
+{
+	int i;
+
+	if (tcu_testbus_sel & TCU_CACHE_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TCU cache testbus:\n");
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				TCU_CACHE_TESTBUS, 0, 1, 0, false);
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				~GENMASK(7, 0), 0, TCU_CACHE_LOOKUP_QUEUE_SIZE,
+				2, true);
+	}
+
+	if (tcu_testbus_sel & TCU_PTW_TESTBUS_SEL) {
+		dev_info(dev, "Dumping TCU PTW test bus:\n");
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base, 1,
+				TCU_PTW_TESTBUS, TCU_PTW_TESTBUS + 1, 0, false);
+
+		arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+				~GENMASK(7, 2), 0, TCU_PTW_INTERNAL_STATES,
+				2, true);
+
+		for (i = TCU_PTW_QUEUE_START;
+			i < TCU_PTW_QUEUE_START + TCU_PTW_QUEUE_SIZE; ++i) {
+			arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+					~GENMASK(7, 0), i, i + 1, 2, true);
+			arm_smmu_debug_program_tcu_testbus(dev, base, tcu_base,
+					~GENMASK(1, 0), TCU_PTW_TESTBUS_SEL2,
+					TCU_PTW_TESTBUS_SEL2 + 1, 0, false);
+			dev_info(dev, "testbus_sel: 0x%x Index: %d val: 0x%x\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+				 tcu_base, PTW_AND_CACHE_TESTBUS, READ, 0),
+				 i, arm_smmu_debug_tcu_testbus_output(base));
+		}
+	}
+
+	/* program ARM_SMMU_TESTBUS_SEL_HLOS1_NS to select TCU clk testbus*/
+	arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+			CLK_TESTBUS, WRITE, TCU_CLK_TESTBUS_SEL);
+	dev_info(dev, "Programming Tcu clk gate controller: testbus_sel: 0x%x\n",
+		arm_smmu_debug_tcu_testbus_select(base, tcu_base,
+						CLK_TESTBUS, READ, 0));
+}
diff --git a/drivers/iommu/arm-smmu-debug.h b/drivers/iommu/arm-smmu-debug.h
new file mode 100644
index 0000000..3202a17
--- /dev/null
+++ b/drivers/iommu/arm-smmu-debug.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define ARM_SMMU_TESTBUS_SEL			0x25E4
+#define ARM_SMMU_TESTBUS			0x25E8
+#define ARM_SMMU_TESTBUS_SEL_HLOS1_NS		0x8
+#define DEBUG_TESTBUS_SEL_TBU			0x50
+#define DEBUG_TESTBUS_TBU			0x58
+
+#define TCU_PTW_TESTBUS				(0x1 << 8)
+#define TCU_CACHE_TESTBUS			~TCU_PTW_TESTBUS
+#define TCU_PTW_TESTBUS_SEL			(0x1 << 1)
+#define TCU_PTW_INTERNAL_STATES			3
+#define TCU_PTW_TESTBUS_SEL2			3
+#define TCU_PTW_QUEUE_START			32
+#define TCU_PTW_QUEUE_SIZE			32
+#define TCU_CACHE_TESTBUS_SEL			0x1
+#define TCU_CACHE_LOOKUP_QUEUE_SIZE		32
+#define TCU_CLK_TESTBUS_SEL			0x200
+
+#define TBU_CLK_GATE_CONTROLLER_TESTBUS_SEL	0x1
+#define TBU_QNS4_A2Q_TESTBUS_SEL		(0x1 << 1)
+#define TBU_QNS4_Q2A_TESTBUS_SEL		(0x1 << 2)
+#define TBU_MULTIMASTER_QCHANNEL_TESTBUS_SEL	(0x1 << 3)
+#define TBU_CLK_GATE_CONTROLLER_TESTBUS		(0x1 << 6)
+#define TBU_QNS4_A2Q_TESTBUS			(0x2 << 6)
+#define TBU_QNS4_Q2A_TESTBUS			(0x5 << 5)
+#define TBU_MULTIMASTER_QCHANNEL_TESTBUS	(0x3 << 6)
+#define TBU_QNS4_BRIDGE_SIZE			32
+
+enum tcu_testbus {
+	PTW_AND_CACHE_TESTBUS,
+	CLK_TESTBUS,
+};
+
+enum testbus_sel {
+	SEL_TCU,
+	SEL_TBU,
+};
+
+enum testbus_ops {
+	TESTBUS_SELECT,
+	TESTBUS_OUTPUT,
+};
+
+#ifdef CONFIG_ARM_SMMU
+
+u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+					bool write, u32 val);
+u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base);
+u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val);
+u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base);
+void arm_smmu_debug_dump_tbu_testbus(struct device *dev, void __iomem *tbu_base,
+			int tbu_testbus_sel);
+void arm_smmu_debug_dump_tcu_testbus(struct device *dev, void __iomem *base,
+			void __iomem *tcu_base, int tcu_testbus_sel);
+
+#else
+static inline u32 arm_smmu_debug_tbu_testbus_select(void __iomem *tbu_base,
+				bool write, u32 val)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tbu_testbus_output(void __iomem *tbu_base)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tcu_testbus_select(void __iomem *base,
+		void __iomem *tcu_base, enum tcu_testbus testbus,
+		bool write, u32 val)
+{
+	return 0;
+}
+static inline u32 arm_smmu_debug_tcu_testbus_output(void __iomem *base)
+{
+	return 0;
+}
+static inline void arm_smmu_debug_dump_tbu_testbus(struct device *dev,
+			void __iomem *tbu_base, int tbu_testbus_sel) { }
+static inline void arm_smmu_debug_dump_tcu_testbus(struct device *dev,
+			void __iomem *base, void __iomem *tcu_base,
+			int tcu_testbus_sel) { }
+#endif
+
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fb793de..8a2b6ca 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -66,6 +66,9 @@
 #include <asm/dma-iommu.h>
 #include "io-pgtable.h"
 #include "arm-smmu-regs.h"
+#include "arm-smmu-debug.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
 
 /*
  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -345,6 +348,16 @@
 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + \
 							(cfg)->cbndx + 1)
 
+#define TCU_TESTBUS_SEL_ALL 0x3
+#define TBU_TESTBUS_SEL_ALL 0xf
+
+static int tbu_testbus_sel = TBU_TESTBUS_SEL_ALL;
+static int tcu_testbus_sel = TCU_TESTBUS_SEL_ALL;
+static struct dentry *debugfs_testbus_dir;
+
+module_param_named(tcu_testbus_sel, tcu_testbus_sel, int, 0644);
+module_param_named(tbu_testbus_sel, tbu_testbus_sel, int, 0644);
+
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
 	ARM_SMMU_DOMAIN_S2,
@@ -5571,6 +5584,288 @@
 	return 0;
 }
 
+static ssize_t arm_smmu_debug_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset,
+		enum testbus_sel tbu, enum testbus_ops ops)
+
+{
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+	int buf_len = sizeof(buf);
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, buf_len);
+
+	if (tbu == SEL_TBU) {
+		struct qsmmuv500_tbu_device *tbu_dev = file->private_data;
+		void __iomem *tbu_base = tbu_dev->base;
+		u32 val;
+
+		arm_smmu_power_on(tbu_dev->pwr);
+		if (ops == TESTBUS_SELECT)
+			val = arm_smmu_debug_tbu_testbus_select(tbu_base,
+							READ, 0);
+		else
+			val = arm_smmu_debug_tbu_testbus_output(tbu_base);
+		arm_smmu_power_off(tbu_dev->pwr);
+
+		snprintf(buf, buf_len, "0x%x\n", val);
+	} else {
+
+		struct arm_smmu_device *smmu = file->private_data;
+		struct qsmmuv500_archdata *data = smmu->archdata;
+		void __iomem *base = ARM_SMMU_GR0(smmu);
+		void __iomem *tcu_base = data->tcu_base;
+
+		arm_smmu_power_on(smmu->pwr);
+
+		if (ops == TESTBUS_SELECT) {
+			snprintf(buf, buf_len, "TCU clk testbus sel: 0x%0x\n",
+				arm_smmu_debug_tcu_testbus_select(base,
+					tcu_base, CLK_TESTBUS, READ, 0));
+			snprintf(buf + strlen(buf), buf_len - strlen(buf),
+				 "TCU testbus sel : 0x%0x\n",
+				 arm_smmu_debug_tcu_testbus_select(base,
+					 tcu_base, PTW_AND_CACHE_TESTBUS,
+					 READ, 0));
+		} else {
+			snprintf(buf, buf_len, "0x%0x\n",
+				 arm_smmu_debug_tcu_testbus_output(base));
+		}
+
+		arm_smmu_power_off(smmu->pwr);
+	}
+	buflen = min(count, strlen(buf));
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err_ratelimited("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;
+		retval = buflen;
+	}
+
+	return retval;
+}
+static ssize_t arm_smmu_debug_tcu_testbus_sel_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct arm_smmu_device *smmu = file->private_data;
+	struct qsmmuv500_archdata *data = smmu->archdata;
+	void __iomem *tcu_base = data->tcu_base;
+	void __iomem *base = ARM_SMMU_GR0(smmu);
+	char *comma;
+	char buf[100];
+	u64 sel, val;
+
+	if (count >= 100) {
+		pr_err_ratelimited("Value too large\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err_ratelimited("Couldn't copy from user\n");
+		return -EFAULT;
+	}
+
+	comma = strnchr(buf, count, ',');
+	if (!comma)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma = '\0';
+
+	if (kstrtou64(buf, 0, &sel))
+		goto invalid_format;
+
+	if (kstrtou64(comma + 1, 0, &val))
+		goto invalid_format;
+
+	if (sel != 1 && sel != 2)
+		goto invalid_format;
+
+	arm_smmu_power_on(smmu->pwr);
+
+	if (sel == 1)
+		arm_smmu_debug_tcu_testbus_select(base,
+				tcu_base, CLK_TESTBUS, WRITE, val);
+	else
+		arm_smmu_debug_tcu_testbus_select(base,
+				tcu_base, PTW_AND_CACHE_TESTBUS, WRITE, val);
+	arm_smmu_power_off(smmu->pwr);
+
+	return count;
+
+invalid_format:
+	pr_err_ratelimited("Invalid format. Expected: <1, testbus select> for tcu CLK testbus (or) <2, testbus select> for tcu PTW/CACHE testbuses\n");
+	return -EINVAL;
+}
+
+static ssize_t arm_smmu_debug_tcu_testbus_sel_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TCU, TESTBUS_SELECT);
+}
+
+static const struct file_operations arm_smmu_debug_tcu_testbus_sel_fops = {
+	.open	= simple_open,
+	.write	= arm_smmu_debug_tcu_testbus_sel_write,
+	.read	= arm_smmu_debug_tcu_testbus_sel_read,
+};
+
+static ssize_t arm_smmu_debug_tcu_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TCU, TESTBUS_OUTPUT);
+}
+
+static const struct file_operations arm_smmu_debug_tcu_testbus_fops = {
+	.open	= simple_open,
+	.read	= arm_smmu_debug_tcu_testbus_read,
+};
+
+static int qsmmuv500_tcu_testbus_init(struct arm_smmu_device *smmu)
+{
+	struct dentry *testbus_dir;
+
+	if (!iommu_debugfs_top)
+		return 0;
+
+	if (!debugfs_testbus_dir) {
+		debugfs_testbus_dir = debugfs_create_dir("testbus",
+						       iommu_debugfs_top);
+		if (!debugfs_testbus_dir) {
+			pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
+			return -ENODEV;
+		}
+	}
+
+	testbus_dir = debugfs_create_dir(dev_name(smmu->dev),
+				debugfs_testbus_dir);
+
+	if (!testbus_dir) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
+		       dev_name(smmu->dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("tcu_testbus_sel", 0400, testbus_dir, smmu,
+			&arm_smmu_debug_tcu_testbus_sel_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_sel debugfs file\n",
+		       dev_name(smmu->dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("tcu_testbus_output", 0400, testbus_dir, smmu,
+			&arm_smmu_debug_tcu_testbus_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tcu_testbus_output debugfs file\n",
+		       dev_name(smmu->dev));
+		goto err_rmdir;
+	}
+
+	return 0;
+err_rmdir:
+	debugfs_remove_recursive(testbus_dir);
+err:
+	return 0;
+}
+
+static ssize_t arm_smmu_debug_tbu_testbus_sel_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct qsmmuv500_tbu_device *tbu = file->private_data;
+	void __iomem *tbu_base = tbu->base;
+	u64 val;
+
+	if (kstrtoull_from_user(ubuf, count, 0, &val)) {
+		pr_err_ratelimited("Invalid format for tbu testbus select\n");
+		return -EINVAL;
+	}
+
+	arm_smmu_power_on(tbu->pwr);
+	arm_smmu_debug_tbu_testbus_select(tbu_base, WRITE, val);
+	arm_smmu_power_off(tbu->pwr);
+
+	return count;
+}
+
+static ssize_t arm_smmu_debug_tbu_testbus_sel_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TBU, TESTBUS_SELECT);
+}
+
+static const struct file_operations arm_smmu_debug_tbu_testbus_sel_fops = {
+	.open	= simple_open,
+	.write	= arm_smmu_debug_tbu_testbus_sel_write,
+	.read	= arm_smmu_debug_tbu_testbus_sel_read,
+};
+
+static ssize_t arm_smmu_debug_tbu_testbus_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *offset)
+{
+	return arm_smmu_debug_testbus_read(file, ubuf,
+			count, offset, SEL_TBU, TESTBUS_OUTPUT);
+}
+
+static const struct file_operations arm_smmu_debug_tbu_testbus_fops = {
+	.open	= simple_open,
+	.read	= arm_smmu_debug_tbu_testbus_read,
+};
+
+static int qsmmuv500_tbu_testbus_init(struct qsmmuv500_tbu_device *tbu)
+{
+	struct dentry *testbus_dir;
+
+	if (!iommu_debugfs_top)
+		return 0;
+
+	if (!debugfs_testbus_dir) {
+		debugfs_testbus_dir = debugfs_create_dir("testbus",
+						       iommu_debugfs_top);
+		if (!debugfs_testbus_dir) {
+			pr_err_ratelimited("Couldn't create iommu/testbus debugfs directory\n");
+			return -ENODEV;
+		}
+	}
+
+	testbus_dir = debugfs_create_dir(dev_name(tbu->dev),
+				debugfs_testbus_dir);
+
+	if (!testbus_dir) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s debugfs directory\n",
+		       dev_name(tbu->dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("tbu_testbus_sel", 0400, testbus_dir, tbu,
+			&arm_smmu_debug_tbu_testbus_sel_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_sel debugfs file\n",
+		       dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("tbu_testbus_output", 0400, testbus_dir, tbu,
+			&arm_smmu_debug_tbu_testbus_fops)) {
+		pr_err_ratelimited("Couldn't create iommu/testbus/%s/tbu_testbus_output debugfs file\n",
+		       dev_name(tbu->dev));
+		goto err_rmdir;
+	}
+
+	return 0;
+err_rmdir:
+	debugfs_remove_recursive(testbus_dir);
+err:
+	return 0;
+}
+
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
 	struct resource *res;
@@ -5600,6 +5895,8 @@
 	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
 	smmu->archdata = data;
 
+	qsmmuv500_tcu_testbus_init(smmu);
+
 	if (arm_smmu_is_static_cb(smmu))
 		return 0;
 
@@ -5681,6 +5978,8 @@
 		return PTR_ERR(tbu->pwr);
 
 	dev_set_drvdata(dev, tbu);
+	qsmmuv500_tbu_testbus_init(tbu);
+
 	return 0;
 }
 
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 082faac..394597d 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -634,7 +634,6 @@
 	dma_addr_t dma_addr;
 	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 
-	size = ALIGN(size, FAST_PAGE_SIZE);
 	if (coherent) {
 		page = alloc_pages(gfp, get_order(size));
 		addr = page ? page_address(page) : NULL;
@@ -695,6 +694,54 @@
 	return pages;
 }
 
+static void *__fast_smmu_alloc_contiguous(struct device *dev, size_t size,
+			dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
+	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
+	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
+	struct page *page;
+	dma_addr_t iova;
+	unsigned long flags;
+	void *coherent_addr;
+
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+					get_order(size), gfp & __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	iova = __fast_smmu_alloc_iova(mapping, attrs, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	if (iova == DMA_ERROR_CODE)
+		goto release_page;
+
+	if (av8l_fast_map_public(mapping->pgtbl_ops, iova, page_to_phys(page),
+				 size, prot))
+		goto release_iova;
+
+	coherent_addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+				remap_prot, __fast_smmu_alloc_contiguous);
+	if (!coherent_addr)
+		goto release_mapping;
+
+	if (!is_coherent)
+		__dma_flush_area(page_to_virt(page), size);
+
+	*handle = iova;
+	return coherent_addr;
+
+release_mapping:
+	av8l_fast_unmap_public(mapping->pgtbl_ops, iova, size);
+release_iova:
+	__fast_smmu_free_iova(mapping, iova, size);
+release_page:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
+}
+
 static void *fast_smmu_alloc(struct device *dev, size_t size,
 			     dma_addr_t *handle, gfp_t gfp,
 			     unsigned long attrs)
@@ -722,11 +769,14 @@
 	}
 
 	*handle = DMA_ERROR_CODE;
+	size = ALIGN(size, SZ_4K);
 
-	if (!gfpflags_allow_blocking(gfp)) {
+	if (!gfpflags_allow_blocking(gfp))
 		return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle,
 					      is_coherent);
-	}
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		return __fast_smmu_alloc_contiguous(dev, size, handle, gfp,
+						    attrs);
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
 	if (!pages) {
@@ -734,7 +784,6 @@
 		return NULL;
 	}
 
-	size = ALIGN(size, SZ_4K);
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
 		dev_err(dev, "no sg tablen\n");
 		goto out_free_pages;
@@ -802,52 +851,47 @@
 }
 
 static void fast_smmu_free(struct device *dev, size_t size,
-			   void *vaddr, dma_addr_t dma_handle,
+			   void *cpu_addr, dma_addr_t dma_handle,
 			   unsigned long attrs)
 {
 	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	struct vm_struct *area;
-	struct page **pages = NULL;
-	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
 	unsigned long flags;
 
-	size = ALIGN(size, SZ_4K);
+	size = ALIGN(size, FAST_PAGE_SIZE);
 
-	if (__in_atomic_pool(vaddr, size) || !is_vmalloc_addr(vaddr))
-		goto no_remap;
-
-	area = find_vm_area(vaddr);
-	if (WARN_ON_ONCE(!area))
-		return;
-
-	pages = area->pages;
-	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
-no_remap:
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
-	if (__in_atomic_pool(vaddr, size))
-		__free_from_pool(vaddr, size);
-	else if (is_vmalloc_addr(vaddr))
-		__fast_smmu_free_pages(pages, count);
-	else
-		__free_pages(virt_to_page(vaddr), get_order(size));
+
+	area = find_vm_area(cpu_addr);
+	if (area && area->pages) {
+		struct page **pages = area->pages;
+
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
+		__fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else if (!is_vmalloc_addr(cpu_addr)) {
+		__free_pages(virt_to_page(cpu_addr), get_order(size));
+	} else if (__in_atomic_pool(cpu_addr, size)) {
+		/* atomic pool keeps its remap; just return buffer to pool */
+		__free_from_pool(cpu_addr, size);
+	}
 }
 
-static int __vma_remap_range(struct vm_area_struct *vma, void *cpu_addr,
+/* __swiotlb_mmap_pfn is not currently exported. */
+static int fast_smmu_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn,
 			     size_t size)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = vma_pages(vma);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-
-	if (__in_atomic_pool(cpu_addr, size))
-		pfn = __atomic_get_phys(cpu_addr) >> PAGE_SHIFT;
-	else
-		pfn = page_to_pfn(virt_to_page(cpu_addr));
 
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
 		ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
@@ -863,31 +907,26 @@
 				size_t size, unsigned long attrs)
 {
 	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	struct page **pages;
-	int i, nr_pages, ret = 0;
 	bool coherent = is_dma_coherent(dev, attrs);
+	unsigned long pfn = 0;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     coherent);
-
-	if (__in_atomic_pool(cpu_addr, size) || !is_vmalloc_addr(cpu_addr))
-		return __vma_remap_range(vma, cpu_addr, size);
-
 	area = find_vm_area(cpu_addr);
-	if (!area)
-		return -EINVAL;
+	if (area && area->pages)
+		return iommu_dma_mmap(area->pages, size, vma);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		pfn = vmalloc_to_pfn(cpu_addr);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (__in_atomic_pool(cpu_addr, size))
+		pfn = __atomic_get_phys(cpu_addr) >> PAGE_SHIFT;
 
-	pages = area->pages;
-	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[i]);
-		if (ret)
-			break;
-		uaddr += PAGE_SIZE;
-	}
 
-	return ret;
+	if (pfn)
+		return fast_smmu_mmap_pfn(vma, pfn, size);
+
+	return -EINVAL;
 }
 
 static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -903,6 +942,8 @@
 	if (area && area->pages)
 		return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0,
 						 size, GFP_KERNEL);
+	else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
+		page = vmalloc_to_page(cpu_addr);
 	else if (!is_vmalloc_addr(cpu_addr))
 		page = virt_to_page(cpu_addr);
 	else if (__in_atomic_pool(cpu_addr, size))
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index fc57b6b..8a01cb4 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -150,10 +150,16 @@
 	LSNR_UNREG_KT_WAKEUP,
 };
 
+enum qseecom_unload_app_kthread_state {
+	UNLOAD_APP_KT_SLEEP = 0,
+	UNLOAD_APP_KT_WAKEUP,
+};
+
 static DEFINE_MUTEX(qsee_bw_mutex);
 static DEFINE_MUTEX(app_access_lock);
 static DEFINE_MUTEX(clk_access_lock);
 static DEFINE_MUTEX(listener_access_lock);
+static DEFINE_MUTEX(unload_app_pending_list_lock);
 
 
 struct sglist_info {
@@ -317,6 +323,16 @@
 	struct task_struct *unregister_lsnr_kthread_task;
 	wait_queue_head_t unregister_lsnr_kthread_wq;
 	atomic_t unregister_lsnr_kthread_state;
+
+	struct list_head  unload_app_pending_list_head;
+	struct task_struct *unload_app_kthread_task;
+	wait_queue_head_t unload_app_kthread_wq;
+	atomic_t unload_app_kthread_state;
+};
+
+struct qseecom_unload_app_pending_list {
+	struct list_head		list;
+	struct qseecom_dev_handle	*data;
 };
 
 struct qseecom_sec_buf_fd_info {
@@ -346,6 +362,7 @@
 	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
 	bool from_smcinvoke;
 	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
+	bool unload_pending;
 };
 
 struct qseecom_listener_handle {
@@ -424,6 +441,8 @@
 						void __user *argp);
 static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
 						void __user *argp);
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id);
 
 static int get_qseecom_keymaster_status(char *str)
 {
@@ -2851,8 +2870,11 @@
 		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
 			ret = __qseecom_process_incomplete_cmd(data, &resp);
 			if (ret) {
-				pr_err("process_incomplete_cmd failed err: %d\n",
-					ret);
+				/* TZ has created app_id, need to unload it */
+				pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+					ret, resp.result, resp.data,
+					load_img_req.img_name);
+				__qseecom_unload_app(data, resp.data);
 				ret = -EFAULT;
 				goto loadapp_err;
 			}
@@ -2953,18 +2975,61 @@
 	return ret;
 }
 
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id)
+{
+	struct qseecom_unload_app_ireq req;
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	/* Populate the structure for sending scm call to load image */
+	req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+	req.app_id = app_id;
+
+	/* SCM_CALL to unload the app */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload app (id = %d) failed\n", app_id);
+		return -EFAULT;
+	}
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		pr_warn("App (%d) is unloaded\n", app_id);
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
+				app_id, ret, resp.result, resp.data);
+		else
+			pr_warn("App (%d) is unloaded\n", app_id);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("app (%d) unload_failed!!\n", app_id);
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("unload app %d get unknown resp.result %d\n",
+				app_id, resp.result);
+		ret = -EFAULT;
+		break;
+	}
+	return ret;
+}
+
 static int qseecom_unload_app(struct qseecom_dev_handle *data,
 				bool app_crash)
 {
 	unsigned long flags;
 	unsigned long flags1;
 	int ret = 0;
-	struct qseecom_command_scm_resp resp;
 	struct qseecom_registered_app_list *ptr_app = NULL;
 	bool unload = false;
 	bool found_app = false;
 	bool found_dead_app = false;
-	bool scm_called = false;
+	bool doublecheck = false;
 
 	if (!data) {
 		pr_err("Invalid/uninitialized device handle\n");
@@ -3014,48 +3079,9 @@
 			(char *)data->client.app_name);
 
 	if (unload) {
-		struct qseecom_unload_app_ireq req;
-		/* Populate the structure for sending scm call to load image */
-		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
-		req.app_id = data->client.app_id;
+		ret = __qseecom_unload_app(data, data->client.app_id);
 
-		/* SCM_CALL to unload the app */
-		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
-				sizeof(struct qseecom_unload_app_ireq),
-				&resp, sizeof(resp));
-		scm_called = true;
-		if (ret) {
-			pr_err("scm_call to unload app (id = %d) failed\n",
-								req.app_id);
-			ret = -EFAULT;
-			goto scm_exit;
-		} else {
-			pr_warn("App id %d now unloaded\n", req.app_id);
-		}
-		if (resp.result == QSEOS_RESULT_FAILURE) {
-			pr_err("app (%d) unload_failed!!\n",
-					data->client.app_id);
-			ret = -EFAULT;
-			goto scm_exit;
-		}
-		if (resp.result == QSEOS_RESULT_SUCCESS)
-			pr_debug("App (%d) is unloaded!!\n",
-					data->client.app_id);
-		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
-			ret = __qseecom_process_incomplete_cmd(data, &resp);
-			if (ret) {
-				pr_err("process_incomplete_cmd fail err: %d\n",
-									ret);
-				goto scm_exit;
-			}
-		}
-	}
-
-scm_exit:
-	if (scm_called) {
 		/* double check if this app_entry still exists */
-		bool doublecheck = false;
-
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
 		list_for_each_entry(ptr_app,
 			&qseecom.registered_app_list_head, list) {
@@ -3075,6 +3101,7 @@
 			found_app = false;
 		}
 	}
+
 unload_exit:
 	if (found_app) {
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
@@ -3106,6 +3133,103 @@
 	return ret;
 }
 
+
+static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+
+	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
+		data->client.app_id, data->client.app_name,
+		data->client.unload_pending);
+	if (data->client.unload_pending)
+		return 0;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->data = data;
+	mutex_lock(&unload_app_pending_list_lock);
+	list_add_tail(&entry->list,
+		&qseecom.unload_app_pending_list_head);
+	mutex_unlock(&unload_app_pending_list_lock);
+	data->client.unload_pending = true;
+	pr_debug("unload ta %d pending\n", data->client.app_id);
+	return 0;
+}
+
+static void __wakeup_unload_app_kthread(void)
+{
+	atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_WAKEUP);
+	wake_up_interruptible(&qseecom.unload_app_kthread_wq);
+}
+
+static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	bool found = false;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
+					list) {
+		if ((entry->data->client.app_id == app_id) &&
+			(!strcmp(entry->data->client.app_name, app_name))) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+	return found;
+}
+
+static void __qseecom_processing_pending_unload_app(void)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	while (!list_empty(&qseecom.unload_app_pending_list_head)) {
+		pos = qseecom.unload_app_pending_list_head.next;
+		entry = list_entry(pos,
+			struct qseecom_unload_app_pending_list, list);
+		if (entry && entry->data) {
+			pr_debug("process pending unload app %d (%s)\n",
+				entry->data->client.app_id,
+				entry->data->client.app_name);
+			mutex_unlock(&unload_app_pending_list_lock);
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(entry->data, true);
+			if (ret)
+				pr_err("unload app %d pending failed %d\n",
+					entry->data->client.app_id, ret);
+			mutex_unlock(&app_access_lock);
+			mutex_lock(&unload_app_pending_list_lock);
+			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
+			kzfree(entry->data);
+		}
+		list_del(pos);
+		kzfree(entry);
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+}
+
+static int __qseecom_unload_app_kthread_func(void *data)
+{
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			qseecom.unload_app_kthread_wq,
+			atomic_read(&qseecom.unload_app_kthread_state)
+				== UNLOAD_APP_KT_WAKEUP);
+		pr_debug("kthread to unload app is called, state %d\n",
+			atomic_read(&qseecom.unload_app_kthread_state));
+		__qseecom_processing_pending_unload_app();
+		atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_SLEEP);
+	}
+	pr_warn("kthread to unload app stopped\n");
+	return 0;
+}
+
 static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
 						unsigned long virt)
 {
@@ -3552,6 +3676,13 @@
 		return -ENOENT;
 	}
 
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
+
 	if (qseecom.qsee_version < QSEE_VERSION_40) {
 		send_data_req.app_id = data->client.app_id;
 		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
@@ -4632,10 +4763,14 @@
 		break;
 	case QSEOS_RESULT_INCOMPLETE:
 		ret = __qseecom_process_incomplete_cmd(data, &resp);
-		if (ret)
-			pr_err("process_incomplete_cmd FAILED\n");
-		else
+		if (ret) {
+			pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+				ret, resp.result, resp.data, appname);
+			__qseecom_unload_app(data, resp.data);
+			ret = -EFAULT;
+		} else {
 			*app_id = resp.data;
+		}
 		break;
 	case QSEOS_RESULT_FAILURE:
 		pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
@@ -4824,6 +4959,7 @@
 	uint32_t app_id = 0;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -4961,6 +5097,7 @@
 	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
 
 	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
 	return 0;
 
 err:
@@ -4971,6 +5108,7 @@
 	kfree(*handle);
 	*handle = NULL;
 	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
 	return ret;
 }
 EXPORT_SYMBOL(qseecom_start_app);
@@ -4985,6 +5123,7 @@
 	bool found_handle = false;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -5025,7 +5164,7 @@
 		kzfree(kclient);
 		*handle = NULL;
 	}
-
+	__wakeup_unload_app_kthread();
 	return ret;
 }
 EXPORT_SYMBOL(qseecom_shutdown_app);
@@ -5039,6 +5178,7 @@
 	bool perf_enabled = false;
 
 	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
 		pr_err("Not allowed to be called in %d state\n",
@@ -7022,6 +7162,12 @@
 			(char *)data->client.app_name);
 		return -ENOENT;
 	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
 
 	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
 						(uintptr_t)req->req_ptr);
@@ -7221,6 +7367,12 @@
 			(char *)data->client.app_name);
 		return -ENOENT;
 	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
 
 	/* validate offsets */
 	for (i = 0; i < MAX_ION_FD; i++) {
@@ -7370,6 +7522,7 @@
 		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
 		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
 		__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
 
 	switch (cmd) {
 	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
@@ -7610,6 +7763,7 @@
 		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed load_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
 		break;
 	}
 	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
@@ -7628,6 +7782,7 @@
 		mutex_unlock(&app_access_lock);
 		if (ret)
 			pr_err("failed unload_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
 		break;
 	}
 	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
@@ -8122,9 +8277,12 @@
 			mutex_unlock(&listener_access_lock);
 			break;
 		case QSEECOM_CLIENT_APP:
-			mutex_lock(&app_access_lock);
-			ret = qseecom_unload_app(data, true);
-			mutex_unlock(&app_access_lock);
+			pr_debug("release app %d (%s)\n",
+				data->client.app_id, data->client.app_name);
+			if (data->client.app_id) {
+				free_private_data = false;
+				ret = qseecom_prepare_unload_app(data);
+			}
 			break;
 		case QSEECOM_SECURE_SERVICE:
 		case QSEECOM_GENERIC:
@@ -9164,6 +9322,8 @@
 	init_waitqueue_head(&qseecom.send_resp_wq);
 	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
 	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
+	INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
+	init_waitqueue_head(&qseecom.unload_app_kthread_wq);
 	qseecom.send_resp_flag = 0;
 	qseecom.qseos_version = QSEOS_VERSION_14;
 	qseecom.commonlib_loaded = false;
@@ -9223,7 +9383,7 @@
 	return 0;
 }
 
-static int qseecom_create_kthread_unregister_lsnr(void)
+static int qseecom_create_kthreads(void)
 {
 	int rc = 0;
 
@@ -9237,6 +9397,19 @@
 	}
 	atomic_set(&qseecom.unregister_lsnr_kthread_state,
 					LSNR_UNREG_KT_SLEEP);
+
+	/* create a kthread to process pending TA unload tasks */
+	qseecom.unload_app_kthread_task = kthread_run(
+			__qseecom_unload_app_kthread_func,
+			NULL, "qseecom-unload-ta");
+	if (IS_ERR(qseecom.unload_app_kthread_task)) {
+		rc = PTR_ERR(qseecom.unload_app_kthread_task);
+		pr_err("failed to create kthread to unload ta, rc = %d\n", rc);
+		kthread_stop(qseecom.unregister_lsnr_kthread_task);
+		return rc;
+	}
+	atomic_set(&qseecom.unload_app_kthread_state,
+					UNLOAD_APP_KT_SLEEP);
 	return 0;
 }
 
@@ -9345,17 +9518,20 @@
 	if (rc)
 		goto exit_deinit_bus;
 
-	rc = qseecom_create_kthread_unregister_lsnr();
+	rc = qseecom_create_kthreads();
 	if (rc)
 		goto exit_deinit_bus;
 
 	rc = qseecom_register_shmbridge();
 	if (rc)
-		goto exit_deinit_bus;
+		goto exit_stop_kthreads;
 
 	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
 	return 0;
 
+exit_stop_kthreads:
+	kthread_stop(qseecom.unload_app_kthread_task);
+	kthread_stop(qseecom.unregister_lsnr_kthread_task);
 exit_deinit_bus:
 	qseecom_deinit_bus();
 exit_deinit_clock:
@@ -9405,6 +9581,7 @@
 		qseecom_unload_commonlib_image();
 
 	qseecom_deregister_shmbridge();
+	kthread_stop(qseecom.unload_app_kthread_task);
 	kthread_stop(qseecom.unregister_lsnr_kthread_task);
 	qseecom_deinit_bus();
 	qseecom_deinit_clk();
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index f25c9cd..98f67ec 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2278,7 +2278,7 @@
 		host->desc_sz = 12;
 
 	sdhci_cqe_enable(mmc);
-	/* Set maximum timeout as per qcom spec */
+	/* Set maximum timeout as per qti spec */
 	sdhci_writeb(host, 0xF, SDHCI_TIMEOUT_CONTROL);
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index d1eb821..d94c651 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -123,7 +123,7 @@
 
 	/* determine the priority */
 	if (wil_is_special_packet(skb))
-		return 7;
+		skb->priority = 7;
 	else if (skb->priority == 0 || skb->priority > 7)
 		skb->priority = cfg80211_classify8021d(skb, NULL);
 
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index fd427c4..8eafc35 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -110,6 +110,9 @@
 		case CNSS_IN_SUSPEND_RESUME:
 			seq_puts(s, "IN_SUSPEND_RESUME");
 			continue;
+		case CNSS_IN_REBOOT:
+			seq_puts(s, "IN_REBOOT");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 5beeaa3..d0c3bfe 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -7,6 +7,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_wakeup.h>
+#include <linux/reboot.h>
 #include <linux/rwsem.h>
 #include <linux/suspend.h>
 #include <linux/timer.h>
@@ -1049,6 +1050,12 @@
 		goto out;
 	}
 
+	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
+		cnss_pr_err("Reboot is in progress, ignore recovery\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_err("Recovery is already in progress\n");
 		ret = -EINVAL;
@@ -1922,6 +1929,19 @@
 	destroy_workqueue(plat_priv->event_wq);
 }
 
+static int cnss_reboot_notifier(struct notifier_block *nb,
+				unsigned long action,
+				void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(nb, struct cnss_plat_data, reboot_nb);
+
+	set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
+	cnss_pr_dbg("Reboot is in progress with action %lu\n", action);
+
+	return NOTIFY_DONE;
+}
+
 static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 {
 	int ret;
@@ -1929,7 +1949,15 @@
 	timer_setup(&plat_priv->fw_boot_timer,
 		    cnss_bus_fw_boot_timeout_hdlr, 0);
 
-	register_pm_notifier(&cnss_pm_notifier);
+	ret = register_pm_notifier(&cnss_pm_notifier);
+	if (ret)
+		cnss_pr_err("Failed to register PM notifier, err = %d\n", ret);
+
+	plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
+	ret = register_reboot_notifier(&plat_priv->reboot_nb);
+	if (ret)
+		cnss_pr_err("Failed to register reboot notifier, err = %d\n",
+			    ret);
 
 	ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
 	if (ret)
@@ -1952,6 +1980,7 @@
 	complete_all(&plat_priv->cal_complete);
 	complete_all(&plat_priv->power_up_complete);
 	device_init_wakeup(&plat_priv->plat_dev->dev, false);
+	unregister_reboot_notifier(&plat_priv->reboot_nb);
 	unregister_pm_notifier(&cnss_pm_notifier);
 	del_timer(&plat_priv->fw_boot_timer);
 }
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 9e9068a..c251c2a 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -203,6 +203,7 @@
 	CNSS_COEX_CONNECTED,
 	CNSS_IMS_CONNECTED,
 	CNSS_IN_SUSPEND_RESUME,
+	CNSS_IN_REBOOT,
 };
 
 struct cnss_recovery_data {
@@ -305,6 +306,7 @@
 	struct cnss_esoc_info esoc_info;
 	struct cnss_bus_bw_info bus_bw_info;
 	struct notifier_block modem_nb;
+	struct notifier_block reboot_nb;
 	struct cnss_platform_cap cap;
 	struct pm_qos_request qos_request;
 	struct cnss_device_version device_version;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 29a50a4..0969bca 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -811,6 +811,8 @@
 		return "DEINIT";
 	case CNSS_MHI_POWER_ON:
 		return "POWER_ON";
+	case CNSS_MHI_POWERING_OFF:
+		return "POWERING_OFF";
 	case CNSS_MHI_POWER_OFF:
 		return "POWER_OFF";
 	case CNSS_MHI_FORCE_POWER_OFF:
@@ -888,9 +890,13 @@
 	case CNSS_MHI_POWER_ON:
 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
 		break;
+	case CNSS_MHI_POWERING_OFF:
+		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
+		break;
 	case CNSS_MHI_POWER_OFF:
 	case CNSS_MHI_FORCE_POWER_OFF:
 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
 		break;
@@ -1025,6 +1031,7 @@
 		return;
 
 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
 
 	if (!pci_priv->pci_link_down_ind)
 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
@@ -3417,6 +3424,10 @@
 	if (!plat_priv)
 		return -ENODEV;
 
+	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
+	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
+		return -EINVAL;
+
 	cnss_auto_resume(&pci_priv->pci_dev->dev);
 	cnss_pci_dump_misc_reg(pci_priv);
 	cnss_pci_dump_shadow_reg(pci_priv);
@@ -3424,9 +3435,7 @@
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
 	if (ret) {
 		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
-		    test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
-		    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
-			     &plat_priv->driver_state)) {
+		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
 			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
 			return 0;
 		}
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 17c56d0..d6b72de 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -16,6 +16,7 @@
 	CNSS_MHI_INIT,
 	CNSS_MHI_DEINIT,
 	CNSS_MHI_POWER_ON,
+	CNSS_MHI_POWERING_OFF,
 	CNSS_MHI_POWER_OFF,
 	CNSS_MHI_FORCE_POWER_OFF,
 	CNSS_MHI_SUSPEND,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e16bd41..0a8ca53 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -925,6 +925,29 @@
 	return 0;
 }
 
+static int msm_gpio_irq_set_affinity(struct irq_data *d,
+				const struct cpumask *dest, bool force)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data && test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return irq_chip_set_affinity_parent(d, dest, force);
+
+	return 0;
+}
+
+static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+	if (d->parent_data && test_bit(d->hwirq, pctrl->wakeup_masked_irqs))
+		return irq_chip_set_vcpu_affinity_parent(d, vcpu_info);
+
+	return 0;
+}
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1110,6 +1133,8 @@
 	pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
 	pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
 	pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
+	pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+	pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
 
 	chip->irq.chip = &pctrl->irq_chip;
 	chip->irq.handler = handle_edge_irq;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index eb89235..b1afd8c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -894,6 +894,11 @@
 			usleep_range(SUSPEND_MIN_SLEEP_RX,
 				SUSPEND_MAX_SLEEP_RX);
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_ODL");
+		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_COAL");
+			usleep_range(SUSPEND_MIN_SLEEP_RX,
+				SUSPEND_MAX_SLEEP_RX);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
 		} else
 			IPAERR("Unexpected event %d\n for client %d\n",
 				event, sys->ep->client);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 8f0933b..42054dd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1221,6 +1221,8 @@
 		(u32) ((ring->phys_base & 0xFFFFFFFF00000000) >> 32);
 	ring_info->event.ring_size = IPA_UC_EVENT_RING_SIZE;
 
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
 	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 		IPA_CPU_2_HW_CMD_SETUP_EVENT_RING, 0,
 		false, 10 * HZ);
@@ -1250,6 +1252,7 @@
 free_cmd:
 	dma_free_coherent(ipa3_ctx->uc_pdev,
 		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return res;
 }
 
@@ -1299,6 +1302,7 @@
 	quota_info->params.WdiQM.info.Interval =
 		IPA_UC_MON_INTERVAL;
 
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 		IPA_CPU_2_HW_CMD_QUOTA_MONITORING,
 		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
@@ -1319,6 +1323,7 @@
 
 free_cmd:
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 	return res;
 }
@@ -1418,6 +1423,8 @@
 	bw_info->params.WdiBw.info.Interval =
 		IPA_UC_MON_INTERVAL;
 
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
 	res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 		IPA_CPU_2_HW_CMD_BW_MONITORING,
 			IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
@@ -1432,6 +1439,7 @@
 
 free_cmd:
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 	return res;
 }
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 85db36c3..2e5f185 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -93,7 +93,7 @@
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
 	select EXTCON
-	select EXTCON_GPIO
+	select EXTCON_STORAGE_CD_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f1b2da9..ace39b4 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -376,23 +376,12 @@
 	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
 }
 
-static bool ufshcd_is_card_present(struct ufs_hba *hba)
+static inline bool ufshcd_is_device_offline(struct ufs_hba *hba)
 {
-	if (ufshcd_is_card_online(hba))
-		/*
-		 * TODO: need better way to ensure that this delay is
-		 * more than extcon's debounce-ms
-		 */
-		msleep(300);
-
-	/*
-	 * Check if card was online and offline/removed now or
-	 * card was already offline.
-	 */
-	if (ufshcd_is_card_offline(hba))
+	if (hba->extcon && ufshcd_is_card_offline(hba))
+		return true;
+	else
 		return false;
-
-	return true;
 }
 
 static int ufshcd_card_get_extcon_state(struct ufs_hba *hba)
@@ -512,6 +501,9 @@
 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg);
 static void ufshcd_register_pm_notifier(struct ufs_hba *hba);
 static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba);
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
+static void ufshcd_remove_scsi_devices(struct ufs_hba *hba);
+static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay);
 
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
@@ -1776,7 +1768,7 @@
 {
 	int ret = 0;
 
-	if (hba->extcon && ufshcd_is_card_offline(hba))
+	if (ufshcd_is_device_offline(hba))
 		return 0;
 
 	/* let's not get into low power until clock scaling is completed */
@@ -2158,6 +2150,9 @@
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_enable_clocks(hba);
 
+	if (ufshcd_is_device_offline(hba))
+		goto unblock_reqs;
+
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		/* Prevent gating in this path */
@@ -2200,7 +2195,7 @@
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
-		if (hba->extcon && ufshcd_is_card_offline(hba))
+		if (ufshcd_is_device_offline(hba))
 			break;
 		/*
 		 * Wait for the ungate work to complete if in progress.
@@ -2305,6 +2300,9 @@
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	if (ufshcd_is_device_offline(hba))
+		goto disable_clocks;
+
 	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 	    hba->hibern8_on_idle.is_enabled)
 		/*
@@ -2324,6 +2322,7 @@
 		ufshcd_set_link_hibern8(hba);
 	}
 
+disable_clocks:
 	/*
 	 * If auto hibern8 is enabled then the link will already
 	 * be in hibern8 state and the ref clock can be gated.
@@ -3027,6 +3026,8 @@
 {
 	hba->lrb[task_tag].issue_time_stamp = ktime_get();
 	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -3216,6 +3217,9 @@
 		return -EIO;
 	}
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	if (completion)
 		init_completion(&uic_cmd->done);
 
@@ -3706,7 +3710,7 @@
 		goto out_unlock;
 	}
 
-	if (hba->extcon && ufshcd_is_card_offline(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		set_host_byte(cmd, DID_BAD_TARGET);
 		cmd->scsi_done(cmd);
 		goto out_unlock;
@@ -4042,6 +4046,9 @@
 	unsigned long flags;
 	bool has_read_lock = false;
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
 	 * In shutdown context, it comes in with lock acquired.
@@ -5197,9 +5204,7 @@
 	ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
 
 out:
-	if (ret) {
-		if (hba->extcon && !ufshcd_is_card_present(hba))
-			goto skip_dump;
+	if (ret && !ufshcd_is_device_offline(hba)) {
 		ufsdbg_set_err_state(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
@@ -5208,7 +5213,6 @@
 		BUG_ON(hba->crash_on_err);
 	}
 
-skip_dump:
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
@@ -5260,6 +5264,9 @@
 	int ret = 0;
 	unsigned long flags;
 
+	if (ufshcd_is_device_offline(hba))
+		return -ENOLINK;
+
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -5363,10 +5370,9 @@
 
 	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 		ret = __ufshcd_uic_hibern8_enter(hba);
-		if (!ret)
+		if (!ret || ufshcd_is_device_offline(hba))
 			goto out;
-		else if (ret != -EAGAIN &&
-			 !(hba->extcon && ufshcd_is_card_offline(hba)))
+		else if (ret != -EAGAIN)
 			/* Unable to recover the link, so no point proceeding */
 			BUG_ON(1);
 	}
@@ -5396,7 +5402,7 @@
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
 		/* Unable to recover the link, so no point proceeding */
-		if (ret && !(hba->extcon && ufshcd_is_card_offline(hba)))
+		if (ret && !ufshcd_is_device_offline(hba))
 			BUG_ON(1);
 	} else {
 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
@@ -7026,7 +7032,7 @@
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	if (hba->extcon && !ufshcd_is_card_present(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->saved_err = 0;
 		hba->saved_uic_err = 0;
@@ -7226,7 +7232,7 @@
 
 	hba = container_of(work, struct ufs_hba, rls_work);
 
-	if (hba->extcon && !ufshcd_is_card_present(hba))
+	if (ufshcd_is_device_offline(hba))
 		return;
 
 	pm_runtime_get_sync(hba->dev);
@@ -7398,7 +7404,7 @@
 			queue_eh_work = true;
 	}
 
-	if (hba->extcon && ufshcd_is_card_offline(hba)) {
+	if (ufshcd_is_device_offline(hba)) {
 		/* ignore UIC errors if card is offline */
 		retval |= IRQ_HANDLED;
 	} else if (queue_eh_work) {
@@ -7528,7 +7534,7 @@
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	} while (intr_status && --retries);
 
-	if (retval == IRQ_NONE) {
+	if (retval == IRQ_NONE && !ufshcd_is_device_offline(hba)) {
 		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 					__func__, intr_status);
 		ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
@@ -8007,7 +8013,7 @@
 	 * There is no point proceeding even after failing
 	 * to recover after multiple retries.
 	 */
-	BUG_ON(err && ufshcd_is_embedded_dev(hba));
+	BUG_ON(err && ufshcd_is_embedded_dev(hba) && !hba->extcon);
 
 	/*
 	 * After reset the door-bell might be cleared, complete
@@ -8782,11 +8788,6 @@
 	ktime_t start = ktime_get();
 
 reinit:
-	if (hba->extcon && (ufshcd_card_get_extcon_state(hba) <= 0)) {
-		ret = -ENOLINK;
-		goto out;
-	}
-
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
@@ -8946,10 +8947,13 @@
 					goto out;
 			}
 			hba->clk_scaling.is_allowed = true;
+			hba->clk_scaling.is_suspended = false;
 		}
 
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
+		if (hba->extcon)
+			hba->card_rpm_paired = true;
 	}
 
 	/*
@@ -8962,21 +8966,17 @@
 	if (ret) {
 		ufshcd_set_ufs_dev_poweroff(hba);
 		ufshcd_set_link_off(hba);
-		if (hba->extcon) {
-			if (!ufshcd_is_card_online(hba))
-				ufsdbg_clr_err_state(hba);
-			ufshcd_set_card_offline(hba);
-		}
-	} else if (hba->extcon) {
-		ufshcd_set_card_online(hba);
 	}
 
 	/*
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
 		pm_runtime_put_sync(hba->dev);
+		if (hba->extcon)
+			hba->card_rpm_paired = true;
+	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -8984,92 +8984,160 @@
 	return ret;
 }
 
-static void ufshcd_remove_device(struct ufs_hba *hba)
+static void ufshcd_remove_scsi_devices(struct ufs_hba *hba)
 {
+	struct Scsi_Host *shost = hba->host;
 	struct scsi_device *sdev;
-	struct scsi_device *sdev_cache[UFS_MAX_LUS];
-	int sdev_count = 0, i;
 	unsigned long flags;
 
-	hba->card_removal_in_progress = 1;
-	ufshcd_hold_all(hba);
-	/* Reset the host controller */
+	spin_lock_irqsave(shost->host_lock, flags);
+restart:
+	list_for_each_entry(sdev, &shost->__devices, siblings) {
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL ||
+		    !get_device(&sdev->sdev_gendev))
+			continue;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		scsi_remove_device(sdev);
+		put_device(&sdev->sdev_gendev);
+		spin_lock_irqsave(shost->host_lock, flags);
+		goto restart;
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+static void ufshcd_remove_card(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->silence_err_logs = true;
-	ufshcd_hba_stop(hba, false);
+	ufshcd_set_card_removal_ongoing(hba);
+	ufshcd_set_card_offline(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ufshcd_set_ufs_dev_poweroff(hba);
-	ufshcd_set_link_off(hba);
-	__ufshcd_shutdown_clkscaling(hba);
-
+	/* Turn on host vreg and clocks */
+	ufshcd_setup_hba_vreg(hba, true);
+	ufshcd_enable_clocks(hba);
+	/* Make sure clocks are stable */
+	usleep_range(50, 60);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_hba_stop(hba, false);
+	/* Clear interrupt status and disable interrupts */
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+		      REG_INTERRUPT_STATUS);
+	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+	/*
+	 * Make sure that UFS interrupts are disabled and
+	 * any pending interrupt status is cleared.
+	 */
+	mb();
+	hba->silence_err_logs = true;
 	/* Complete requests that have door-bell cleared by h/w */
 	ufshcd_complete_requests(hba);
-
-	/* remove all scsi devices */
-	list_for_each_entry(sdev, &hba->host->__devices, siblings) {
-		if (sdev_count < UFS_MAX_LUS) {
-			sdev_cache[sdev_count] = sdev;
-			sdev_count++;
-		}
-	}
-
-	for (i = 0; i < sdev_count; i++)
-		scsi_remove_device(sdev_cache[i]);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/* Complete the flying async UIC command if there is one */
+	/* Complete the sync/async UIC command if there is one */
 	if (hba->uic_async_done)
 		complete(hba->uic_async_done);
-	hba->silence_err_logs = false;
+	else if (hba->active_uic_cmd)
+		complete(&hba->active_uic_cmd->done);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ufshcd_release_all(hba);
-	hba->card_removal_in_progress = 0;
+	cancel_delayed_work_sync(&hba->card_detect_work);
+	/* Flush runtime PM events */
+	pm_runtime_get_sync(hba->dev);
+	/* Clear runtime PM errors if any */
+	pm_runtime_set_active(hba->dev);
+	cancel_work_sync(&hba->rls_work);
+	cancel_work_sync(&hba->eh_work);
+	cancel_work_sync(&hba->eeh_work);
+	hba->auto_bkops_enabled = false;
+	__ufshcd_shutdown_clkscaling(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_clear_eh_in_progress(hba);
+	hba->saved_err = 0;
+	hba->saved_uic_err = 0;
+	hba->saved_ce_err = 0;
+	hba->auto_h8_err = false;
+	hba->force_host_reset = false;
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	hba->silence_err_logs = false;
+	ufsdbg_clr_err_state(hba);
+	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_link_off(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	/*
+	 * Remove scsi devices only when we are not in middle
+	 * of system resume events.
+	 */
+	if (!down_trylock(&hba->sdev_sema)) {
+		ufshcd_remove_scsi_devices(hba);
+		up(&hba->sdev_sema);
+	}
+	ufshcd_clear_card_removal_ongoing(hba);
+	pm_runtime_put_sync(hba->dev);
 }
 
 static void ufshcd_card_detect_handler(struct work_struct *work)
 {
 	struct ufs_hba *hba;
+	unsigned long flags;
+	int ret;
 
-	hba = container_of(work, struct ufs_hba, card_detect_work);
+	hba = container_of(to_delayed_work(work), struct ufs_hba,
+			   card_detect_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!ufshcd_is_card_removal_ongoing(hba))
+		ufshcd_set_card_online(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) {
 		pm_runtime_get_sync(hba->dev);
-		ufshcd_detect_device(hba);
-		/* ufshcd_probe_hba() calls pm_runtime_put_sync() on exit */
-	} else if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device) {
-		pm_runtime_get_sync(hba->dev);
-		ufshcd_remove_device(hba);
-		pm_runtime_put_sync(hba->dev);
-		ufsdbg_clr_err_state(hba);
+		if (ufshcd_is_clkgating_allowed(hba)) {
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			hba->clk_gating.active_reqs = 0;
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+		}
+		hba->card_rpm_paired = false;
+		ret = ufshcd_detect_device(hba);
+		if (ret) {
+			ufshcd_set_card_offline(hba);
+			ufsdbg_clr_err_state(hba);
+			dev_err(hba->dev, "%s: device detect failed: %d\n",
+				__func__, ret);
+		}
+
+		/*
+		 * pm_runtime_put_sync() may not be called if
+		 * failure happens before or inside ufshcd_probe_hba()
+		 */
+		if (!hba->card_rpm_paired) {
+			cancel_work_sync(&hba->eh_work);
+			pm_runtime_put_sync(hba->dev);
+		}
 	}
 }
 
+static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay)
+{
+	if (hba->extcon && !hba->card_detect_disabled)
+		schedule_delayed_work(&hba->card_detect_work, delay);
+}
+
 static int ufshcd_card_detect_notifier(struct notifier_block *nb,
 				       unsigned long event, void *ptr)
 {
 	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
 
-	if (event) {
-		if (hba->card_removal_in_progress)
-			goto out;
-		ufshcd_set_card_online(hba);
-	} else
-		ufshcd_set_card_offline(hba);
-
-	if (ufshcd_is_card_offline(hba) && !hba->sdev_ufs_device)
-		goto out;
-
 	/*
-	 * card insertion/removal are very infrequent events and having this
+	 * card insertion/removal are not frequent events and having this
 	 * message helps if there is some issue with card detection/removal.
 	 */
 	dev_info(hba->dev, "%s: card %s notification rcvd\n",
-		__func__, ufshcd_is_card_online(hba) ? "inserted" : "removed");
+		__func__, event ? "inserted" : "removed");
 
-	schedule_work(&hba->card_detect_work);
-out:
+	if (event)
+		ufshcd_detect_card(hba, msecs_to_jiffies(200));
+	else
+		ufshcd_remove_card(hba);
+
 	return NOTIFY_DONE;
 }
 
@@ -9116,15 +9184,24 @@
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 
-	/*
-	 * Don't allow clock gating and hibern8 enter for faster device
-	 * detection.
-	 */
-	ufshcd_hold_all(hba);
-	ufshcd_probe_hba(hba);
-	ufshcd_release_all(hba);
-
-	ufshcd_extcon_register(hba);
+	if (hba->extcon) {
+		ufshcd_hba_stop(hba, true);
+		ufshcd_set_ufs_dev_poweroff(hba);
+		ufshcd_set_link_off(hba);
+		ufshcd_set_card_offline(hba);
+		pm_runtime_put_sync(hba->dev);
+		ufshcd_extcon_register(hba);
+		if (ufshcd_card_get_extcon_state(hba) > 0)
+			ufshcd_detect_card(hba, 0);
+	} else {
+		/*
+		 * Don't allow clock gating and hibern8 enter for faster device
+		 * detection.
+		 */
+		ufshcd_hold_all(hba);
+		ufshcd_probe_hba(hba);
+		ufshcd_release_all(hba);
+	}
 }
 
 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
@@ -9606,6 +9683,12 @@
 	struct ufs_vreg_info *info = &hba->vreg_info;
 	int ret = 0;
 
+	if (hba->extcon)
+		mutex_lock(&hba->card_mutex);
+
+	if (!on && ufshcd_is_card_removal_ongoing(hba))
+		goto out;
+
 	if (info->vdd_hba) {
 		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
@@ -9613,6 +9696,9 @@
 			ufshcd_vops_update_sec_cfg(hba, on);
 	}
 
+out:
+	if (hba->extcon)
+		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -9706,13 +9792,19 @@
 	bool clk_state_changed = false;
 
 	if (list_empty(head))
-		goto out;
+		return ret;
+
+	if (hba->extcon)
+		mutex_lock(&hba->card_mutex);
+
+	if (!on && ufshcd_is_card_removal_ongoing(hba))
+		goto out_unlock;
 
 	/* call vendor specific bus vote before enabling the clocks */
 	if (on) {
 		ret = ufshcd_vops_set_bus_vote(hba, on);
 		if (ret)
-			return ret;
+			goto out_unlock;
 	}
 
 	/*
@@ -9723,7 +9815,7 @@
 	if (!on) {
 		ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
 		if (ret)
-			return ret;
+			goto out_unlock;
 	}
 
 	list_for_each_entry(clki, head, list) {
@@ -9795,6 +9887,9 @@
 		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 			(on ? "on" : "off"),
 			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+out_unlock:
+	if (hba->extcon)
+		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -10215,26 +10310,27 @@
 {
 	struct ufs_hba *hba = container_of(
 		notify_block, struct ufs_hba, pm_notify);
-	int ret = 0;
 
 	if (!hba->extcon)
-		return ret;
+		return 0;
 
 	switch (mode) {
 	case PM_SUSPEND_PREPARE:
-		ret = ufshcd_extcon_unregister(hba);
-		if (ret)
-			break;
-		cancel_work_sync(&hba->card_detect_work);
+		hba->card_detect_disabled = true;
+		cancel_delayed_work_sync(&hba->card_detect_work);
+		down(&hba->sdev_sema);
 		break;
 	case PM_POST_SUSPEND:
-		ret = ufshcd_extcon_register(hba);
-		if (ret)
-			break;
-		extcon_sync(hba->extcon, EXTCON_MECHANICAL);
+		if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device)
+			ufshcd_remove_scsi_devices(hba);
+		up(&hba->sdev_sema);
+		hba->card_detect_disabled = false;
+		if (ufshcd_card_get_extcon_state(hba) > 0 &&
+		    !hba->sdev_ufs_device)
+			ufshcd_detect_card(hba, 0);
 	}
 
-	return ret;
+	return 0;
 }
 
 static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
@@ -10462,8 +10558,7 @@
 
 	if (hba->extcon &&
 	    (ufshcd_is_card_offline(hba) ||
-	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) ||
-	     !ufshcd_card_get_extcon_state(hba)))
+	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
 		goto skip_dev_ops;
 
 	if (ufshcd_is_link_hibern8(hba)) {
@@ -10752,6 +10847,11 @@
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
+	if (hba->extcon) {
+		hba->card_detect_disabled = true;
+		cancel_delayed_work_sync(&hba->card_detect_work);
+	}
+
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_hold_all(hba);
 	ufshcd_mark_shutdown_ongoing(hba);
@@ -10962,9 +11062,12 @@
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
-	INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
+	INIT_DELAYED_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
 	INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
 
+	sema_init(&hba->sdev_sema, 1);
+	mutex_init(&hba->card_mutex);
+
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 15122de..70ea549 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -766,8 +766,13 @@
  * @card_detect_nb: card detector notifier registered with @extcon
  * @card_detect_work: work to exectute the card detect function
  * @card_state: card state event, enum ufshcd_card_state defines possible states
- * @card_removal_in_progress: to track card removal progress
+ * @card_removal_in_prog: flag to track card removal progress
  * @pm_notify: used to register for PM events
+ * @sdev_sema: semaphore to protect scsi devices from being removed
+ * @card_mutex: mutex to serialize ON/OFF sequences of hba vregs and clocks
+ * @card_rpm_paired: indicates whether runtime PM events are paired after card
+ *  detection is finished
+ * @card_detect_disabled: to enable/disable card detect
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -1003,10 +1008,14 @@
 
 	struct extcon_dev *extcon;
 	struct notifier_block card_detect_nb;
-	struct work_struct card_detect_work;
+	struct delayed_work card_detect_work;
 	atomic_t card_state;
-	int card_removal_in_progress;
+	unsigned long card_removal_in_prog;
 	struct notifier_block pm_notify;
+	struct semaphore sdev_sema;
+	struct mutex card_mutex;
+	bool card_rpm_paired;
+	bool card_detect_disabled;
 
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
@@ -1075,6 +1084,21 @@
 	bool force_g4;
 };
 
+static inline void ufshcd_set_card_removal_ongoing(struct ufs_hba *hba)
+{
+	set_bit(0, &hba->card_removal_in_prog);
+}
+
+static inline void ufshcd_clear_card_removal_ongoing(struct ufs_hba *hba)
+{
+	clear_bit(0, &hba->card_removal_in_prog);
+}
+
+static inline bool ufshcd_is_card_removal_ongoing(struct ufs_hba *hba)
+{
+	return !!(test_bit(0, &hba->card_removal_in_prog));
+}
+
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
 {
 	set_bit(0, &hba->shutdown_in_prog);
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 459ebea..3472b6c 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -13,6 +13,8 @@
 #include <linux/of_address.h>
 #include <soc/qcom/minidump.h>
 #include <soc/qcom/memory_dump.h>
+#include <soc/qcom/qtee_shmbridge.h>
+#include <soc/qcom/secure_buffer.h>
 #include <soc/qcom/scm.h>
 #include <linux/of_device.h>
 #include <linux/dma-mapping.h>
@@ -167,7 +169,7 @@
 }
 EXPORT_SYMBOL(msm_dump_data_register_nominidump);
 
-static int __init init_memory_dump(void)
+static int init_memory_dump(void *dump_vaddr, phys_addr_t phys_addr)
 {
 	struct msm_dump_table *table;
 	struct msm_dump_entry entry;
@@ -188,47 +190,31 @@
 		return -ENOMEM;
 	}
 
-	memdump.table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
-	if (!memdump.table) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	memdump.table = dump_vaddr;
 	memdump.table->version = MSM_DUMP_TABLE_VERSION;
-	memdump.table_phys = virt_to_phys(memdump.table);
-	memcpy_toio(imem_base, &memdump.table_phys, sizeof(memdump.table_phys));
+	memdump.table_phys = phys_addr;
+	memcpy_toio(imem_base, &memdump.table_phys,
+			sizeof(memdump.table_phys));
 	/* Ensure write to imem_base is complete before unmapping */
 	mb();
 	pr_info("MSM Memory Dump base table set up\n");
 
 	iounmap(imem_base);
-
-	table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
-	if (!table) {
-		ret = -ENOMEM;
-		goto err1;
-	}
+	dump_vaddr +=  sizeof(*table);
+	phys_addr += sizeof(*table);
+	table = dump_vaddr;
 	table->version = MSM_DUMP_TABLE_VERSION;
-
 	entry.id = MSM_DUMP_TABLE_APPS;
-	entry.addr = virt_to_phys(table);
+	entry.addr = phys_addr;
 	ret = msm_dump_table_register(&entry);
 	if (ret) {
-		pr_info("mem dump apps data table register failed\n");
-		goto err2;
+		pr_err("mem dump apps data table register failed\n");
+		return ret;
 	}
 	pr_info("MSM Memory Dump apps data table set up\n");
 
 	return 0;
-err2:
-	kfree(table);
-err1:
-	kfree(memdump.table);
-	return ret;
-err0:
-	iounmap(imem_base);
-	return ret;
 }
-early_initcall(init_memory_dump);
 
 #ifdef CONFIG_MSM_DEBUG_LAR_UNLOCK
 static int __init init_debug_lar_unlock(void)
@@ -253,17 +239,28 @@
 early_initcall(init_debug_lar_unlock);
 #endif
 
-static int mem_dump_probe(struct platform_device *pdev)
+#define MSM_DUMP_DATA_SIZE sizeof(struct msm_dump_data)
+static int mem_dump_alloc(struct platform_device *pdev)
 {
 	struct device_node *child_node;
 	const struct device_node *node = pdev->dev.of_node;
-	static dma_addr_t dump_addr;
-	static void *dump_vaddr;
 	struct msm_dump_data *dump_data;
 	struct msm_dump_entry dump_entry;
-	int ret;
+	struct md_region md_entry;
+	size_t total_size;
 	u32 size, id;
+	int ret, no_of_nodes;
+	dma_addr_t dma_handle;
+	phys_addr_t phys_addr;
+	struct sg_table mem_dump_sgt;
+	void *dump_vaddr;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+	u64 shm_bridge_handle;
 
+	total_size = size = ret = no_of_nodes = 0;
+	/* For dump table registration with IMEM */
+	total_size = sizeof(struct msm_dump_table) * 2;
 	for_each_available_child_of_node(node, child_node) {
 		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
 		if (ret) {
@@ -272,6 +269,53 @@
 			continue;
 		}
 
+		total_size += size;
+		no_of_nodes++;
+	}
+
+	total_size += (MSM_DUMP_DATA_SIZE * no_of_nodes);
+	total_size = ALIGN(total_size, SZ_4K);
+	dump_vaddr = dma_alloc_coherent(&pdev->dev, total_size,
+						&dma_handle, GFP_KERNEL);
+	if (!dump_vaddr) {
+		dev_err(&pdev->dev, "Couldn't get memory for dump entries\n");
+		return -ENOMEM;
+	}
+
+	dma_get_sgtable(&pdev->dev, &mem_dump_sgt, dump_vaddr,
+						dma_handle, total_size);
+	phys_addr = page_to_phys(sg_page(mem_dump_sgt.sgl));
+	sg_free_table(&mem_dump_sgt);
+
+	ret = qtee_shmbridge_register(phys_addr, total_size, ns_vmids,
+		ns_vm_perms, 1, PERM_READ|PERM_WRITE, &shm_bridge_handle);
+
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create shm bridge.ret=%d\n",
+						ret);
+		dma_free_coherent(&pdev->dev, total_size,
+						dump_vaddr, dma_handle);
+		return ret;
+	}
+
+	memset(dump_vaddr, 0x0, total_size);
+
+	ret = init_memory_dump(dump_vaddr, phys_addr);
+	if (ret) {
+		dev_err(&pdev->dev, "Memory Dump table set up is failed\n");
+		qtee_shmbridge_deregister(shm_bridge_handle);
+		dma_free_coherent(&pdev->dev, total_size,
+						dump_vaddr, dma_handle);
+		return ret;
+	}
+
+	dump_vaddr += (sizeof(struct msm_dump_table) * 2);
+	phys_addr += (sizeof(struct msm_dump_table) * 2);
+	for_each_available_child_of_node(node, child_node) {
+		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
+		if (ret)
+			continue;
+
 		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
 		if (ret) {
 			dev_err(&pdev->dev, "Unable to find id for %s\n",
@@ -279,40 +323,41 @@
 			continue;
 		}
 
-		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
-						&dump_addr, GFP_KERNEL);
-
-		if (!dump_vaddr) {
-			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
-			continue;
-		}
-
-		memset(dump_vaddr, 0x0, size);
-
-		dump_data = devm_kzalloc(&pdev->dev,
-				sizeof(struct msm_dump_data), GFP_KERNEL);
-		if (!dump_data) {
-			dma_free_coherent(&pdev->dev, size, dump_vaddr,
-					dump_addr);
-			continue;
-		}
-
-		dump_data->addr = dump_addr;
+		dump_data = dump_vaddr;
+		dump_data->addr = phys_addr + MSM_DUMP_DATA_SIZE;
 		dump_data->len = size;
 		dump_entry.id = id;
 		strlcpy(dump_data->name, child_node->name,
 					sizeof(dump_data->name));
-		dump_entry.addr = virt_to_phys(dump_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
-		if (ret) {
+		dump_entry.addr = phys_addr;
+		ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
+					&dump_entry);
+		if (ret)
 			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
 				id);
-			dma_free_coherent(&pdev->dev, size, dump_vaddr,
-					dump_addr);
-			devm_kfree(&pdev->dev, dump_data);
-		}
+
+		md_entry.phys_addr = dump_data->addr;
+		md_entry.virt_addr = (uintptr_t)dump_vaddr + MSM_DUMP_DATA_SIZE;
+		md_entry.size = size;
+		md_entry.id = id;
+		strlcpy(md_entry.name, child_node->name, sizeof(md_entry.name));
+		if (msm_minidump_add_region(&md_entry))
+			dev_err(&pdev->dev, "Mini dump entry failed id = %d\n",
+				id);
+
+		dump_vaddr += (size + MSM_DUMP_DATA_SIZE);
+		phys_addr += (size  + MSM_DUMP_DATA_SIZE);
 	}
-	return 0;
+
+	return ret;
+}
+
+static int mem_dump_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = mem_dump_alloc(pdev);
+	return ret;
 }
 
 static const struct of_device_id mem_dump_match_table[] = {
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 62e5d4a..2c1373d 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -34,6 +34,7 @@
  * @modem_ssr: true if modem is restarting, false if booting for first time.
  * @clear_fw_region: Clear fw region on failure in loading.
  * @subsys_vmid: memprot id for the subsystem.
+ * @extra_size: extra memory allocated at the end of the image.
  */
 struct pil_desc {
 	const char *name;
@@ -63,6 +64,7 @@
 	int *aux_minidump_ids;
 	int num_aux_minidump_ids;
 	bool minidump_as_elf32;
+	u32 extra_size;
 };
 
 /**
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4b378ea..1ba689d 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -53,6 +53,7 @@
 	HW_PLATFORM_RCM	= 21,
 	HW_PLATFORM_STP = 23,
 	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_HDK = 31,
 	HW_PLATFORM_INVALID
 };
 
@@ -73,6 +74,7 @@
 	[HW_PLATFORM_DTV] = "DTV",
 	[HW_PLATFORM_STP] = "STP",
 	[HW_PLATFORM_SBC] = "SBC",
+	[HW_PLATFORM_HDK] = "HDK",
 };
 
 enum {
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 16c1dab..4908449 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -658,7 +658,7 @@
 
 	desc.args[0] = d->pas_id;
 	desc.args[1] = addr;
-	desc.args[2] = size;
+	desc.args[2] = size + pil->extra_size;
 	desc.arginfo = SCM_ARGS(3);
 	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
 			&desc);
@@ -1197,6 +1197,10 @@
 		}
 		mask_scsr_irqs(d);
 
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,extra-size",
+						&d->desc.extra_size);
+		if (rc)
+			d->desc.extra_size = 0;
 	} else {
 		d->subsys_desc.err_fatal_handler =
 						subsys_err_fatal_intr_handler;
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c0846fb..bdef14d 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -47,43 +47,43 @@
 #define RPM_XO_A2				29
 
 /* SMD RPM clocks */
-#define RPM_SMD_XO_CLK_SRC				0
+#define RPM_SMD_XO_CLK_SRC			0
 #define RPM_SMD_XO_A_CLK_SRC			1
-#define RPM_SMD_PCNOC_CLK				2
-#define RPM_SMD_PCNOC_A_CLK				3
-#define RPM_SMD_SNOC_CLK				4
-#define RPM_SMD_SNOC_A_CLK				5
-#define RPM_SMD_BIMC_CLK				6
-#define RPM_SMD_BIMC_A_CLK				7
-#define RPM_SMD_QDSS_CLK				8
-#define RPM_SMD_QDSS_A_CLK				9
+#define RPM_SMD_PCNOC_CLK			2
+#define RPM_SMD_PCNOC_A_CLK			3
+#define RPM_SMD_SNOC_CLK			4
+#define RPM_SMD_SNOC_A_CLK			5
+#define RPM_SMD_BIMC_CLK			6
+#define RPM_SMD_BIMC_A_CLK			7
+#define RPM_SMD_QDSS_CLK			8
+#define RPM_SMD_QDSS_A_CLK			9
 #define RPM_SMD_IPA_CLK				10
-#define RPM_SMD_IPA_A_CLK				11
+#define RPM_SMD_IPA_A_CLK			11
 #define RPM_SMD_QUP_CLK				12
-#define RPM_SMD_QUP_A_CLK				13
-#define RPM_SMD_MMRT_CLK				14
-#define RPM_SMD_MMRT_A_CLK				15
-#define RPM_SMD_MMNRT_CLK				16
-#define RPM_SMD_MMNRT_A_CLK				17
-#define RPM_SMD_SNOC_PERIPH_CLK				18
-#define RPM_SMD_SNOC_PERIPH_A_CLK				19
-#define RPM_SMD_SNOC_LPASS_CLK				20
-#define RPM_SMD_SNOC_LPASS_A_CLK				21
+#define RPM_SMD_QUP_A_CLK			13
+#define RPM_SMD_MMRT_CLK			14
+#define RPM_SMD_MMRT_A_CLK			15
+#define RPM_SMD_MMNRT_CLK			16
+#define RPM_SMD_MMNRT_A_CLK			17
+#define RPM_SMD_SNOC_PERIPH_CLK			18
+#define RPM_SMD_SNOC_PERIPH_A_CLK		19
+#define RPM_SMD_SNOC_LPASS_CLK			20
+#define RPM_SMD_SNOC_LPASS_A_CLK		21
 #define RPM_SMD_BB_CLK1				22
-#define RPM_SMD_BB_CLK1_A				23
-#define RPM_SMD_BB_CLK2					24
-#define RPM_SMD_BB_CLK2_A				25
+#define RPM_SMD_BB_CLK1_A			23
+#define RPM_SMD_BB_CLK2				24
+#define RPM_SMD_BB_CLK2_A			25
 #define RPM_SMD_RF_CLK1				26
-#define RPM_SMD_RF_CLK1_A				27
+#define RPM_SMD_RF_CLK1_A			27
 #define RPM_SMD_RF_CLK2				28
-#define RPM_SMD_RF_CLK2_A				29
-#define RPM_SMD_BB_CLK1_PIN				30
+#define RPM_SMD_RF_CLK2_A			29
+#define RPM_SMD_BB_CLK1_PIN			30
 #define RPM_SMD_BB_CLK1_A_PIN			31
-#define RPM_SMD_BB_CLK2_PIN				32
+#define RPM_SMD_BB_CLK2_PIN			32
 #define RPM_SMD_BB_CLK2_A_PIN			33
-#define RPM_SMD_RF_CLK1_PIN				34
+#define RPM_SMD_RF_CLK1_PIN			34
 #define RPM_SMD_RF_CLK1_A_PIN			35
-#define RPM_SMD_RF_CLK2_PIN				36
+#define RPM_SMD_RF_CLK2_PIN			36
 #define RPM_SMD_RF_CLK2_A_PIN			37
 #define RPM_SMD_PNOC_CLK			38
 #define RPM_SMD_PNOC_A_CLK			39
@@ -109,8 +109,10 @@
 #define RPM_SMD_DIV_A_CLK1			59
 #define RPM_SMD_DIV_CLK2			60
 #define RPM_SMD_DIV_A_CLK2			61
-#define RPM_SMD_DIFF_CLK			62
-#define RPM_SMD_DIFF_A_CLK			63
+#define RPM_SMD_DIV_CLK3			61
+#define RPM_SMD_DIV_A_CLK3			62
+#define RPM_SMD_DIFF_CLK			63
+#define RPM_SMD_DIFF_A_CLK			64
 #define RPM_SMD_CXO_D0_PIN			64
 #define RPM_SMD_CXO_D0_A_PIN			65
 #define RPM_SMD_CXO_D1_PIN			66
@@ -125,8 +127,8 @@
 #define RPM_SMD_QPIC_A_CLK			75
 #define RPM_SMD_CE1_CLK				76
 #define RPM_SMD_CE1_A_CLK			77
-#define RPM_SMD_BIMC_GPU_CLK				78
-#define RPM_SMD_BIMC_GPU_A_CLK				79
+#define RPM_SMD_BIMC_GPU_CLK			78
+#define RPM_SMD_BIMC_GPU_A_CLK			79
 #define RPM_SMD_LN_BB_CLK			80
 #define RPM_SMD_LN_BB_CLK_A			81
 #define RPM_SMD_LN_BB_CLK_PIN			82
@@ -135,72 +137,78 @@
 #define RPM_SMD_RF_CLK3_A			85
 #define RPM_SMD_RF_CLK3_PIN			86
 #define RPM_SMD_RF_CLK3_A_PIN			87
-#define RPM_SMD_LN_BB_CLK1				88
-#define RPM_SMD_LN_BB_CLK1_A				89
-#define RPM_SMD_LN_BB_CLK2				90
-#define RPM_SMD_LN_BB_CLK2_A				91
-#define RPM_SMD_LN_BB_CLK3				92
-#define RPM_SMD_LN_BB_CLK3_A				93
-#define PNOC_MSMBUS_CLK				94
-#define PNOC_MSMBUS_A_CLK			95
-#define PNOC_KEEPALIVE_A_CLK			96
-#define SNOC_MSMBUS_CLK				97
-#define SNOC_MSMBUS_A_CLK			98
-#define BIMC_MSMBUS_CLK				99
-#define BIMC_MSMBUS_A_CLK			100
-#define PNOC_USB_CLK				101
-#define PNOC_USB_A_CLK				102
-#define SNOC_USB_CLK				103
-#define SNOC_USB_A_CLK				104
-#define BIMC_USB_CLK				105
-#define BIMC_USB_A_CLK				106
-#define SNOC_WCNSS_A_CLK			107
-#define BIMC_WCNSS_A_CLK			108
-#define MCD_CE1_CLK				109
-#define QCEDEV_CE1_CLK				110
-#define QCRYPTO_CE1_CLK				111
-#define QSEECOM_CE1_CLK				112
-#define SCM_CE1_CLK				113
-#define CXO_SMD_OTG_CLK				114
-#define CXO_SMD_LPM_CLK				115
-#define CXO_SMD_PIL_PRONTO_CLK			116
-#define CXO_SMD_PIL_MSS_CLK			117
-#define CXO_SMD_WLAN_CLK			118
-#define CXO_SMD_PIL_LPASS_CLK			119
-#define CXO_SMD_PIL_CDSP_CLK			120
-#define CNOC_MSMBUS_CLK				121
-#define CNOC_MSMBUS_A_CLK				122
-#define CNOC_KEEPALIVE_A_CLK				123
-#define SNOC_KEEPALIVE_A_CLK				124
-#define CPP_MMNRT_MSMBUS_CLK				125
-#define CPP_MMNRT_MSMBUS_A_CLK				126
-#define JPEG_MMNRT_MSMBUS_CLK				127
-#define JPEG_MMNRT_MSMBUS_A_CLK				128
-#define VENUS_MMNRT_MSMBUS_CLK				129
-#define VENUS_MMNRT_MSMBUS_A_CLK			130
-#define ARM9_MMNRT_MSMBUS_CLK				131
-#define ARM9_MMNRT_MSMBUS_A_CLK				132
-#define MDP_MMRT_MSMBUS_CLK				133
-#define MDP_MMRT_MSMBUS_A_CLK				134
-#define VFE_MMRT_MSMBUS_CLK				135
-#define VFE_MMRT_MSMBUS_A_CLK				136
-#define QUP0_MSMBUS_SNOC_PERIPH_CLK			137
-#define QUP0_MSMBUS_SNOC_PERIPH_A_CLK			138
-#define QUP1_MSMBUS_SNOC_PERIPH_CLK			139
-#define QUP1_MSMBUS_SNOC_PERIPH_A_CLK			140
-#define QUP2_MSMBUS_SNOC_PERIPH_CLK                     141
-#define QUP2_MSMBUS_SNOC_PERIPH_A_CLK                   142
-#define DAP_MSMBUS_SNOC_PERIPH_CLK			143
-#define DAP_MSMBUS_SNOC_PERIPH_A_CLK			144
-#define SDC1_MSMBUS_SNOC_PERIPH_CLK			145
-#define SDC1_MSMBUS_SNOC_PERIPH_A_CLK			146
-#define SDC2_MSMBUS_SNOC_PERIPH_CLK			147
-#define SDC2_MSMBUS_SNOC_PERIPH_A_CLK			148
-#define CRYPTO_MSMBUS_SNOC_PERIPH_CLK			149
-#define CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK			150
-#define SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK			151
-#define SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK		152
-#define SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK			153
-#define SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK		154
+#define RPM_SMD_LN_BB_CLK1			88
+#define RPM_SMD_LN_BB_CLK1_A			89
+#define RPM_SMD_LN_BB_CLK2			90
+#define RPM_SMD_LN_BB_CLK2_A			91
+#define RPM_SMD_LN_BB_CLK3			92
+#define RPM_SMD_LN_BB_CLK3_A			93
+#define RPM_SMD_MMAXI_CLK			94
+#define RPM_SMD_MMAXI_A_CLK			95
+#define RPM_SMD_AGGR1_NOC_CLK			96
+#define RPM_SMD_AGGR1_NOC_A_CLK			97
+#define RPM_SMD_AGGR2_NOC_CLK			98
+#define RPM_SMD_AGGR2_NOC_A_CLK			99
+#define PNOC_MSMBUS_CLK				100
+#define PNOC_MSMBUS_A_CLK			101
+#define PNOC_KEEPALIVE_A_CLK			102
+#define SNOC_MSMBUS_CLK				103
+#define SNOC_MSMBUS_A_CLK			104
+#define BIMC_MSMBUS_CLK				105
+#define BIMC_MSMBUS_A_CLK			106
+#define PNOC_USB_CLK				107
+#define PNOC_USB_A_CLK				108
+#define SNOC_USB_CLK				109
+#define SNOC_USB_A_CLK				110
+#define BIMC_USB_CLK				111
+#define BIMC_USB_A_CLK				112
+#define SNOC_WCNSS_A_CLK			113
+#define BIMC_WCNSS_A_CLK			114
+#define MCD_CE1_CLK				115
+#define QCEDEV_CE1_CLK				116
+#define QCRYPTO_CE1_CLK				117
+#define QSEECOM_CE1_CLK				118
+#define SCM_CE1_CLK				119
+#define CXO_SMD_OTG_CLK				120
+#define CXO_SMD_LPM_CLK				121
+#define CXO_SMD_PIL_PRONTO_CLK			122
+#define CXO_SMD_PIL_MSS_CLK			123
+#define CXO_SMD_WLAN_CLK			124
+#define CXO_SMD_PIL_LPASS_CLK			125
+#define CXO_SMD_PIL_CDSP_CLK			126
+#define CNOC_MSMBUS_CLK				127
+#define CNOC_MSMBUS_A_CLK			128
+#define CNOC_KEEPALIVE_A_CLK			129
+#define SNOC_KEEPALIVE_A_CLK			130
+#define CPP_MMNRT_MSMBUS_CLK			131
+#define CPP_MMNRT_MSMBUS_A_CLK			132
+#define JPEG_MMNRT_MSMBUS_CLK			133
+#define JPEG_MMNRT_MSMBUS_A_CLK			134
+#define VENUS_MMNRT_MSMBUS_CLK			135
+#define VENUS_MMNRT_MSMBUS_A_CLK		136
+#define ARM9_MMNRT_MSMBUS_CLK			137
+#define ARM9_MMNRT_MSMBUS_A_CLK			138
+#define MDP_MMRT_MSMBUS_CLK			139
+#define MDP_MMRT_MSMBUS_A_CLK			140
+#define VFE_MMRT_MSMBUS_CLK			141
+#define VFE_MMRT_MSMBUS_A_CLK			142
+#define QUP0_MSMBUS_SNOC_PERIPH_CLK		143
+#define QUP0_MSMBUS_SNOC_PERIPH_A_CLK		144
+#define QUP1_MSMBUS_SNOC_PERIPH_CLK		145
+#define QUP1_MSMBUS_SNOC_PERIPH_A_CLK		146
+#define QUP2_MSMBUS_SNOC_PERIPH_CLK		147
+#define QUP2_MSMBUS_SNOC_PERIPH_A_CLK		148
+#define DAP_MSMBUS_SNOC_PERIPH_CLK		149
+#define DAP_MSMBUS_SNOC_PERIPH_A_CLK		150
+#define SDC1_MSMBUS_SNOC_PERIPH_CLK		151
+#define SDC1_MSMBUS_SNOC_PERIPH_A_CLK		152
+#define SDC2_MSMBUS_SNOC_PERIPH_CLK		153
+#define SDC2_MSMBUS_SNOC_PERIPH_A_CLK		154
+#define CRYPTO_MSMBUS_SNOC_PERIPH_CLK		155
+#define CRYPTO_MSMBUS_SNOC_PERIPH_A_CLK		156
+#define SDC1_SLV_MSMBUS_SNOC_PERIPH_CLK		157
+#define SDC1_SLV_MSMBUS_SNOC_PERIPH_A_CLK	158
+#define SDC2_SLV_MSMBUS_SNOC_PERIPH_CLK		159
+#define SDC2_SLV_MSMBUS_SNOC_PERIPH_A_CLK	160
 
 #endif
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 3cfb48b..8046ddd 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -22,9 +22,11 @@
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_WALT
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
 extern unsigned int sched_get_cpu_util(int cpu);
+extern void sched_update_hyst_times(void);
+extern u64 sched_lpm_disallowed_time(int cpu);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -33,12 +35,6 @@
 {
 	return 0;
 }
-#endif
-
-#ifdef CONFIG_SCHED_WALT
-extern void sched_update_hyst_times(void);
-extern u64 sched_lpm_disallowed_time(int cpu);
-#else
 static inline void sched_update_hyst_times(void)
 {
 }
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 9e4fdd8..fb24896 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -32,6 +32,8 @@
 #define QCOM_SMD_RPM_IPA_CLK	0x617069
 #define QCOM_SMD_RPM_CE_CLK	0x6563
 #define QCOM_SMD_RPM_AGGR_CLK	0x72676761
+#define QCOM_SMD_RPM_QUP_CLK	0x00707571
+#define QCOM_SMD_RPM_MMXI_CLK	0x69786D6D
 
 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
 		       int state,
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9512fd7..6742017 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -19,8 +19,8 @@
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle.o fair.o rt.o deadline.o
 obj-y += wait.o wait_bit.o swait.o completion.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o sched_avg.o
-obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
+obj-$(CONFIG_SCHED_WALT) += walt.o boost.o sched_avg.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3024e85..652424f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -991,7 +991,6 @@
 	struct walt_sched_stats walt_stats;
 
 	u64			window_start;
-	s64			cum_window_start;
 	unsigned long		walt_flags;
 
 	u64			cur_irqload;
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 89a1e07..1e7bb64 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -25,14 +25,12 @@
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
-#ifdef CONFIG_SCHED_WALT
 unsigned int sysctl_sched_busy_hyst_enable_cpus;
 unsigned int sysctl_sched_busy_hyst;
 unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
 unsigned int sysctl_sched_coloc_busy_hyst = 39000000;
 unsigned int sysctl_sched_coloc_busy_hyst_max_ms = 5000;
 static DEFINE_PER_CPU(atomic64_t, busy_hyst_end_time) = ATOMIC64_INIT(0);
-#endif
 static DEFINE_PER_CPU(u64, hyst_time);
 
 #define NR_THRESHOLD_PCT		15
@@ -114,7 +112,6 @@
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
-#ifdef CONFIG_SCHED_WALT
 void sched_update_hyst_times(void)
 {
 	u64 std_time, rtgb_time;
@@ -156,12 +153,6 @@
 		atomic64_set(&per_cpu(busy_hyst_end_time, cpu),
 				curr_time + per_cpu(hyst_time, cpu));
 }
-#else
-static inline void update_busy_hyst_end_time(int cpu, bool dequeue,
-				unsigned long prev_nr_run, u64 curr_time)
-{
-}
-#endif
 
 /**
  * sched_update_nr_prod
@@ -215,10 +206,9 @@
 	util = rq->cfs.avg.util_avg;
 	capacity = capacity_orig_of(cpu);
 
-#ifdef CONFIG_SCHED_WALT
 	util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
 	util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
-#endif
+
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	util = (util >= capacity) ? capacity : util;
@@ -226,7 +216,6 @@
 	return busy;
 }
 
-#ifdef CONFIG_SCHED_WALT
 u64 sched_lpm_disallowed_time(int cpu)
 {
 	u64 now = sched_clock();
@@ -237,4 +226,3 @@
 
 	return 0;
 }
-#endif
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index f54771f..3d8a491 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -3465,7 +3465,6 @@
 
 	rq->walt_stats.cumulative_runnable_avg_scaled = 0;
 	rq->window_start = 0;
-	rq->cum_window_start = 0;
 	rq->walt_stats.nr_big_tasks = 0;
 	rq->walt_flags = 0;
 	rq->cur_irqload = 0;