Merge "defconfig: msm: Enable CONFIG_HID_SONY for kona target"
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index df1e614..1d02cb2 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -106,6 +106,7 @@
 CONFIG_CMA=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -306,6 +307,7 @@
 CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS2=y
 CONFIG_CNSS2_QMI=y
@@ -374,6 +376,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
@@ -494,6 +497,7 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA3_MHI_PRIME_MANAGER=y
 CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 9ae794e..1f8ad89 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -113,6 +113,7 @@
 CONFIG_CMA_DEBUGFS=y
 CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -314,6 +315,7 @@
 CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS2=y
 CONFIG_CNSS2_DEBUG=y
@@ -386,6 +388,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
@@ -685,6 +688,7 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PANIC_ON_OOM=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
 CONFIG_PAGE_POISONING=y
 CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index b679fe8..c2cb03f 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -247,6 +247,7 @@
 CONFIG_QRTR_SMD=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -547,6 +548,7 @@
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 03b96fa..e770dc3 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -251,6 +251,7 @@
 CONFIG_QRTR_SMD=y
 CONFIG_SOCKEV_NLMCAST=y
 CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -560,6 +561,7 @@
 CONFIG_MSM_PM=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 925d364..f3556ff 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -681,6 +681,7 @@
 	struct sat_tre *pkt = SAT_TRE_OFFSET(data);
 	struct mhi_sat_cntrl *sat_cntrl;
 	struct mhi_sat_packet *packet;
+	unsigned long flags;
 
 	MHI_SAT_ASSERT(!mhi_sat_isvalid_header(hdr, len), "Invalid header!\n");
 
@@ -710,9 +711,9 @@
 	packet->msg = packet + 1;
 	memcpy(packet->msg, data, len);
 
-	spin_lock_irq(&sat_cntrl->pkt_lock);
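+	/* Save/restore IRQ state; this path may run with IRQs already disabled */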
+	spin_lock_irqsave(&sat_cntrl->pkt_lock, flags);
 	list_add_tail(&packet->node, &sat_cntrl->packet_list);
-	spin_unlock_irq(&sat_cntrl->pkt_lock);
+	spin_unlock_irqrestore(&sat_cntrl->pkt_lock, flags);
 
 	schedule_work(&sat_cntrl->process_work);
 
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c3dca33..5c071c2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -541,6 +541,15 @@
 source "drivers/char/xillybus/Kconfig"
 source "drivers/char/diag/Kconfig"
 
+config MSM_FASTCVPD
+	bool "QTI FASTCVP driver"
+	depends on QCOM_GLINK
+	help
+	  This driver exposes APIs that let the video driver share the HFI
+	  command queue address with the CDSP (Compute Digital Signal
+	  Processor), and handles errors so the video and CDSP subsystems
+	  can exit gracefully if either crashes.
+
 config MSM_ADSPRPC
         tristate "QTI ADSP RPC driver"
         depends on QCOM_GLINK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d9e4dfe..07ac833 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -61,6 +61,7 @@
 ifdef CONFIG_COMPAT
   obj-$(CONFIG_MSM_ADSPRPC)	+= adsprpc_compat.o
 endif
+obj-$(CONFIG_MSM_FASTCVPD)	+= fastcvpd.o
 obj-$(CONFIG_ADI)		+= adi.o
 obj-$(CONFIG_DIAG_CHAR)		+= diag/
 obj-$(CONFIG_OKL4_PIPE)		+= okl4_pipe.o
diff --git a/drivers/char/fastcvpd.c b/drivers/char/fastcvpd.c
new file mode 100644
index 0000000..7b6ba44
--- /dev/null
+++ b/drivers/char/fastcvpd.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+#include <linux/of_platform.h>
+#include <soc/qcom/secure_buffer.h>
+#include "linux/fastcvpd.h"
+
+#define VMID_CDSP_Q6 (30)
+#define SRC_VM_NUM 1
+#define DEST_VM_NUM 2
+#define FASTCVPD_VIDEO_SEND_HFI_CMD_QUEUE 0
+#define FASTCVPD_VIDEO_SUSPEND 1
+#define FASTCVPD_VIDEO_RESUME 2
+#define FASTCVPD_VIDEO_SHUTDOWN 3
+#define STATUS_INIT 0
+#define STATUS_DEINIT 1
+#define STATUS_OK 2
+#define STATUS_SSR 3
+
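+/* Command payload sent to the CDSP over the rpmsg channel */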
+struct fastcvpd_cmd_msg {
+	uint32_t cmd_msg_type;
+	int ret_val;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+};
+
+struct fastcvpd_cmd_msg_rsp {
+	int ret_val;
+};
+
+struct fastcvpd_apps {
+	struct rpmsg_device *chan;
+	struct mutex smd_mutex;
+	int rpmsg_register;
+	uint32_t cdsp_state;
+	uint32_t video_shutdown;
+};
+
+static struct completion work;
+
+static struct fastcvpd_apps gfa_cv;
+
+static struct fastcvpd_cmd_msg cmd_msg;
+
+static struct fastcvpd_cmd_msg_rsp cmd_msg_rsp;
+
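+/* Send a command to the CDSP rpmsg endpoint; -EINVAL if the channel is down */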
+static int fastcvpd_send_cmd(void *msg, uint32_t len)
+{
+	struct fastcvpd_apps *me = &gfa_cv;
+	int err;
+
+	if (IS_ERR_OR_NULL(me->chan)) {
+		err = -EINVAL;
+		goto bail;
+	}
+	err = rpmsg_send(me->chan->ept, msg, len);
+
+bail:
+	return err;
+}
+
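+/*
+ * Invoked when the CDSP rpmsg channel comes up. If this follows a CDSP SSR
+ * while video was active, reclaim the HFI queue memory to HLOS and resend
+ * its address to the CDSP.
+ */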
+static int fastcvpd_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+	int err = 0;
+	struct fastcvpd_apps *me = &gfa_cv;
+	uint32_t cdsp_state, video_shutdown;
+	uint64_t msg_ptr;
+	uint32_t msg_ptr_len;
+	int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
+	int destVM[SRC_VM_NUM] = {VMID_HLOS};
+	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
+
+	if (strcmp(rpdev->dev.parent->of_node->name, "cdsp")) {
+		pr_err("%s: Failed to probe rpmsg device.Node name:%s\n",
+			__func__, rpdev->dev.parent->of_node->name);
+		err = -EINVAL;
+		goto bail;
+	}
+	mutex_lock(&me->smd_mutex);
+	me->chan = rpdev;
+	cdsp_state = me->cdsp_state;
+	video_shutdown = me->video_shutdown;
+	msg_ptr = cmd_msg.msg_ptr;
+	msg_ptr_len = cmd_msg.msg_ptr_len;
+	mutex_unlock(&me->smd_mutex);
+
+	if (cdsp_state == STATUS_SSR && video_shutdown == STATUS_OK) {
+		err = hyp_assign_phys((uint64_t)msg_ptr,
+			msg_ptr_len, srcVM, DEST_VM_NUM, destVM,
+			destVMperm, SRC_VM_NUM);
+		if (err) {
+			pr_err("%s: Failed to hyp_assign. err=%d\n",
+				__func__, err);
+			return err;
+		}
+		err = fastcvpd_video_send_cmd_hfi_queue(
+			(phys_addr_t *)msg_ptr, msg_ptr_len);
+		if (err) {
+			pr_err("%s: Failed to send HFI Queue address. err=%d\n",
+			__func__, err);
+			goto bail;
+		}
+		mutex_lock(&me->smd_mutex);
+		cdsp_state = me->cdsp_state;
+		mutex_unlock(&me->smd_mutex);
+	}
+
+	pr_info("%s: Successfully probed. cdsp_state=%d video_shutdown=%d\n",
+		__func__, cdsp_state, video_shutdown);
+bail:
+	return err;
+}
+
+static void fastcvpd_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+	struct fastcvpd_apps *me = &gfa_cv;
+
+	mutex_lock(&me->smd_mutex);
+	me->chan = NULL;
+	me->cdsp_state = STATUS_SSR;
+	mutex_unlock(&me->smd_mutex);
+	pr_info("%s: CDSP SSR triggered\n", __func__);
+}
+
+static int fastcvpd_rpmsg_callback(struct rpmsg_device *rpdev,
+	void *data, int len, void *priv, u32 addr)
+{
+	int *rpmsg_resp = (int *)data;
+
+	cmd_msg_rsp.ret_val = *rpmsg_resp;
+	complete(&work);
+
+	return 0;
+}
+
+int fastcvpd_video_send_cmd_hfi_queue(phys_addr_t *phys_addr,
+	uint32_t size_in_bytes)
+{
+	int err;
+	struct fastcvpd_cmd_msg local_cmd_msg;
+	struct fastcvpd_apps *me = &gfa_cv;
+	int srcVM[SRC_VM_NUM] = {VMID_HLOS};
+	int destVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
+	int destVMperm[DEST_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC,
+		PERM_READ | PERM_WRITE | PERM_EXEC };
+
+	local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SEND_HFI_CMD_QUEUE;
+	local_cmd_msg.msg_ptr = (uint64_t)phys_addr;
+	local_cmd_msg.msg_ptr_len = size_in_bytes;
+	mutex_lock(&me->smd_mutex);
+	cmd_msg.msg_ptr = (uint64_t)phys_addr;
+	cmd_msg.msg_ptr_len = size_in_bytes;
+	mutex_unlock(&me->smd_mutex);
+
+	pr_debug("%s :: address of buffer, PA=0x%pK  size_buff=%d\n",
+		__func__, phys_addr, size_in_bytes);
+
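+	/* Assign the HFI queue memory to HLOS and the CDSP Q6 before sharing it */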
+	err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
+		local_cmd_msg.msg_ptr_len, srcVM, SRC_VM_NUM, destVM,
+		destVMperm, DEST_VM_NUM);
+	if (err) {
+		pr_err("%s: Failed in hyp_assign. err=%d\n",
+			__func__, err);
+		return err;
+	}
+
+	err = fastcvpd_send_cmd(&local_cmd_msg,
+			sizeof(struct fastcvpd_cmd_msg));
+	if (err) {
+		pr_err("%s: fastcvpd_send_cmd failed with err=%d\n",
+			__func__, err);
+	} else {
+		mutex_lock(&me->smd_mutex);
+		me->video_shutdown = STATUS_OK;
+		me->cdsp_state = STATUS_OK;
+		mutex_unlock(&me->smd_mutex);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(fastcvpd_video_send_cmd_hfi_queue);
+
+int fastcvpd_video_suspend(uint32_t session_flag)
+{
+	int err = 0;
+	struct fastcvpd_cmd_msg local_cmd_msg;
+	struct fastcvpd_apps *me = &gfa_cv;
+	uint32_t cdsp_state;
+
+	mutex_lock(&me->smd_mutex);
+	cdsp_state = me->cdsp_state;
+	mutex_unlock(&me->smd_mutex);
+
+	if (cdsp_state == STATUS_SSR)
+		return 0;
+
+	local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SUSPEND;
+	err = fastcvpd_send_cmd(&local_cmd_msg,
+			sizeof(struct fastcvpd_cmd_msg));
+	if (err)
+		pr_err("%s: fastcvpd_send_cmd failed with err=%d\n",
+			__func__, err);
+
+	return err;
+}
+EXPORT_SYMBOL(fastcvpd_video_suspend);
+
+int fastcvpd_video_resume(uint32_t session_flag)
+{
+	int err;
+	struct fastcvpd_cmd_msg local_cmd_msg;
+	struct fastcvpd_apps *me = &gfa_cv;
+	uint32_t cdsp_state;
+
+	mutex_lock(&me->smd_mutex);
+	cdsp_state = me->cdsp_state;
+	mutex_unlock(&me->smd_mutex);
+
+	if (cdsp_state == STATUS_SSR)
+		return 0;
+
+	local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_RESUME;
+	err = fastcvpd_send_cmd(&local_cmd_msg,
+			sizeof(struct fastcvpd_cmd_msg));
+	if (err)
+		pr_err("%s: fastcvpd_send_cmd failed with err=%d\n",
+			__func__, err);
+
+	return err;
+}
+EXPORT_SYMBOL(fastcvpd_video_resume);
+
+int fastcvpd_video_shutdown(uint32_t session_flag)
+{
+	struct fastcvpd_apps *me = &gfa_cv;
+	int err, local_cmd_msg_rsp;
+	struct fastcvpd_cmd_msg local_cmd_msg;
+	int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
+	int destVM[SRC_VM_NUM] = {VMID_HLOS};
+	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
+
+	local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SHUTDOWN;
+	err = fastcvpd_send_cmd(&local_cmd_msg,
+			sizeof(struct fastcvpd_cmd_msg));
+	if (err)
+		pr_err("%s: fastcvpd_send_cmd failed with err=%d\n",
+			__func__, err);
+
+	wait_for_completion(&work);
+
+	mutex_lock(&me->smd_mutex);
+	me->video_shutdown = STATUS_SSR;
+	local_cmd_msg.msg_ptr = cmd_msg.msg_ptr;
+	local_cmd_msg.msg_ptr_len = cmd_msg.msg_ptr_len;
+	mutex_unlock(&me->smd_mutex);
+	local_cmd_msg_rsp = cmd_msg_rsp.ret_val;
+	if (local_cmd_msg_rsp == 0) {
+		err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
+			local_cmd_msg.msg_ptr_len, srcVM, DEST_VM_NUM, destVM,
+			destVMperm, SRC_VM_NUM);
+		if (err) {
+			pr_err("%s: Failed to hyp_assign. err=%d\n",
+				__func__, err);
+			return err;
+		}
+	} else {
+		pr_err("%s: Skipping hyp_assign as CDSP sent invalid response=%d\n",
+			__func__, local_cmd_msg_rsp);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(fastcvpd_video_shutdown);
+
+static const struct rpmsg_device_id fastcvpd_rpmsg_match[] = {
+	{ FASTCVPD_GLINK_GUID },
+	{ },
+};
+
+static struct rpmsg_driver fastcvpd_rpmsg_client = {
+	.id_table = fastcvpd_rpmsg_match,
+	.probe = fastcvpd_rpmsg_probe,
+	.remove = fastcvpd_rpmsg_remove,
+	.callback = fastcvpd_rpmsg_callback,
+	.drv = {
+		.name = "qcom,msm_fastcvpd_rpmsg",
+	},
+};
+
+static int __init fastcvpd_device_init(void)
+{
+	struct fastcvpd_apps *me = &gfa_cv;
+	int err;
+
+	init_completion(&work);
+	mutex_init(&me->smd_mutex);
+	me->video_shutdown = STATUS_INIT;
+	me->cdsp_state = STATUS_INIT;
+	err = register_rpmsg_driver(&fastcvpd_rpmsg_client);
+	if (err) {
+		pr_err("%s : register_rpmsg_driver failed with err %d\n",
+			__func__, err);
+		goto register_bail;
+	}
+	me->rpmsg_register = 1;
+	return 0;
+
+register_bail:
+	me->video_shutdown = STATUS_DEINIT;
+	me->cdsp_state = STATUS_DEINIT;
+	return err;
+}
+
+static void __exit fastcvpd_device_exit(void)
+{
+	struct fastcvpd_apps *me = &gfa_cv;
+
+	me->video_shutdown = STATUS_DEINIT;
+	me->cdsp_state = STATUS_DEINIT;
+	mutex_destroy(&me->smd_mutex);
+	if (me->rpmsg_register == 1)
+		unregister_rpmsg_driver(&fastcvpd_rpmsg_client);
+}
+
+late_initcall(fastcvpd_device_init);
+module_exit(fastcvpd_device_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 014be22..f1eb220 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -198,7 +198,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000001,
+	.user_ctl_val = 0x00000007,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -274,7 +274,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000001,
+	.user_ctl_val = 0x00000007,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -327,9 +327,9 @@
 	.cal_l = 0x32,
 	.alpha = 0x0,
 	.config_ctl_val = 0x08200920,
-	.config_ctl_hi_val = 0x05008001,
+	.config_ctl_hi_val = 0x05008011,
 	.config_ctl_hi1_val = 0x00000000,
-	.user_ctl_val = 0x00000108,
+	.user_ctl_val = 0x0000010F,
 };
 
 static struct clk_alpha_pll cam_cc_pll2 = {
@@ -403,7 +403,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000001,
+	.user_ctl_val = 0x00000007,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -458,7 +458,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000001,
+	.user_ctl_val = 0x00000007,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -1215,13 +1215,14 @@
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_8,
+	.enable_safe_config = true,
 	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_slow_ahb_clk_src",
 		.parent_names = cam_cc_parent_names_8,
 		.num_parents = 3,
-		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_rcg2_ops,
+		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index b275e730..6019e57 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -144,9 +144,9 @@
 };
 
 static const struct alpha_pll_config disp_cc_pll0_config = {
-	.l = 0x16,
+	.l = 0x47,
 	.cal_l = 0x44,
-	.alpha = 0x6555,
+	.alpha = 0xE000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -527,6 +527,7 @@
 		.parent_names = disp_cc_parent_names_3,
 		.num_parents = 5,
 		.ops = &clk_rcg2_ops,
+		.flags = CLK_SET_RATE_PARENT,
 		.vdd_class = &vdd_cx,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index fd5428f..52faf73 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -1020,7 +1020,7 @@
 
 static struct clk_branch gcc_camera_hf_axi_clk = {
 	.halt_reg = 0xb028,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0xb028,
 		.enable_mask = BIT(0),
@@ -1033,7 +1033,7 @@
 
 static struct clk_branch gcc_camera_sf_axi_clk = {
 	.halt_reg = 0xb02c,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0xb02c,
 		.enable_mask = BIT(0),
@@ -1170,7 +1170,7 @@
 
 static struct clk_branch gcc_disp_hf_axi_clk = {
 	.halt_reg = 0xb030,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0xb030,
 		.enable_mask = BIT(0),
@@ -1183,7 +1183,7 @@
 
 static struct clk_branch gcc_disp_sf_axi_clk = {
 	.halt_reg = 0xb034,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0xb034,
 		.enable_mask = BIT(0),
@@ -1366,7 +1366,7 @@
 
 static struct clk_branch gcc_npu_bwmon2_axi_clk = {
 	.halt_reg = 0x7000c,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0x7000c,
 		.enable_mask = BIT(0),
@@ -1380,7 +1380,7 @@
 
 static struct clk_branch gcc_npu_bwmon_axi_clk = {
 	.halt_reg = 0x70008,
-	.halt_check = BRANCH_HALT_DELAY,
+	.halt_check = BRANCH_HALT,
 	.clkr = {
 		.enable_reg = 0x70008,
 		.enable_mask = BIT(0),
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index a5e390e..3216e86 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -438,10 +438,10 @@
 
 static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
-	int rc;
 	struct cpu_cycle_counter_cb cycle_counter_cb = {
 		.get_cpu_cycle_counter = qcom_cpufreq_get_cpu_cycle_counter,
 	};
+	int rc, cpu;
 
 	/* Get the bases of cpufreq for domains */
 	rc = qcom_resources_init(pdev);
@@ -456,6 +456,9 @@
 		return rc;
 	}
 
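+	/* Initialize the per-CPU cycle counter locks before registering the callback */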
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&qcom_cpufreq_counter[cpu].lock);
+
 	rc = register_cpu_cycle_counter_cb(&cycle_counter_cb);
 	if (rc) {
 		dev_err(&pdev->dev, "cycle counter cb failed to register\n");
diff --git a/drivers/devfreq/devfreq_qcom_fw.c b/drivers/devfreq/devfreq_qcom_fw.c
index f4e1bc7..87e58971 100644
--- a/drivers/devfreq/devfreq_qcom_fw.c
+++ b/drivers/devfreq/devfreq_qcom_fw.c
@@ -23,9 +23,9 @@
 #define FTBL_MAX_ENTRIES		40U
 #define FTBL_ROW_SIZE			4
 
-#define SRC_MASK	GENMASK(31, 30)
-#define SRC_SHIFT	30
-#define MULT_MASK	GENMASK(7, 0)
+#define SRC_MASK			GENMASK(31, 30)
+#define SRC_SHIFT			30
+#define MULT_MASK			GENMASK(7, 0)
 
 struct devfreq_qcom_fw {
 	void __iomem *perf_base;
@@ -36,6 +36,7 @@
 };
 
 static DEFINE_SPINLOCK(voter_lock);
+static unsigned int ftbl_row_size = FTBL_ROW_SIZE;
 
 static int devfreq_qcom_fw_target(struct device *dev, unsigned long *freq,
 				  u32 flags)
@@ -118,8 +119,11 @@
 		return -ENOMEM;
 	}
 
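+	/* Optional DT override; ftbl_row_size keeps its FTBL_ROW_SIZE default if absent */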
+	of_property_read_u32(pdev->dev.of_node, "qcom,ftbl-row-size",
+						     &ftbl_row_size);
+
 	for (i = 0; i < FTBL_MAX_ENTRIES; i++) {
-		data = readl_relaxed(ftbl_base + i * FTBL_ROW_SIZE);
+		data = readl_relaxed(ftbl_base + i * ftbl_row_size);
 		src = ((data & SRC_MASK) >> SRC_SHIFT);
 		mult = (data & MULT_MASK);
 		freq = src ? XO_HZ * mult : INIT_HZ;
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index b7fa43d..855b1ca 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -20,6 +20,7 @@
 
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
+#include <soc/qcom/qtee_shmbridge.h>
 
 /* QSEE_LOG_BUF_SIZE = 32K */
 #define QSEE_LOG_BUF_SIZE 0x8000
@@ -319,6 +320,7 @@
 static struct tzdbg_log_t *g_qsee_log;
 static dma_addr_t coh_pmem;
 static uint32_t debug_rw_buf_size;
+static struct qtee_shm shm;
 
 /*
  * Debugfs data structure and functions
@@ -856,14 +858,13 @@
 	void *buf = NULL;
 
 	len = QSEE_LOG_BUF_SIZE;
-	buf = dma_alloc_coherent(&pdev->dev, len, &coh_pmem, GFP_KERNEL);
-	if (buf == NULL) {
-		pr_err("Failed to alloc memory for size %zu\n", len);
+	ret = qtee_shmbridge_allocate_shm(len, &shm);
+	if (ret)
 		return;
-	}
+	buf = shm.vaddr;
+	coh_pmem = shm.paddr;
 
 	g_qsee_log = (struct tzdbg_log_t *)buf;
-
 	desc.args[0] = coh_pmem;
 	desc.args[1] = len;
 	desc.arginfo = 0x22;
@@ -886,7 +887,7 @@
 	return;
 
 err:
-	dma_free_coherent(&pdev->dev, len, (void *)g_qsee_log, coh_pmem);
+	qtee_shmbridge_free_shm(&shm);
 }
 
 static int  tzdbgfs_init(struct platform_device *pdev)
@@ -933,8 +934,7 @@
 	dent_dir = platform_get_drvdata(pdev);
 	debugfs_remove_recursive(dent_dir);
 	if (g_qsee_log)
-		dma_free_coherent(&pdev->dev, QSEE_LOG_BUF_SIZE,
-					 (void *)g_qsee_log, coh_pmem);
+		qtee_shmbridge_free_shm(&shm);
 }
 
 static int __update_hypdbg_base(struct platform_device *pdev,
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 42fb7b3..ef54463 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -334,7 +334,8 @@
 		.minor = 0,
 		.patchid = 0,
 		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
-			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT,
+			ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT |
+			ADRENO_IFPC,
 		.sqefw_name = "a650_sqe.fw",
 		.zap_name = "a620_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 26ac5f3..95a2bd0 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1803,7 +1803,7 @@
 	 * On A640, the GPUHTW SCID is configured via a NoC override in the
 	 * XBL image.
 	 */
-	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev))
+	if (adreno_is_a640_family(adreno_dev) || adreno_is_a612(adreno_dev))
 		return;
 
 	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
@@ -1824,7 +1824,7 @@
 	 * Attributes override through GBIF is not supported with MMU-500.
 	 * Attributes are used as configured through SMMU pagetable entries.
 	 */
-	if (adreno_is_a640(adreno_dev) || adreno_is_a612(adreno_dev))
+	if (adreno_is_a640_family(adreno_dev) || adreno_is_a612(adreno_dev))
 		return;
 
 	/*
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index f49ed65..ab2d0ef 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -8,6 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/of_platform.h>
+#include <soc/qcom/cmd-db.h>
 
 #include "kgsl_gmu_core.h"
 #include "kgsl_gmu.h"
@@ -83,6 +84,7 @@
 	struct resource *res_pdc, *res_cfg, *res_seq;
 	void __iomem *cfg = NULL, *seq = NULL, *rscc;
 	unsigned int cfg_offset, seq_offset;
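+	/* cmd_db_read_addr() returns 0 when the "vrm.soc" resource is absent */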
+	u32 vrm_resource_addr = cmd_db_read_addr("vrm.soc");
 
 	/* Offsets from the base PDC (if no PDC subsections in the DTSI) */
 	if (adreno_is_a640v2(adreno_dev)) {
@@ -200,6 +202,16 @@
 			adreno_dev->gpucore->pdc_address_offset);
 
 	_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
+
+	if (vrm_resource_addr && adreno_is_a620(adreno_dev)) {
+		_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 3,
+				0x10108);
+		_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 3,
+				vrm_resource_addr + 0x4);
+		_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 3,
+				0x0);
+	}
+
 	_regwrite(cfg, PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
 	_regwrite(cfg, PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
 	_regwrite(cfg, PDC_GPU_TCS3_CONTROL, 0);
@@ -221,6 +233,15 @@
 
 	_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);
 
+	if (vrm_resource_addr && adreno_is_a620(adreno_dev)) {
+		_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 3,
+				0x10108);
+		_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 3,
+				vrm_resource_addr + 0x4);
+		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 3,
+				0x1);
+	}
+
 	/* Setup GPU PDC */
 	_regwrite(cfg, PDC_GPU_SEQ_START_ADDR, 0);
 	_regwrite(cfg, PDC_GPU_ENABLE_PDC, 0x80000001);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e1ec421..6423495 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2662,6 +2662,12 @@
 		return -ENOMEM;
 
 	attach = dma_buf_attach(dmabuf, device->dev);
+
+	if (IS_ERR(attach)) {
+		ret = PTR_ERR(attach);
+		goto out;
+	}
+
 	/*
 	 * If dma buffer is marked IO coherent, skip sync at attach,
 	 * which involves flushing the buffer on CPU.
@@ -2670,11 +2676,6 @@
 	if (entry->memdesc.flags & KGSL_MEMFLAGS_IOCOHERENT)
 		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 
-	if (IS_ERR_OR_NULL(attach)) {
-		ret = attach ? PTR_ERR(attach) : -EINVAL;
-		goto out;
-	}
-
 	meta->dmabuf = dmabuf;
 	meta->attach = attach;
 	meta->entry = entry;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a62afd9..ed9fdca 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -469,7 +469,7 @@
 	mutex_unlock(&kernel_map_global_lock);
 }
 
-static int kgsl_lock_sgt(struct sg_table *sgt)
+static int kgsl_lock_sgt(struct sg_table *sgt, u64 size)
 {
 	struct scatterlist *sg;
 	int dest_perms = PERM_READ | PERM_WRITE;
@@ -479,12 +479,26 @@
 	int i;
 
 	ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
-	if (!ret) {
-		/* Set private bit for each sg to indicate that its secured */
-		for_each_sg(sgt->sgl, sg, sgt->nents, i)
-			SetPagePrivate(sg_page(sg));
+	if (ret) {
+		/*
+		 * If returned error code is EADDRNOTAVAIL, then this
+		 * memory may no longer be in a usable state as security
+		 * state of the pages is unknown after this failure. This
+		 * memory can neither be added back to the pool nor buddy
+		 * system.
+		 */
+		if (ret == -EADDRNOTAVAIL)
+			pr_err("Failure to lock secure GPU memory 0x%llx bytes will not be recoverable\n",
+				size);
+
+		return ret;
 	}
-	return ret;
+
+	/* Set the private bit for each sg to indicate that it's secured */
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		SetPagePrivate(sg_page(sg));
+
+	return 0;
 }
 
 static int kgsl_unlock_sgt(struct sg_table *sgt)
@@ -935,11 +949,18 @@
 			goto done;
 		}
 
-		ret = kgsl_lock_sgt(memdesc->sgt);
+		ret = kgsl_lock_sgt(memdesc->sgt, memdesc->size);
 		if (ret) {
 			sg_free_table(memdesc->sgt);
 			kfree(memdesc->sgt);
 			memdesc->sgt = NULL;
+
+			if (ret == -EADDRNOTAVAIL) {
+				kgsl_free(memdesc->pages);
+				memset(memdesc, 0, sizeof(*memdesc));
+				return ret;
+			}
+
 			goto done;
 		}
 
@@ -1039,8 +1060,11 @@
 	sg_init_table(&sgl, 1);
 	sg_set_page(&sgl, page, PAGE_SIZE, 0);
 
-	status = kgsl_lock_sgt(&sgt);
+	status = kgsl_lock_sgt(&sgt, PAGE_SIZE);
 	if (status) {
+		if (status == -EADDRNOTAVAIL)
+			return NULL;
+
 		__free_page(page);
 		return NULL;
 	}
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index e8d5f6c..c4a14a1 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -3293,7 +3293,8 @@
 
 	fts_chip_powercycle(info);
 	retval = flashProcedure(PATH_FILE_FW, crc_status, 1);
-	if (retval == (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED)) {
+	if ((retval & ERROR_FILE_NOT_FOUND) == ERROR_FILE_NOT_FOUND ||
+		retval == (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED)) {
 		logError(1, "%s %s: no firmware file or no newer firmware!\n",
 			tag, __func__);
 		goto NO_FIRMWARE_UPDATE;
@@ -3705,6 +3706,12 @@
 	/* enable the interrupt */
 	error |= fts_enableInterrupt();
 
+#if defined(CONFIG_FB_MSM)
+	error |= fb_register_client(&info->notifier);
+#else
+	error |= msm_drm_register_client(&info->notifier);
+#endif
+
 	if (error < OK)
 		logError(1, "%s %s Init after Probe error (ERROR = %08X)\n",
 			tag, __func__, error);
@@ -4800,12 +4807,6 @@
 			msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
 	logError(1, "%s Probe Finished!\n", tag);
 
-#if defined(CONFIG_FB_MSM)
-	error |= fb_register_client(&info->notifier);
-#else
-	error |= msm_drm_register_client(&info->notifier);
-#endif
-
 	return OK;
 
 	/* error exit path */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index f7bf5a2..d180b4e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -377,15 +377,13 @@
 	if (!CAM_CPAS_CLIENT_VALID(client_indx))
 		return -EINVAL;
 
-	mutex_lock(&cpas_core->client_mutex[client_indx]);
 	cpas_client = cpas_core->cpas_client[client_indx];
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
 		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
 			client_indx, cpas_client->data.identifier,
 			cpas_client->data.cell_index);
-		rc = -EPERM;
-		goto unlock_client;
+		return -EPERM;
 	}
 
 	if (mb)
@@ -397,8 +395,6 @@
 
 	*value = reg_value;
 
-unlock_client:
-	mutex_unlock(&cpas_core->client_mutex[client_indx]);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index fc16743..e32366b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -3824,16 +3824,12 @@
 	uint32_t *fw_cmd_buf_iova_addr)
 {
 	int rc = 0;
-	int i, j, k;
+	int i;
 	int num_cmd_buf = 0;
 	uint64_t addr;
 	size_t len;
 	struct cam_cmd_buf_desc *cmd_desc = NULL;
 	uintptr_t cpu_addr = 0;
-	struct ipe_frame_process_data *frame_process_data = NULL;
-	struct bps_frame_process_data *bps_frame_process_data = NULL;
-	struct frame_set *ipe_set = NULL;
-	struct frame_buffer *bps_bufs = NULL;
 
 	cmd_desc = (struct cam_cmd_buf_desc *)
 		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
@@ -3881,49 +3877,6 @@
 		return -EINVAL;
 	}
 
-	if (ctx_data->icp_dev_acquire_info->dev_type !=
-		CAM_ICP_RES_TYPE_BPS) {
-		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
-		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
-		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
-			frame_process_data->target_time,
-			frame_process_data->frames_in_batch);
-		frame_process_data->strip_lib_out_addr = 0;
-		frame_process_data->iq_settings_addr = 0;
-		frame_process_data->scratch_buffer_addr = 0;
-		frame_process_data->ubwc_stats_buffer_addr = 0;
-		frame_process_data->cdm_buffer_addr = 0;
-		frame_process_data->cdm_prog_base = 0;
-		for (i = 0; i < frame_process_data->frames_in_batch; i++) {
-			ipe_set = &frame_process_data->framesets[i];
-			for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
-				for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
-					ipe_set->buffers[j].buf_ptr[k] = 0;
-					ipe_set->buffers[j].meta_buf_ptr[k] = 0;
-				}
-			}
-		}
-	} else {
-		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
-		bps_frame_process_data =
-			(struct bps_frame_process_data *)cpu_addr;
-		CAM_DBG(CAM_ICP, "%u %u",
-			bps_frame_process_data->max_num_cores,
-			bps_frame_process_data->target_time);
-		bps_frame_process_data->ubwc_stats_buffer_addr = 0;
-		bps_frame_process_data->cdm_buffer_addr = 0;
-		bps_frame_process_data->iq_settings_addr = 0;
-		bps_frame_process_data->strip_lib_out_addr = 0;
-		bps_frame_process_data->cdm_prog_addr = 0;
-		for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
-			bps_bufs = &bps_frame_process_data->buffers[i];
-			for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
-				bps_bufs->buf_ptr[j] = 0;
-				bps_bufs->meta_buf_ptr[j] = 0;
-			}
-		}
-	}
-
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 7e1fa259..8120fada 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -22,6 +22,8 @@
 
 static const char isp_dev_name[] = "isp";
 
+static struct cam_isp_ctx_debug isp_ctx_debug;
+
 #define INC_STATE_MONITOR_HEAD(head) \
 	(atomic64_add_return(1, head) % \
 	CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES)
@@ -3440,12 +3442,6 @@
 		(struct cam_isp_context *) ctx->ctx_priv;
 	struct cam_isp_stop_args         stop_isp;
 
-	/* Mask off all the incoming hardware events */
-	spin_lock_bh(&ctx->lock);
-	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
-	spin_unlock_bh(&ctx->lock);
-	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
-
 	/* stop hw first */
 	if (ctx_isp->hw_ctx) {
 		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -3462,6 +3458,12 @@
 			&stop);
 	}
 
+	/* Mask off all the incoming hardware events */
+	spin_lock_bh(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock_bh(&ctx->lock);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
 	while (!list_empty(&ctx->pending_req_list)) {
 		req = list_first_entry(&ctx->pending_req_list,
 				struct cam_ctx_request, list);
@@ -3725,7 +3727,8 @@
 	} else {
 		CAM_DBG(CAM_ISP, "No handle function for substate %d",
 			ctx_isp->substate_activated);
-		__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
+		if (isp_ctx_debug.enable_state_monitor_dump)
+			__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
 	}
 
 	CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
@@ -3867,6 +3870,31 @@
 	return rc;
 }
 
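+/* Create the "camera_isp_ctx" debugfs node for the state monitor dump control */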
+static int cam_isp_context_debug_register(void)
+{
+	isp_ctx_debug.dentry = debugfs_create_dir("camera_isp_ctx",
+		NULL);
+
+	if (!isp_ctx_debug.dentry) {
+		CAM_ERR(CAM_ISP, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_u32("enable_state_monitor_dump",
+		0644,
+		isp_ctx_debug.dentry,
+		&isp_ctx_debug.enable_state_monitor_dump)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_state_monitor_dump");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(isp_ctx_debug.dentry);
+	return -ENOMEM;
+}
+
 int cam_isp_context_init(struct cam_isp_context *ctx,
 	struct cam_context *ctx_base,
 	struct cam_req_mgr_kmd_ops *crm_node_intf,
@@ -3918,6 +3946,8 @@
 		CAM_ISP_CTX_ACTIVATED_MAX;
 	}
 	atomic64_set(&ctx->state_monitor_head, -1);
+
+	cam_isp_context_debug_register();
 err:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index ed4b2e2..ccec589 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -71,6 +71,18 @@
 };
 
 /**
+ * struct cam_isp_ctx_debug -  Contains debug parameters
+ *
+ * @dentry:                    Debugfs entry
+ * @enable_state_monitor_dump: Enable isp state monitor dump
+ *
+ */
+struct cam_isp_ctx_debug {
+	struct dentry  *dentry;
+	uint32_t        enable_state_monitor_dump;
+};
+
+/**
  * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
  *
  * @irq_ops:               Array of handle function pointers.
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 7944cbc8..a1d1c6a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -60,6 +60,7 @@
 	CAM_IFE_IRQ_CAMIF_REG_STATUS1           = 1,
 	CAM_IFE_IRQ_CAMIF_REG_STATUS2           = 2,
 	CAM_IFE_IRQ_VIOLATION_STATUS            = 3,
+	CAM_IFE_IRQ_BUS_OVERFLOW_STATUS         = 4,
 	CAM_IFE_IRQ_REGISTERS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index e855a54..48eb7f1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -80,6 +80,7 @@
 	.enable_diagnostic_hw            = 0x1,
 	.pp_camif_cfg_en_shift           = 0,
 	.pp_camif_cfg_ife_out_en_shift   = 8,
+	.top_debug_cfg_en                = 1,
 };
 
 static struct cam_vfe_top_ver3_reg_offset_common vfe480_top_common_reg = {
@@ -106,6 +107,21 @@
 	.diag_sensor_status_0     = 0x00000068,
 	.diag_sensor_status_1     = 0x00000098,
 	.bus_overflow_status      = 0x0000AA68,
+	.top_debug_cfg            = 0x000000DC,
+	.top_debug_0              = 0x00000080,
+	.top_debug_1              = 0x00000084,
+	.top_debug_2              = 0x00000088,
+	.top_debug_3              = 0x0000008C,
+	.top_debug_4              = 0x0000009C,
+	.top_debug_5              = 0x000000A0,
+	.top_debug_6              = 0x000000A4,
+	.top_debug_7              = 0x000000A8,
+	.top_debug_8              = 0x000000AC,
+	.top_debug_9              = 0x000000B0,
+	.top_debug_10             = 0x000000B4,
+	.top_debug_11             = 0x000000B8,
+	.top_debug_12             = 0x000000BC,
+	.top_debug_13             = 0x000000C0,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_rdi[3] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index c9d66ed..221c372 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -48,6 +48,11 @@
 	.diag_config              = 0x00000050,
 	.diag_sensor_status_0     = 0x00000054,
 	.bus_overflow_status      = 0x00001A68,
+	.top_debug_cfg            = 0x00000074,
+	.top_debug_0              = 0x0000005C,
+	.top_debug_1              = 0x00000068,
+	.top_debug_2              = 0x0000006C,
+	.top_debug_3              = 0x00000070,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe48x_camif_rdi[4] = {
@@ -118,6 +123,7 @@
 		.error_irq_mask2                 = 0x100,
 		.subscribe_irq_mask1             = 0x3,
 		.enable_diagnostic_hw            = 0x1,
+		.top_debug_cfg_en                = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
@@ -131,6 +137,7 @@
 		.error_irq_mask2                 = 0x200,
 		.subscribe_irq_mask1             = 0x30,
 		.enable_diagnostic_hw            = 0x1,
+		.top_debug_cfg_en                = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
@@ -144,6 +151,7 @@
 		.error_irq_mask2                 = 0x400,
 		.subscribe_irq_mask1             = 0x300,
 		.enable_diagnostic_hw            = 0x1,
+		.top_debug_cfg_en                = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
@@ -157,6 +165,7 @@
 		.error_irq_mask2                 = 0x800,
 		.subscribe_irq_mask1             = 0x3000,
 		.enable_diagnostic_hw            = 0x1,
+		.top_debug_cfg_en                = 0x1,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index 0cd357a..347109b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -17,6 +17,7 @@
 #include "cam_vfe_camif_lite_ver3.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
+#include "cam_cpas_api.h"
 
 struct cam_vfe_mux_camif_lite_data {
 	void __iomem                                *mem_base;
@@ -142,9 +143,8 @@
 	evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base +
 		camif_lite_priv->common_reg->violation_status);
 
-	if (error_flag && !soc_private->is_ife_lite)
-		CAM_INFO(CAM_ISP, "Violation status = 0x%X",
-			evt_payload->irq_reg_val[i]);
+	evt_payload->irq_reg_val[++i] = cam_io_r(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->bus_overflow_status);
 
 	th_payload->evt_payload_priv = evt_payload;
 
@@ -308,6 +308,10 @@
 	memset(err_irq_mask, 0, sizeof(err_irq_mask));
 	memset(irq_mask, 0, sizeof(irq_mask));
 
+	/* config debug status registers */
+	cam_io_w_mb(rsrc_data->reg_data->top_debug_cfg_en, rsrc_data->mem_base +
+		rsrc_data->common_reg->top_debug_cfg);
+
 	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
 		rsrc_data->reg_data->error_irq_mask0;
 	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
@@ -726,23 +730,93 @@
 	return rc;
 }
 
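+/* Dump CAMNOC registers on bus overflow, or the top_debug status otherwise */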
+static void cam_vfe_camif_lite_overflow_debug_info(uint32_t *status,
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv)
+{
+	uint32_t bus_overflow_status = 0;
+	struct cam_vfe_soc_private *soc_private = NULL;
+	uint32_t val0, val1, val2, val3;
+
+	bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS];
+	soc_private = camif_lite_priv->soc_info->soc_private;
+
+	if (bus_overflow_status) {
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0xA20, true, &val0);
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0x1420, true, &val1);
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0x1A20, true, &val2);
+		CAM_INFO(CAM_ISP,
+			"CAMNOC REG ife_linear: 0x%X ife_rdi_wr: 0x%X ife_ubwc_stats: 0x%X",
+			val0, val1, val2);
+
+	} else {
+		val0 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_0);
+		val1 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_1);
+		val2 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_2);
+		val3 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_3);
+		CAM_INFO(CAM_ISP,
+			"status_0: 0x%X status_1: 0x%X status_2: 0x%X status_3: 0x%X",
+			val0, val1, val2, val3);
+
+		if (soc_private->is_ife_lite)
+			return;
+
+		val0 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_4);
+		val1 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_5);
+		val2 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_6);
+		val3 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_7);
+		CAM_INFO(CAM_ISP,
+			"status_4: 0x%X status_5: 0x%X status_6: 0x%X status_7: 0x%X",
+			val0, val1, val2, val3);
+		val0 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_8);
+		val1 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_9);
+		val2 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_10);
+		val3 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_11);
+		CAM_INFO(CAM_ISP,
+			"status_8: 0x%X status_9: 0x%X status_10: 0x%X status_11: 0x%X",
+			val0, val1, val2, val3);
+		val0 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_12);
+		val1 = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->top_debug_13);
+		CAM_INFO(CAM_ISP, "status_12: 0x%X status_13: 0x%X",
+			val0, val1);
+	}
+}
+
 static void cam_vfe_camif_lite_print_status(uint32_t *status,
-	int err_type, bool is_ife_lite)
+	int err_type, struct cam_vfe_mux_camif_lite_data *camif_lite_priv)
 {
 	uint32_t violation_mask = 0x3F00, violation_status = 0;
 	uint32_t bus_overflow_status = 0, status_0 = 0, status_2 = 0;
+	struct cam_vfe_soc_private *soc_private = NULL;
 
 	if (!status) {
 		CAM_ERR(CAM_ISP, "Invalid params");
 		return;
 	}
 
-	bus_overflow_status = status[CAM_IFE_IRQ_REGISTERS_MAX];
+	bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS];
 	violation_status = status[CAM_IFE_IRQ_VIOLATION_STATUS];
 	status_0 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	status_2 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
+	soc_private = camif_lite_priv->soc_info->soc_private;
 
-	if (is_ife_lite)
+	if (soc_private->is_ife_lite)
 		goto ife_lite;
 
 	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW) {
@@ -792,7 +866,8 @@
 
 	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) {
 		CAM_INFO(CAM_ISP, "PDLIB / LCR Module hang");
-		/* print debug registers here */
+		/* print debug registers */
+		cam_vfe_camif_lite_overflow_debug_info(status, camif_lite_priv);
 		return;
 	}
 
@@ -874,6 +949,13 @@
 			CAM_INFO(CAM_ISP, "RDI3 BUS OVERFLOW");
 	}
 
+	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) {
+		CAM_INFO(CAM_ISP, "RDI hang");
+		/* print debug registers */
+		cam_vfe_camif_lite_overflow_debug_info(status, camif_lite_priv);
+		return;
+	}
+
 	if (err_type == CAM_VFE_IRQ_STATUS_VIOLATION) {
 		if (status_2 & 0x100)
 			CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
@@ -942,9 +1024,8 @@
 	struct cam_vfe_top_irq_evt_payload *payload;
 	struct cam_isp_hw_event_info evt_info;
 	struct cam_vfe_soc_private *soc_private = NULL;
-	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX + 1];
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX] = {0};
 	int i = 0;
-	bool is_ife_lite = true;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -961,10 +1042,6 @@
 		return -ENODEV;
 	}
 
-	is_ife_lite = soc_private->is_ife_lite;
-
-	memset(irq_status, 0,
-		sizeof(uint32_t) * (CAM_IFE_IRQ_REGISTERS_MAX + 1));
 	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
 		irq_status[i] = payload->irq_reg_val[i];
 
@@ -1023,13 +1100,10 @@
 			camif_lite_priv->event_cb(camif_lite_priv->priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
-		irq_status[CAM_IFE_IRQ_REGISTERS_MAX] =
-			cam_io_r(camif_lite_priv->mem_base +
-			camif_lite_priv->common_reg->bus_overflow_status);
-
 		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
 
-		cam_vfe_camif_lite_print_status(irq_status, ret, is_ife_lite);
+		cam_vfe_camif_lite_print_status(irq_status, ret,
+			camif_lite_priv);
 
 		if (camif_lite_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
 			cam_vfe_camif_lite_reg_dump(camif_lite_node);
@@ -1047,7 +1121,8 @@
 
 		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
 
-		cam_vfe_camif_lite_print_status(irq_status, ret, is_ife_lite);
+		cam_vfe_camif_lite_print_status(irq_status, ret,
+			camif_lite_priv);
 
 		if (camif_lite_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
 			cam_vfe_camif_lite_reg_dump(camif_lite_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
index ad8e44e..54a38bd 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
@@ -40,6 +40,7 @@
 	uint32_t     error_irq_mask2;
 	uint32_t     subscribe_irq_mask1;
 	uint32_t     enable_diagnostic_hw;
+	uint32_t     top_debug_cfg_en;
 };
 
 struct cam_vfe_camif_lite_ver3_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index 810e887..ccf57ee 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -141,9 +141,8 @@
 	evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base +
 		camif_priv->common_reg->violation_status);
 
-	if (error_flag)
-		CAM_INFO(CAM_ISP, "Violation status = 0x%X",
-			evt_payload->irq_reg_val[i]);
+	evt_payload->irq_reg_val[++i] = cam_io_r(camif_priv->mem_base +
+		camif_priv->common_reg->bus_overflow_status);
 
 	th_payload->evt_payload_priv = evt_payload;
 
@@ -374,6 +373,10 @@
 		return -ENODEV;
 	}
 
+	/* config debug status registers */
+	cam_io_w_mb(rsrc_data->reg_data->top_debug_cfg_en, rsrc_data->mem_base +
+		rsrc_data->common_reg->top_debug_cfg);
+
 	/*config vfe core*/
 	val = (rsrc_data->pix_pattern <<
 		rsrc_data->reg_data->pixel_pattern_shift);
@@ -733,8 +736,76 @@
 	return rc;
 }
 
+static void cam_vfe_camif_ver3_overflow_debug_info(uint32_t *status,
+	struct cam_vfe_mux_camif_ver3_data *camif_priv)
+{
+	struct cam_vfe_soc_private *soc_private;
+	uint32_t bus_overflow_status;
+	uint32_t val0, val1, val2, val3;
+
+	bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS];
+	soc_private = camif_priv->soc_info->soc_private;
+
+	if (bus_overflow_status) {
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0xA20, true, &val0);
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0x1420, true, &val1);
+		cam_cpas_reg_read(soc_private->cpas_handle,
+			CAM_CPAS_REG_CAMNOC, 0x1A20, true, &val2);
+		CAM_INFO(CAM_ISP,
+			"CAMNOC REG ife_linear: 0x%X ife_rdi_wr: 0x%X ife_ubwc_stats: 0x%X",
+			val0, val1, val2);
+	} else {
+		val0 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_0);
+		val1 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_1);
+		val2 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_2);
+		val3 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_3);
+		CAM_INFO(CAM_ISP,
+			"status_0: 0x%X status_1: 0x%X status_2: 0x%X status_3: 0x%X",
+			val0, val1, val2, val3);
+
+		val0 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_4);
+		val1 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_5);
+		val2 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_6);
+		val3 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_7);
+		CAM_INFO(CAM_ISP,
+			"status_4: 0x%X status_5: 0x%X status_6: 0x%X status_7: 0x%X",
+			val0, val1, val2, val3);
+
+		val0 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_8);
+		val1 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_9);
+		val2 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_10);
+		val3 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_11);
+		CAM_INFO(CAM_ISP,
+			"status_8: 0x%X status_9: 0x%X status_10: 0x%X status_11: 0x%X",
+			val0, val1, val2, val3);
+
+		val0 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_12);
+		val1 = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->top_debug_13);
+		CAM_INFO(CAM_ISP, "status_12: 0x%X status_13: 0x%X",
+			val0, val1);
+	}
+}
+
 static void cam_vfe_camif_ver3_print_status(uint32_t *status,
-	int err_type)
+	int err_type, struct cam_vfe_mux_camif_ver3_data *camif_priv)
 {
 	uint32_t violation_mask = 0x3F, module_id = 0;
 	uint32_t bus_overflow_status = 0, status_0 = 0, status_2 = 0;
@@ -744,7 +815,7 @@
 		return;
 	}
 
-	bus_overflow_status = status[CAM_IFE_IRQ_REGISTERS_MAX];
+	bus_overflow_status = status[CAM_IFE_IRQ_BUS_OVERFLOW_STATUS];
 	status_0 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	status_2 = status[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
 
@@ -829,6 +900,7 @@
 	if (err_type == CAM_VFE_IRQ_STATUS_OVERFLOW && !bus_overflow_status) {
 		CAM_INFO(CAM_ISP, "PIXEL PIPE Module hang");
 		/* print debug registers */
+		cam_vfe_camif_ver3_overflow_debug_info(status, camif_priv);
 		return;
 	}
 
@@ -1100,7 +1172,7 @@
 	struct cam_vfe_mux_camif_ver3_data *camif_priv;
 	struct cam_vfe_top_irq_evt_payload *payload;
 	struct cam_isp_hw_event_info evt_info;
-	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX + 1] = {0};
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX] = {0};
 	int i = 0;
 
 	if (!handler_priv || !evt_payload_priv) {
@@ -1177,13 +1249,9 @@
 			camif_priv->event_cb(camif_priv->priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
-		irq_status[CAM_IFE_IRQ_REGISTERS_MAX] =
-			cam_io_r(camif_priv->mem_base +
-			camif_priv->common_reg->bus_overflow_status);
-
 		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
 
-		cam_vfe_camif_ver3_print_status(irq_status, ret);
+		cam_vfe_camif_ver3_print_status(irq_status, ret, camif_priv);
 
 		if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
 			cam_vfe_camif_ver3_reg_dump(camif_node);
@@ -1198,7 +1266,7 @@
 
 		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
 
-		cam_vfe_camif_ver3_print_status(irq_status, ret);
+		cam_vfe_camif_ver3_print_status(irq_status, ret, camif_priv);
 
 		if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_REG_DUMP)
 			cam_vfe_camif_ver3_reg_dump(camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
index 3c82ca2..40d8e40 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
@@ -56,6 +56,7 @@
 	uint32_t     enable_diagnostic_hw;
 	uint32_t     pp_camif_cfg_en_shift;
 	uint32_t     pp_camif_cfg_ife_out_en_shift;
+	uint32_t     top_debug_cfg_en;
 };
 
 struct cam_vfe_camif_ver3_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index 1ae8e5d..dd0bb94 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -48,6 +48,21 @@
 	uint32_t diag_sensor_status_0;
 	uint32_t diag_sensor_status_1;
 	uint32_t bus_overflow_status;
+	uint32_t top_debug_cfg;
+	uint32_t top_debug_0;
+	uint32_t top_debug_1;
+	uint32_t top_debug_2;
+	uint32_t top_debug_3;
+	uint32_t top_debug_4;
+	uint32_t top_debug_5;
+	uint32_t top_debug_6;
+	uint32_t top_debug_7;
+	uint32_t top_debug_8;
+	uint32_t top_debug_9;
+	uint32_t top_debug_10;
+	uint32_t top_debug_11;
+	uint32_t top_debug_12;
+	uint32_t top_debug_13;
 };
 
 struct cam_vfe_camif_common_cfg {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 73ac694..b7f7750 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -234,6 +234,11 @@
 		return -EINVAL;
 	}
 
+	if (!atomic_read(&cam_mem_mgr_state)) {
+		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
+		return -EINVAL;
+	}
+
 	if (!buf_handle || !vaddr_ptr || !len)
 		return -EINVAL;
 
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
index 16119b3..d67ac8b 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
@@ -530,11 +530,9 @@
 		FMDERR("%s fail to open v4l2\n", __func__);
 		return retval;
 	}
-
-	if (radio->users == 0)
-		radio->users++;
-	else {
+	if (atomic_inc_return(&radio->users) != 1) {
 		FMDERR("Device already in use. Try again later\n");
+		atomic_dec(&radio->users);
 		return -EBUSY;
 	}
 
@@ -556,15 +554,13 @@
 		FMDERR("%s:enable irq failed\n", __func__);
 		goto open_err_req_irq;
 	}
-
-	if (retval)
-		v4l2_fh_release(file);
 	return retval;
 
 open_err_req_irq:
 	rtc6226_fm_power_cfg(radio, TURNING_OFF);
 open_err_setup:
-	radio->users--;
+	atomic_dec(&radio->users);
+	v4l2_fh_release(file);
 	return retval;
 }
 
@@ -584,7 +580,7 @@
 		}
 	}
 	rtc6226_disable_irq(radio);
-	radio->users--;
+	atomic_dec(&radio->users);
 	retval = rtc6226_fm_power_cfg(radio, TURNING_OFF);
 	if (retval < 0)
 		FMDERR("%s: failed to apply voltage\n", __func__);
@@ -751,7 +747,8 @@
 	FMDBG("v4l2_device_register successfully\n");
 	hdl = &radio->ctrl_handler;
 
-	radio->users = 0;
+	/* initialize the device count */
+	atomic_set(&radio->users, 0);
 	radio->client = client;
 	mutex_init(&radio->lock);
 	init_completion(&radio->completion);
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226.h b/drivers/media/radio/rtc6226/radio-rtc6226.h
index e9ce8f4..6b1abd3 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226.h
+++ b/drivers/media/radio/rtc6226/radio-rtc6226.h
@@ -481,7 +481,7 @@
 	struct fm_power_vreg_data *vioreg;
 	int band;
 	int space;
-	unsigned int users;
+	atomic_t users;
 	unsigned int mode;
 	u8 seek_tune_status;
 	u8 rssi_th;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d09fdbe..cc37013 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -812,6 +812,7 @@
 		host->ios.clock);
 
 	host->clk_scaling.enable = true;
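+	/* A fresh enable must not inherit a stale suspended state */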
+	host->clk_scaling.is_suspended = false;
 
 	return err;
 }
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 8270d35..cb036f5 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -501,6 +501,7 @@
 		blk_mq_unquiesce_queue(q);
 
 	blk_cleanup_queue(q);
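+	/* No more requests can arrive; release the queue's tag set */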
+	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*
 	 * A request can be completed before the next request, potentially
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index a24a715..d3cec34 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -38,6 +38,9 @@
 EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
 EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
 EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
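+/* Tracepoints for data path frequency update/reset/boost events */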
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
 
 /* Helper Functions */
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 2cea1e3..85aa855 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -18,6 +18,19 @@
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 
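+/* Scratch state for segmenting one coalesced frame: cached header
+ * pointers/lengths, the payload walk (data_offset/data_len), and counters
+ * that feed IPv4 ID and GSO stamping (pkt_id/pkt_count).
+ */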
+struct rmnet_map_coal_metadata {
+	void *ip_header;
+	void *trans_header;
+	int ip_len; /* signed: may hold an ipv6_skip_exthdr() error */
+	u16 trans_len;
+	u16 data_offset;
+	u16 data_len;
+	u8 ip_proto;
+	u8 trans_proto;
+	u8 pkt_id;
+	u8 pkt_count;
+};
+
 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
 					 const void *txporthdr)
 {
@@ -529,144 +542,180 @@
 	}
 }
 
+static void rmnet_map_move_headers(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	int ip_len; /* signed: may hold an ipv6_skip_exthdr() error */
+	u16 trans_len = 0;
+	u8 proto;
+
+	/* This only applies to non-linear SKBs */
+	if (!skb_is_nonlinear(skb))
+		return;
+
+	iph = (struct iphdr *)rmnet_map_data_ptr(skb);
+	if (iph->version == 4) {
+		ip_len = iph->ihl * 4;
+		proto = iph->protocol;
+		if (iph->frag_off & htons(IP_OFFSET))
+			/* No transport header information */
+			goto pull;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+		__be16 frag_off;
+		u8 nexthdr = ip6h->nexthdr;
+
+		ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
+					  &frag_off);
+		if (ip_len < 0)
+			return;
+
+		proto = nexthdr;
+	} else {
+		return;
+	}
+
+	if (proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
+
+		trans_len = tp->doff * 4;
+	} else if (proto == IPPROTO_UDP) {
+		trans_len = sizeof(struct udphdr);
+	} else if (proto == NEXTHDR_FRAGMENT) {
+		/* Non-first fragments don't have the fragment length added by
+		 * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
+		 * we account for the length here.
+		 */
+		ip_len += sizeof(struct frag_hdr);
+	}
+
+pull:
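+	/* Pull only the headers into the linear area; payload stays in frags */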
+	__pskb_pull_tail(skb, ip_len + trans_len);
+	skb_reset_network_header(skb);
+	if (trans_len)
+		skb_set_transport_header(skb, ip_len);
+}
+
 static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
-				     u32 hdr_len,
-				     u32 start,
-				     u16 pkt_len, u8 pkt_count,
+				     struct rmnet_map_coal_metadata *coal_meta,
 				     struct sk_buff *dest)
 {
-	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) + hdr_len;
-	u32 copy_len = pkt_len * pkt_count;
+	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) +
+				    coal_meta->ip_len + coal_meta->trans_len;
+	u32 copy_len = coal_meta->data_len * coal_meta->pkt_count;
 
 	if (skb_is_nonlinear(coal_skb)) {
 		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
 		struct page *page = skb_frag_page(frag0);
 
 		skb_append_pagefrags(dest, page,
-				     frag0->page_offset + hdr_len + start,
+				     frag0->page_offset + coal_meta->ip_len +
+				     coal_meta->trans_len +
+				     coal_meta->data_offset,
 				     copy_len);
 		dest->data_len += copy_len;
 		dest->len += copy_len;
 	} else {
-		skb_put_data(dest, data_start + start, copy_len);
+		skb_put_data(dest, data_start + coal_meta->data_offset,
+			     copy_len);
 	}
 }
 
 /* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
  * if needed (i.e. forwarding, UDP GRO)
  */
-static void rmnet_map_gso_stamp(struct sk_buff *skb, u16 gso_size, u8 gso_segs)
+static void rmnet_map_gso_stamp(struct sk_buff *skb,
+				struct rmnet_map_coal_metadata *coal_meta)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	struct iphdr *iph = ip_hdr(skb);
-	void *addr;
-	__sum16 *check;
-	__wsum partial;
-	int csum_len;
-	u16 pkt_len = gso_size * gso_segs;
-	u8 protocol;
-	bool ipv4 = iph->version == 4;
+	__sum16 pseudo;
+	u16 pkt_len = skb->len - coal_meta->ip_len;
+	bool ipv4 = coal_meta->ip_proto == 4;
 
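+	/* Write the pseudo-header checksum; the transport checksum is then
+	 * completed from CHECKSUM_PARTIAL when the skb is segmented.
+	 */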
 	if (ipv4) {
-		addr = &iph->saddr;
-		csum_len = sizeof(iph->saddr) * 2;
-		protocol = iph->protocol;
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    pkt_len, coal_meta->trans_proto,
+					    0);
 	} else {
 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
-		addr = &ip6h->saddr;
-		csum_len = sizeof(ip6h->saddr) * 2;
-		protocol = ip6h->nexthdr;
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  pkt_len, coal_meta->trans_proto, 0);
 	}
 
-	if (protocol == IPPROTO_TCP) {
+	if (coal_meta->trans_proto == IPPROTO_TCP) {
 		struct tcphdr *tp = tcp_hdr(skb);
 
-		pkt_len += tp->doff * 4;
-		check = &tp->check;
+		tp->check = pseudo;
 		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		struct udphdr *up = udp_hdr(skb);
 
-		pkt_len += sizeof(*up);
-		check = &up->check;
+		up->check = pseudo;
 		shinfo->gso_type = SKB_GSO_UDP_L4;
 		skb->csum_offset = offsetof(struct udphdr, check);
 	}
 
-	partial = csum_partial(addr, csum_len, 0);
-	partial = csum16_add(partial, htons((u16)protocol));
-	partial = csum16_add(partial, htons(pkt_len));
-	*check = ~csum_fold(partial);
-
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum_start = skb_transport_header(skb) - skb->head;
-	shinfo->gso_size = gso_size;
-	shinfo->gso_segs = gso_segs;
+	shinfo->gso_size = coal_meta->data_len;
+	shinfo->gso_segs = coal_meta->pkt_count;
 }
 
-/* Create a new UDP SKB from the coalesced SKB. Appropriate IP and UDP headers
- * will be added.
- */
-static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,
-						 u32 start,
-						 int start_pkt_num,
-						 u16 pkt_len, u8 pkt_count)
+static void
+__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
+			     struct rmnet_map_coal_metadata *coal_meta,
+			     struct sk_buff_head *list, u8 pkt_id)
 {
 	struct sk_buff *skbn;
-	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
 	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
-	struct udphdr *uh;
 	u32 alloc_len;
-	u16 ip_len, udp_len = sizeof(*uh);
-
-	if (iph->version == 4) {
-		ip_len = iph->ihl * 4;
-	} else if (iph->version == 6) {
-		ip_len = sizeof(struct ipv6hdr);
-	} else {
-		priv->stats.coal.coal_ip_invalid++;
-		return NULL;
-	}
-
-	uh = (struct udphdr *)(rmnet_map_data_ptr(coal_skb) + ip_len);
 
 	/* We can avoid copying the data if the SKB we got from the lower-level
 	 * drivers was nonlinear.
 	 */
 	if (skb_is_nonlinear(coal_skb))
-		alloc_len = ip_len + udp_len;
+		alloc_len = coal_meta->ip_len + coal_meta->trans_len;
 	else
-		alloc_len = ip_len + udp_len + pkt_len;
+		alloc_len = coal_meta->ip_len + coal_meta->trans_len +
+			    coal_meta->data_len;
 
 	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
 	if (!skbn)
-		return NULL;
+		return;
 
-	skb_reserve(skbn, ip_len + udp_len);
-	rmnet_map_nonlinear_copy(coal_skb, ip_len + udp_len,
-				 start, pkt_len, pkt_count, skbn);
+	skb_reserve(skbn, coal_meta->ip_len + coal_meta->trans_len);
+	rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn);
 
-	/* Push UDP header and update length */
-	skb_push(skbn, udp_len);
-	memcpy(skbn->data, uh, udp_len);
+	/* Push transport header and update necessary fields */
+	skb_push(skbn, coal_meta->trans_len);
+	memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len);
 	skb_reset_transport_header(skbn);
-	udp_hdr(skbn)->len = htons(skbn->len);
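+	/* Per-segment fixups: advance the TCP sequence by the payload already
+	 * emitted, or refresh the UDP length field.
+	 */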
+	if (coal_meta->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th = tcp_hdr(skbn);
+
+		th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
+	} else if (coal_meta->trans_proto == IPPROTO_UDP) {
+		udp_hdr(skbn)->len = htons(skbn->len);
+	}
 
 	/* Push IP header and update necessary fields */
-	skb_push(skbn, ip_len);
-	memcpy(skbn->data, iph, ip_len);
+	skb_push(skbn, coal_meta->ip_len);
+	memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len);
 	skb_reset_network_header(skbn);
-	if (iph->version == 4) {
-		iph = ip_hdr(skbn);
-		iph->id = htons(ntohs(iph->id) + start_pkt_num);
+	if (coal_meta->ip_proto == 4) {
+		struct iphdr *iph = ip_hdr(skbn);
+
+		iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id);
 		iph->tot_len = htons(skbn->len);
 		iph->check = 0;
 		iph->check = ip_fast_csum(iph, iph->ihl);
 	} else {
+		/* Payload length includes any extension headers */
 		ipv6_hdr(skbn)->payload_len = htons(skbn->len -
-						    ip_len);
+						    sizeof(struct ipv6hdr));
 	}
 
 	skbn->ip_summed = CHECKSUM_UNNECESSARY;
@@ -674,85 +723,15 @@
 	priv->stats.coal.coal_reconstruct++;
 
 	/* Stamp GSO information if necessary */
-	if (pkt_count > 1)
-		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);
+	if (coal_meta->pkt_count > 1)
+		rmnet_map_gso_stamp(skbn, coal_meta);
 
-	return skbn;
-}
+	__skb_queue_tail(list, skbn);
 
-/* Create a new TCP SKB from the coalesced SKB. Appropriate IP and TCP headers
- * will be added.
- */
-static struct sk_buff *rmnet_map_segment_tcp_skb(struct sk_buff *coal_skb,
-						 u32 start,
-						 int start_pkt_num,
-						 u16 pkt_len, u8 pkt_count)
-{
-	struct sk_buff *skbn;
-	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
-	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
-	struct tcphdr *th;
-	u32 alloc_len;
-	u16 ip_len, tcp_len;
-
-	if (iph->version == 4) {
-		ip_len = iph->ihl * 4;
-	} else if (iph->version == 6) {
-		ip_len = sizeof(struct ipv6hdr);
-	} else {
-		priv->stats.coal.coal_ip_invalid++;
-		return NULL;
-	}
-
-	th = (struct tcphdr *)(rmnet_map_data_ptr(coal_skb) + ip_len);
-	tcp_len = th->doff * 4;
-
-	/* We can avoid copying the data if the SKB we got from the lower-level
-	 * drivers was nonlinear.
-	 */
-	if (skb_is_nonlinear(coal_skb))
-		alloc_len = ip_len + tcp_len;
-	else
-		alloc_len = ip_len + tcp_len + pkt_len;
-
-	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
-	if (!skbn)
-		return NULL;
-
-	skb_reserve(skbn, ip_len + tcp_len);
-	rmnet_map_nonlinear_copy(coal_skb, ip_len + tcp_len,
-				 start, pkt_len, pkt_count, skbn);
-
-	/* Push TCP header and update sequence number */
-	skb_push(skbn, tcp_len);
-	memcpy(skbn->data, th, tcp_len);
-	skb_reset_transport_header(skbn);
-	th = tcp_hdr(skbn);
-	th->seq = htonl(ntohl(th->seq) + start);
-
-	/* Push IP header and update necessary fields */
-	skb_push(skbn, ip_len);
-	memcpy(skbn->data, iph, ip_len);
-	skb_reset_network_header(skbn);
-	if (iph->version == 4) {
-		iph = ip_hdr(skbn);
-		iph->id = htons(ntohs(iph->id) + start_pkt_num);
-		iph->tot_len = htons(skbn->len);
-		iph->check = 0;
-		iph->check = ip_fast_csum(iph, iph->ihl);
-	} else {
-		ipv6_hdr(skbn)->payload_len = htons(skbn->len - ip_len);
-	}
-
-	skbn->ip_summed = CHECKSUM_UNNECESSARY;
-	skbn->dev = coal_skb->dev;
-	priv->stats.coal.coal_reconstruct++;
-
-	/* Stamp GSO information if necessary */
-	if (pkt_count > 1)
-		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);
-
-	return skbn;
+	/* Update meta information to move past the data we just segmented */
+	coal_meta->data_offset += coal_meta->data_len * coal_meta->pkt_count;
+	coal_meta->pkt_id = pkt_id + 1;
+	coal_meta->pkt_count = 0;
 }
 
 /* Converts the coalesced SKB into a list of SKBs.
@@ -760,25 +739,21 @@
  * The original coalesced SKB should be treated as invalid and
  * must be freed by the caller
  */
-static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,
-					u64 nlo_err_mask,
-					struct sk_buff_head *list)
+static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
+				       u64 nlo_err_mask,
+				       struct sk_buff_head *list)
 {
-	struct sk_buff *new_skb;
-	struct sk_buff *(*segment)(struct sk_buff *coal_skb,
-				   u32 start,
-				   int start_pkt_num,
-				   u16 pkt_len, u8 pkt_count);
 	struct iphdr *iph;
 	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
 	struct rmnet_map_v5_coal_header *coal_hdr;
-	u32 start = 0;
-	u16 pkt_len, ip_len, trans_len;
-	u8 protocol, start_pkt_num = 0;
+	struct rmnet_map_coal_metadata coal_meta;
+	u16 pkt_len;
 	u8 pkt, total_pkt = 0;
-	u8 nlo, gro_count = 0;
+	u8 nlo;
 	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
 
+	memset(&coal_meta, 0, sizeof(coal_meta));
+
 	/* Pull off the headers we no longer need */
 	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
 	coal_hdr = (struct rmnet_map_v5_coal_header *)
@@ -788,27 +763,33 @@
 	iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
 
 	if (iph->version == 4) {
-		protocol = iph->protocol;
-		ip_len = iph->ihl * 4;
+		coal_meta.ip_proto = 4;
+		coal_meta.ip_len = iph->ihl * 4;
+		coal_meta.trans_proto = iph->protocol;
+		coal_meta.ip_header = iph;
 
 		/* Don't allow coalescing of any packets with IP options */
 		if (iph->ihl != 5)
 			gro = false;
 	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
 		__be16 frag_off;
+		u8 protocol = ip6h->nexthdr;
 
-		protocol = ((struct ipv6hdr *)iph)->nexthdr;
-		ip_len = ipv6_skip_exthdr(coal_skb, sizeof(struct ipv6hdr),
-					  &protocol, &frag_off);
+		coal_meta.ip_proto = 6;
+		coal_meta.ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h),
+						    &protocol, &frag_off);
+		coal_meta.trans_proto = protocol;
+		coal_meta.ip_header = ip6h;
 
 		/* If we run into a problem, or this has a fragment header
 		 * (which should technically not be possible, if the HW
 		 * works as intended...), bail.
 		 */
-		if (ip_len < 0 || frag_off) {
+		if (coal_meta.ip_len < 0 || frag_off) {
 			priv->stats.coal.coal_ip_invalid++;
 			return;
-		} else if (ip_len > sizeof(struct ipv6hdr)) {
+		} else if (coal_meta.ip_len > sizeof(*ip6h)) {
 			/* Don't allow coalescing of any packets with IPv6
 			 * extension headers.
 			 */
@@ -819,14 +800,18 @@
 		return;
 	}
 
-	if (protocol == IPPROTO_TCP) {
-		struct tcphdr *th = (struct tcphdr *)
-				    ((unsigned char *)iph + ip_len);
-		trans_len = th->doff * 4;
-		segment = rmnet_map_segment_tcp_skb;
-	} else if (protocol == IPPROTO_UDP) {
-		trans_len = sizeof(struct udphdr);
-		segment = rmnet_map_segment_udp_skb;
+	if (coal_meta.trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len);
+		coal_meta.trans_len = th->doff * 4;
+		coal_meta.trans_header = th;
+	} else if (coal_meta.trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh;
+
+		uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
+		coal_meta.trans_len = sizeof(*uh);
+		coal_meta.trans_header = uh;
 	} else {
 		priv->stats.coal.coal_trans_invalid++;
 		return;
@@ -834,7 +819,8 @@
 
 	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
 		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
-		pkt_len -= ip_len + trans_len;
+		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
+		coal_meta.data_len = pkt_len;
 		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
 		     pkt++, total_pkt++) {
 			nlo_err_mask <<= 1;
@@ -842,40 +828,27 @@
 				priv->stats.coal.coal_csum_err++;
 
 				/* Segment out the good data */
-				if (gro && gro_count) {
-					new_skb = segment(coal_skb, start,
-							  start_pkt_num,
-							  pkt_len, gro_count);
-					if (!new_skb)
-						return;
-
-					__skb_queue_tail(list, new_skb);
-					start += pkt_len * gro_count;
-					gro_count = 0;
+				if (gro && coal_meta.pkt_count) {
+					__rmnet_map_segment_coal_skb(coal_skb,
+								     &coal_meta,
+								     list,
+								     total_pkt);
 				}
 
 				/* skip over bad packet */
-				start += pkt_len;
-				start_pkt_num = total_pkt + 1;
+				coal_meta.data_offset += pkt_len;
+				coal_meta.pkt_id = total_pkt + 1;
 			} else {
-				gro_count++;
+				coal_meta.pkt_count++;
 
 				/* Segment the packet if we aren't sending the
 				 * larger packet up the stack.
 				 */
-				if (!gro) {
-					new_skb = segment(coal_skb, start,
-							  start_pkt_num,
-							  pkt_len, 1);
-					if (!new_skb)
-						return;
-
-					__skb_queue_tail(list, new_skb);
-
-					start += pkt_len;
-					start_pkt_num = total_pkt + 1;
-					gro_count = 0;
-				}
+				if (!gro)
+					__rmnet_map_segment_coal_skb(coal_skb,
+								     &coal_meta,
+								     list,
+								     total_pkt);
 			}
 		}
 
@@ -883,17 +856,26 @@
 		 * the previous one, if we haven't done so. NLOs only switch
 		 * when the packet length changes.
 		 */
-		if (gro && gro_count) {
-			new_skb = segment(coal_skb, start, start_pkt_num,
-					  pkt_len, gro_count);
-			if (!new_skb)
+		if (gro && coal_meta.pkt_count) {
+			/* Fast forward the (hopefully) common case.
+			 * Frames with only one NLO (i.e. one packet length) and
+			 * no checksum errors don't need to be segmented here.
+			 * We can just pass off the original skb.
+			 */
+			if (pkt_len * coal_meta.pkt_count ==
+			    coal_skb->len - coal_meta.ip_len -
+			    coal_meta.trans_len) {
+				rmnet_map_move_headers(coal_skb);
+				coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
+				if (coal_meta.pkt_count > 1)
+					rmnet_map_gso_stamp(coal_skb,
+							    &coal_meta);
+				__skb_queue_tail(list, coal_skb);
 				return;
+			}
 
-			__skb_queue_tail(list, new_skb);
-
-			start += pkt_len * gro_count;
-			start_pkt_num = total_pkt + 1;
-			gro_count = 0;
+			__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
+						     total_pkt);
 		}
 	}
 }
@@ -1015,8 +997,9 @@
 		if (rc)
 			return rc;
 
-		rmnet_map_segment_coal_data(skb, nlo_err_mask, list);
-		consume_skb(skb);
+		rmnet_map_segment_coal_skb(skb, nlo_err_mask, list);
+		if (skb_peek(list) != skb)
+			consume_skb(skb);
 		break;
 	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
 		if (rmnet_map_get_csum_valid(skb)) {
@@ -1026,9 +1009,13 @@
 			priv->stats.csum_valid_unset++;
 		}
 
+		/* Pull unnecessary headers and move the rest to the linear
+		 * section of the skb.
+		 */
 		pskb_pull(skb,
 			  (sizeof(struct rmnet_map_header) +
 			   sizeof(struct rmnet_map_v5_csum_header)));
+		rmnet_map_move_headers(skb);
 
 		/* Remove padding only for csum offload packets.
 		 * Coalesced packets should never have padding.
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 94a56fc..a4c9cbf 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -646,8 +646,8 @@
 
 	wil_dbg_misc(wil, "get_station: %pM CID %d MID %d\n", mac, cid,
 		     vif->mid);
-	if (cid < 0)
-		return cid;
+	if (!wil_cid_valid(cid))
+		return -ENOENT;
 
 	rc = wil_cid_fill_sinfo(vif, cid, sinfo);
 
@@ -683,7 +683,7 @@
 	int rc;
 	int cid = wil_find_cid_by_idx(wil, vif->mid, idx);
 
-	if (cid < 0)
+	if (!wil_cid_valid(cid))
 		return -ENOENT;
 
 	ether_addr_copy(mac, wil->sta[cid].addr);
@@ -1455,8 +1455,14 @@
 	rc = wmi_send(wil, WMI_CONNECT_CMDID, vif->mid, &conn, sizeof(conn));
 	if (rc == 0) {
 		netif_carrier_on(ndev);
-		if (!wil_has_other_active_ifaces(wil, ndev, false, true))
-			wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+		if (!wil_has_other_active_ifaces(wil, ndev, false, true)) {
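+			/* EDMG (11ay) rates justify the larger bus vote */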
+			if (wil->force_edmg_channel)
+				wil6210_bus_request(wil,
+						    WIL_11AY_BUS_REQUEST_KBPS);
+			else
+				wil6210_bus_request(wil,
+						    WIL_11AD_BUS_REQUEST_KBPS);
+		}
 		vif->bss = bss;
 		/* Connect can take lots of time */
 		mod_timer(&vif->connect_timer,
@@ -2099,8 +2105,13 @@
 	}
 
 	netif_carrier_on(ndev);
-	if (!wil_has_other_active_ifaces(wil, ndev, false, true))
-		wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+	if (!wil_has_other_active_ifaces(wil, ndev, false, true)) {
+		if (wil->force_edmg_channel)
+			wil6210_bus_request(wil, WIL_11AY_BUS_REQUEST_KBPS);
+		else
+			wil6210_bus_request(wil, WIL_11AD_BUS_REQUEST_KBPS);
+	}
 
 	rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go);
 	if (rc)
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index bd879af..7ba793b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -342,7 +342,7 @@
 		wil_dbg_misc(wil,
 			     "Disconnect complete %pM, CID=%d, reason=%d\n",
 			     bssid, cid, reason_code);
-		if (cid >= 0) /* disconnect 1 peer */
+		if (wil_cid_valid(cid)) /* disconnect 1 peer */
 			wil_disconnect_cid_complete(vif, cid, reason_code);
 	} else { /* all */
 		wil_dbg_misc(wil, "Disconnect complete all\n");
@@ -456,7 +456,7 @@
 		cid = wil_find_cid(wil, vif->mid, bssid);
 		wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
 			     bssid, cid, reason_code);
-		if (cid >= 0) /* disconnect 1 peer */
+		if (wil_cid_valid(cid)) /* disconnect 1 peer */
 			wil_disconnect_cid(vif, cid, reason_code);
 	} else { /* all */
 		wil_dbg_misc(wil, "Disconnect all\n");
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index e24f9b1..daf3717 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -52,6 +52,8 @@
 
 #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+#define WIL_11AD_BUS_REQUEST_KBPS 600000 /* ~4.6Gbps */
+#define WIL_11AY_BUS_REQUEST_KBPS 1300000 /* ~10.1Gbps */
 
 #define WIL_NUM_LATENCY_BINS 200
 
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index cc93d6f..464371dd 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1998,6 +1998,25 @@
 			       CNSS_REASON_TIMEOUT);
 }
 
+static int cnss_pci_smmu_fault_handler(struct iommu_domain *domain,
+				       struct device *dev, unsigned long iova,
+				       int flags, void *handler_token)
+{
+	struct cnss_pci_data *pci_priv = handler_token;
+
+	cnss_pr_err("SMMU fault happened with IOVA 0x%lx\n", iova);
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -ENODEV;
+	}
+
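+	/* Force a firmware assert so recovery can collect the device state */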
+	cnss_force_fw_assert(&pci_priv->pci_dev->dev);
+
+	/* IOMMU driver requires non-zero return value to print debug info. */
+	return -EINVAL;
+}
+
 static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
@@ -2021,6 +2040,8 @@
 	if (!ret && !strcmp("fastmap", iommu_dma_type)) {
 		cnss_pr_dbg("Enabling SMMU S1 stage\n");
 		pci_priv->smmu_s1_enable = true;
+		iommu_set_fault_handler(pci_priv->iommu_domain,
+					cnss_pci_smmu_fault_handler, pci_priv);
 	}
 
 	ret = of_property_read_u32_array(of_node,  "qcom,iommu-dma-addr-pool",
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index dc94aca..45d6c86 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -44,59 +44,27 @@
 	{0, 8  * 1024, NULL},
 	{0, 8  * 1024, NULL},
 	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
+	{0, 8  * 1024, NULL},
 	{0, 16 * 1024, NULL},
 	{0, 16 * 1024, NULL},
 	{0, 16 * 1024, NULL},
 	{0, 16 * 1024, NULL},
 	{0, 16 * 1024, NULL},
 	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 16 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
-	{0, 32 * 1024, NULL},
 	{0, 32 * 1024, NULL},
 	{0, 32 * 1024, NULL},
 	{0, 32 * 1024, NULL},
@@ -105,10 +73,6 @@
 	{0, 32 * 1024, NULL},
 	{0, 64 * 1024, NULL},
 	{0, 64 * 1024, NULL},
-	{0, 64 * 1024, NULL},
-	{0, 64 * 1024, NULL},
-	{0, 128 * 1024, NULL},
-	{0, 128 * 1024, NULL},
 };
 
 int wcnss_prealloc_init(void)
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index bdd1fb0..62b15b1 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -7,5 +7,6 @@
 obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs.o
 obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qrbtc-sdm845.o
 obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4.o
+obj-$(CONFIG_PHY_QCOM_UFS) 		+= phy-qcom-ufs-qmp-v4-lito.o
 obj-$(CONFIG_PHY_QCOM_USB_HS) 		+= phy-qcom-usb-hs.o
 obj-$(CONFIG_PHY_QCOM_USB_HSIC) 	+= phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
new file mode 100644
index 0000000..b8cb819
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v4-lito.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v4_lito"
+
+static
+int ufs_qcom_phy_qmp_v4_lito_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	writel_relaxed(0x01, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
+	/* Ensure PHY is in reset before writing PHY calibration data */
+	wmb();
+	/*
+	 * Writing PHY calibration in this order:
+	 * 1. Write Rate-A calibration first (1-lane mode).
+	 * 2. Write 2nd lane configuration if needed.
+	 * 3. Write Rate-B calibration overrides
+	 */
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_no_g4,
+			       ARRAY_SIZE(phy_cal_table_rate_A_no_g4));
+	if (ufs_qcom_phy->lanes_per_direction == 2)
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy,
+			      phy_cal_table_2nd_lane_no_g4,
+			      ARRAY_SIZE(phy_cal_table_2nd_lane_no_g4));
+	if (is_rate_B)
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B,
+				       ARRAY_SIZE(phy_cal_table_rate_B));
+
+	writel_relaxed(0x00, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
+	/* flush buffered writes */
+	wmb();
+
+	return 0;
+}
+
+static int ufs_qcom_phy_qmp_v4_lito_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v4_lito *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v4_lito_exit(struct phy *generic_phy)
+{
+	return 0;
+}
+
+static
+void ufs_qcom_phy_qmp_v4_lito_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that it's analog rail is powered ON.
+		 * that its analog rail is powered ON.
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v4_lito_set_tx_lane_enable(struct ufs_qcom_phy *phy,
+			u32 val)
+{
+	/*
+	 * The v4 PHY does not have a TX_LANE_ENABLE register.
+	 * Implement this stub so no error is propagated to the caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v4_lito_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
+			bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* make sure the RX LineCfg change is applied before we return */
+	mb();
+}
+
+static inline
+void ufs_qcom_phy_qmp_v4_lito_start_serdes(struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static
+int ufs_qcom_phy_qmp_v4_lito_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err) {
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v4_lito_dbg_register_dump(struct ufs_qcom_phy *phy)
+{
+	ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+					"PHY QSERDES COM Registers ");
+	ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+					"PHY Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
+					"PHY RX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
+					"PHY TX0 Registers ");
+	ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
+					"PHY RX1 Registers ");
+	ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
+					"PHY TX1 Registers ");
+}
+
+struct phy_ops ufs_qcom_phy_qmp_v4_lito_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v4_lito_init,
+	.exit		= ufs_qcom_phy_qmp_v4_lito_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+struct ufs_qcom_phy_specific_ops phy_v4_lito_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v4_lito_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v4_lito_start_serdes,
+	.is_physical_coding_sublayer_ready =
+		ufs_qcom_phy_qmp_v4_lito_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v4_lito_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v4_lito_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v4_lito_power_control,
+	.dbg_register_dump	= ufs_qcom_phy_qmp_v4_lito_dbg_register_dump,
+};
+
+static int ufs_qcom_phy_qmp_v4_lito_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v4_lito *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v4_lito_phy_ops,
+				&phy_v4_lito_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v4_lito_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v4-lito"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v4_lito_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v4_lito_driver = {
+	.probe = ufs_qcom_phy_qmp_v4_lito_probe,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v4_lito_of_match,
+		.name = "ufs_qcom_phy_qmp_v4_lito",
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v4_lito_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v4 LITO");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
new file mode 100644
index 0000000..88253ff
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V4_LITO_H_
+#define UFS_QCOM_PHY_QMP_V4_LITO_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_BASE	0x000
+#define COM_SIZE	0x1C4
+#define PHY_BASE	0xC00
+#define PHY_SIZE	0x200
+#define TX_BASE(n)	(0x400 + (0x400 * n))
+#define TX_SIZE		0x178
+#define RX_BASE(n)	(0x600 + (0x400 * n))
+#define RX_SIZE		0x200
+#define COM_OFF(x)	(COM_BASE + x)
+#define PHY_OFF(x)	(PHY_BASE + x)
+#define TX_OFF(n, x)	(TX_BASE(n) + x)
+#define RX_OFF(n, x)	(RX_BASE(n) + x)
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0x94)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x158)
+#define QSERDES_COM_HSCLK_HS_SWITCH_SEL		COM_OFF(0x15C)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xA4)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x10C)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x58)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x124)
+#define QSERDES_COM_BIN_VCOCAL_HSCLK_SEL	COM_OFF(0x1BC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xBC)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x74)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x7C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x84)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0xAC)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0xB0)
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0	COM_OFF(0x1AC)
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0	COM_OFF(0x1B0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xC4)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x78)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x80)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x88)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0xB4)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0xB8)
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1	COM_OFF(0x1B4)
+#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1	COM_OFF(0x1B8)
+#define QSERDES_COM_CMN_IPTRIM			COM_OFF(0x60)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_SW_RESET			PHY_OFF(0x08)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x180)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x148)
+#define UFS_PHY_MULTI_LANE_CTRL1		PHY_OFF(0x1E0)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x158)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x30)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x38)
+#define UFS_PHY_TX_MID_TERM_CTRL1		PHY_OFF(0x1D8)
+#define UFS_PHY_DEBUG_BUS_CLKSEL		PHY_OFF(0x124)
+#define UFS_PHY_PLL_CNTL			PHY_OFF(0x2C)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB	PHY_OFF(0x0C)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB	PHY_OFF(0x10)
+#define UFS_PHY_TX_PWM_GEAR_BAND		PHY_OFF(0x160)
+#define UFS_PHY_TX_HS_GEAR_BAND			PHY_OFF(0x168)
+#define UFS_PHY_TX_HSGEAR_CAPABILITY		PHY_OFF(0x74)
+#define UFS_PHY_RX_HSGEAR_CAPABILITY		PHY_OFF(0xB4)
+#define UFS_PHY_RX_MIN_HIBERN8_TIME		PHY_OFF(0x150)
+#define UFS_PHY_BIST_FIXED_PAT_CTRL		PHY_OFF(0x60)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX0_PWM_GEAR_1_DIVIDER_BAND0_1	TX_OFF(0, 0x168)
+#define QSERDES_TX0_PWM_GEAR_2_DIVIDER_BAND0_1	TX_OFF(0, 0x16C)
+#define QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1	TX_OFF(0, 0x170)
+#define QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1	TX_OFF(0, 0x174)
+#define QSERDES_TX0_LANE_MODE_1			TX_OFF(0, 0x84)
+#define QSERDES_TX0_TRAN_DRVR_EMP_EN		TX_OFF(0, 0xC0)
+
+#define QSERDES_TX1_PWM_GEAR_1_DIVIDER_BAND0_1	TX_OFF(1, 0x168)
+#define QSERDES_TX1_PWM_GEAR_2_DIVIDER_BAND0_1	TX_OFF(1, 0x16C)
+#define QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1	TX_OFF(1, 0x170)
+#define QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1	TX_OFF(1, 0x174)
+#define QSERDES_TX1_LANE_MODE_1			TX_OFF(1, 0x84)
+#define QSERDES_TX1_TRAN_DRVR_EMP_EN		TX_OFF(1, 0xC0)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX0_SIGDET_LVL				RX_OFF(0, 0x120)
+#define QSERDES_RX0_SIGDET_CNTRL			RX_OFF(0, 0x11C)
+#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL		RX_OFF(0, 0x124)
+#define QSERDES_RX0_RX_BAND				RX_OFF(0, 0x128)
+#define QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN		RX_OFF(0, 0x30)
+#define QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(0, 0x34)
+#define QSERDES_RX0_UCDR_PI_CONTROLS			RX_OFF(0, 0x44)
+#define QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(0, 0x3C)
+#define QSERDES_RX0_UCDR_PI_CTRL2			RX_OFF(0, 0x48)
+#define QSERDES_RX0_RX_TERM_BW				RX_OFF(0, 0x80)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL1		RX_OFF(0, 0xE8)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(0, 0xEC)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(0, 0xF0)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(0, 0xF4)
+#define QSERDES_RX0_RX_EQ_OFFSET_ADAPTOR_CNTRL1		RX_OFF(0, 0x110)
+#define QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2		RX_OFF(0, 0x114)
+#define QSERDES_RX0_RX_IDAC_MEASURE_TIME		RX_OFF(0, 0x100)
+#define QSERDES_RX0_RX_IDAC_TSETTLE_LOW			RX_OFF(0, 0xF8)
+#define QSERDES_RX0_RX_IDAC_TSETTLE_HIGH		RX_OFF(0, 0xFC)
+#define QSERDES_RX0_RX_MODE_00_LOW			RX_OFF(0, 0x15C)
+#define QSERDES_RX0_RX_MODE_00_HIGH			RX_OFF(0, 0x160)
+#define QSERDES_RX0_RX_MODE_00_HIGH2			RX_OFF(0, 0x164)
+#define QSERDES_RX0_RX_MODE_00_HIGH3			RX_OFF(0, 0x168)
+#define QSERDES_RX0_RX_MODE_00_HIGH4			RX_OFF(0, 0x16C)
+#define QSERDES_RX0_RX_MODE_01_LOW			RX_OFF(0, 0x170)
+#define QSERDES_RX0_RX_MODE_01_HIGH			RX_OFF(0, 0x174)
+#define QSERDES_RX0_RX_MODE_01_HIGH2			RX_OFF(0, 0x178)
+#define QSERDES_RX0_RX_MODE_01_HIGH3			RX_OFF(0, 0x17C)
+#define QSERDES_RX0_RX_MODE_01_HIGH4			RX_OFF(0, 0x180)
+#define QSERDES_RX0_RX_MODE_10_LOW			RX_OFF(0, 0x184)
+#define QSERDES_RX0_RX_MODE_10_HIGH			RX_OFF(0, 0x188)
+#define QSERDES_RX0_RX_MODE_10_HIGH2			RX_OFF(0, 0x18C)
+#define QSERDES_RX0_RX_MODE_10_HIGH3			RX_OFF(0, 0x190)
+#define QSERDES_RX0_RX_MODE_10_HIGH4			RX_OFF(0, 0x194)
+#define QSERDES_RX0_DCC_CTRL1				RX_OFF(0, 0x1A8)
+#define QSERDES_RX0_GM_CAL				RX_OFF(0, 0xDC)
+#define QSERDES_RX0_AC_JTAG_ENABLE			RX_OFF(0, 0x68)
+#define QSERDES_RX0_UCDR_FO_GAIN			RX_OFF(0, 0x08)
+#define QSERDES_RX0_UCDR_SO_GAIN			RX_OFF(0, 0x14)
+
+#define QSERDES_RX1_SIGDET_LVL				RX_OFF(1, 0x120)
+#define QSERDES_RX1_SIGDET_CNTRL			RX_OFF(1, 0x11C)
+#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL		RX_OFF(1, 0x124)
+#define QSERDES_RX1_RX_BAND				RX_OFF(1, 0x128)
+#define QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN		RX_OFF(1, 0x30)
+#define QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(1, 0x34)
+#define QSERDES_RX1_UCDR_PI_CONTROLS			RX_OFF(1, 0x44)
+#define QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(1, 0x3C)
+#define QSERDES_RX1_UCDR_PI_CTRL2			RX_OFF(1, 0x48)
+#define QSERDES_RX1_RX_TERM_BW				RX_OFF(1, 0x80)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL1		RX_OFF(1, 0xE8)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(1, 0xEC)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(1, 0xF0)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(1, 0xF4)
+#define QSERDES_RX1_RX_EQ_OFFSET_ADAPTOR_CNTRL1		RX_OFF(1, 0x110)
+#define QSERDES_RX1_RX_OFFSET_ADAPTOR_CNTRL2		RX_OFF(1, 0x114)
+#define QSERDES_RX1_RX_IDAC_MEASURE_TIME		RX_OFF(1, 0x100)
+#define QSERDES_RX1_RX_IDAC_TSETTLE_LOW			RX_OFF(1, 0xF8)
+#define QSERDES_RX1_RX_IDAC_TSETTLE_HIGH		RX_OFF(1, 0xFC)
+#define QSERDES_RX1_RX_MODE_00_LOW			RX_OFF(1, 0x15C)
+#define QSERDES_RX1_RX_MODE_00_HIGH			RX_OFF(1, 0x160)
+#define QSERDES_RX1_RX_MODE_00_HIGH2			RX_OFF(1, 0x164)
+#define QSERDES_RX1_RX_MODE_00_HIGH3			RX_OFF(1, 0x168)
+#define QSERDES_RX1_RX_MODE_00_HIGH4			RX_OFF(1, 0x16C)
+#define QSERDES_RX1_RX_MODE_01_LOW			RX_OFF(1, 0x170)
+#define QSERDES_RX1_RX_MODE_01_HIGH			RX_OFF(1, 0x174)
+#define QSERDES_RX1_RX_MODE_01_HIGH2			RX_OFF(1, 0x178)
+#define QSERDES_RX1_RX_MODE_01_HIGH3			RX_OFF(1, 0x17C)
+#define QSERDES_RX1_RX_MODE_01_HIGH4			RX_OFF(1, 0x180)
+#define QSERDES_RX1_RX_MODE_10_LOW			RX_OFF(1, 0x184)
+#define QSERDES_RX1_RX_MODE_10_HIGH			RX_OFF(1, 0x188)
+#define QSERDES_RX1_RX_MODE_10_HIGH2			RX_OFF(1, 0x18C)
+#define QSERDES_RX1_RX_MODE_10_HIGH3			RX_OFF(1, 0x190)
+#define QSERDES_RX1_RX_MODE_10_HIGH4			RX_OFF(1, 0x194)
+#define QSERDES_RX1_DCC_CTRL1				RX_OFF(1, 0x1A8)
+#define QSERDES_RX1_GM_CAL				RX_OFF(1, 0xDC)
+#define QSERDES_RX1_AC_JTAG_ENABLE			RX_OFF(1, 0x68)
+#define QSERDES_RX1_UCDR_FO_GAIN			RX_OFF(1, 0x08)
+#define QSERDES_RX1_UCDR_SO_GAIN			RX_OFF(1, 0x14)
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v4 LITO-specific PHY.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v4_lito {
+	struct ufs_qcom_phy common_cfg;
+};
+
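+/* Rate A calibration writes (non-G4 gears), applied by the calibrate
+ * routine while the PHY is held in SW reset.
+ */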
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD9),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x11),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xAC),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xDD),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x35),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_TRAN_DRVR_EMP_EN, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_BAND, 0x18),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x5A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CTRL2, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x1B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_MEASURE_TIME, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_LOW, 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_HIGH, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0xED),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x3C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_LOW, 0xE0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH2, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH4, 0xB1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_LOW, 0xE0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH2, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH4, 0xB1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_DCC_CTRL1, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_DEBUG_BUS_CLKSEL, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_PLL_CNTL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0xD8),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_PWM_GEAR_BAND, 0xAA),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HS_GEAR_BAND, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HSGEAR_CAPABILITY, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_HSGEAR_CAPABILITY, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x35),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_TRAN_DRVR_EMP_EN, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_BAND, 0x18),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x5A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CTRL2, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x1B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_MEASURE_TIME, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_LOW, 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_HIGH, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0xED),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x3C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_LOW, 0xE0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH2, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH4, 0xB1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_LOW, 0xE0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH2, 0xC8),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH3, 0x3B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH4, 0xB1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_DCC_CTRL1, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x06),
+};
+
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 35db9fe..d606dae 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2486,11 +2486,13 @@
 			continue;
 
 		/* disable statuses for all modem controlled prod pipes */
-		if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
-			(ipa3_ctx->ep[ep_idx].valid &&
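+		/* Test clients are exempted; only producer pipes qualify */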
+		if (!IPA_CLIENT_IS_TEST(client_idx) &&
+			(IPA_CLIENT_IS_Q6_PROD(client_idx) ||
+			(IPA_CLIENT_IS_PROD(client_idx) &&
+			ipa3_ctx->ep[ep_idx].valid &&
 			ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
 			(ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
-			&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
+			&& ipa3_ctx->modem_cfg_emb_pipe_flt))) {
 			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
 
 			ipa3_ctx->ep[ep_idx].status.status_en = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 244ab47b..bb3b1a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -218,7 +218,7 @@
 		.hdr_payload_len_inc_padding = true,
 	},
 	.aggr = {
-		.aggr_en = IPA_BYPASS_AGGR, /* temporarily disabled */
+		.aggr_en = IPA_ENABLE_DEAGGR,
 		.aggr = IPA_QCMAP,
 		.aggr_byte_limit = TETH_AGGR_DL_BYTE_LIMIT,
 		.aggr_time_limit = TETH_AGGR_TIME_LIMIT,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index afb8d5f..3dca52b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1952,7 +1952,6 @@
 	/* replace to right qmap format */
 	aggr_req.aggr_info[1].aggr_type = aggr_enum_type;
 	aggr_req.aggr_info[2].aggr_type = aggr_enum_type;
-	aggr_req.aggr_info[2].pkt_count = 1; /*disable aggregation */
 	aggr_req.aggr_info[3].aggr_type = aggr_enum_type;
 	aggr_req.aggr_info[4].aggr_type = aggr_enum_type;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index e07c548..bede437 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1936,32 +1936,32 @@
 
 	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
 	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
-			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
-			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
-			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
 	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
-			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 13, 9, 9, IPA_EE_AP } },
 	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
-			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index 6f0f33e..64de92b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -450,8 +450,8 @@
 	evt_props.exclusive = true;
 	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
 	evt_props.user_data = NULL;
-	evt_props.int_modc = 1;
-	evt_props.int_modt = 1;
+	evt_props.int_modc = 200;
+	evt_props.int_modt = 15;
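+	/* Moderate interrupts (count 200, timer 15) to cut the IRQ rate */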
 	evt_props.ring_base_vaddr = NULL;
 
 	if (smmu_en) {
@@ -480,7 +480,7 @@
 		union __packed gsi_evt_scratch evt_scratch;
 
 		memset(&evt_scratch, 0, sizeof(evt_scratch));
-		evt_scratch.w11ad.update_status_hwtail_mod_threshold = 1;
+		evt_scratch.w11ad.update_status_hwtail_mod_threshold = 200;
 		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
 			evt_scratch);
 		if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -626,7 +626,7 @@
 			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
 				ilog2(tx_dbuff->data_buffer_size);
 		}
-		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold = 1;
+		gsi_scratch.tx_11ad.update_status_hwtail_mod_threshold = 200;
 		IPADBG("tx scratch: status_ring_hwtail_address_lsb 0x%X\n",
 			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb);
 		IPADBG("tx scratch: status_ring_hwhead_address_lsb 0x%X\n",
@@ -829,12 +829,12 @@
 		ep->priv = input_smmu->priv;
 
 		IPADBG(
-		"desc_ring_base_iova %lld desc_ring_size %d status_ring_base_iova %lld status_ring_size %d",
+		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
 		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
 		input_smmu->pipe_smmu.desc_ring_size,
 		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
 		input_smmu->pipe_smmu.status_ring_size);
-		IPADBG("data_buffer_base_iova %lld data_buffer_size %d",
+		IPADBG("data_buffer_base_iova 0x%llX data_buffer_size %d",
 			(unsigned long long)dbuff_smmu->data_buffer_base_iova,
 			input_smmu->dbuff_smmu.data_buffer_size);
 
@@ -842,7 +842,7 @@
 			dbuff_smmu->data_buffer_base_iova) &
 			0xFFFFFF00) {
 			IPAERR(
-			"data_buffers_base_address_msb is over the 8 bit limit (%lld)\n",
+			"data_buffers_base_address_msb is over the 8 bit limit (0x%llX)\n",
 			(unsigned long long)dbuff_smmu->data_buffer_base_iova);
 			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			return -EFAULT;
@@ -989,7 +989,7 @@
 		input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;
 
 		IPADBG(
-		"desc_ring_base_iova %lld desc_ring_size %d status_ring_base_iova %lld status_ring_size %d",
+		"desc_ring_base_iova 0x%llX desc_ring_size %d status_ring_base_iova 0x%llX status_ring_size %d",
 		(unsigned long long)input_smmu->pipe_smmu.desc_ring_base_iova,
 		input_smmu->pipe_smmu.desc_ring_size,
 		(unsigned long long)input_smmu->pipe_smmu.status_ring_base_iova,
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 56004a2..6b76e57 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -1396,7 +1396,7 @@
 		 * assume we have single path (vectors[0]). If we ever
 		 * have multiple paths, need to define the behavior
 		 */
-		usecase_kbps = div64_u64(usecase->vectors[0].ib, 1000);
+		usecase_kbps = div64_u64(usecase->vectors[0].ab, 1000);
 		if (usecase_kbps >= kbps && usecase_kbps < min_kbps) {
 			min_kbps = usecase_kbps;
 			vote = i;
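
The one-character change above is substantive: the vote search now compares against the average bandwidth (ab) field instead of the instantaneous bandwidth (ib) field when picking the smallest adequate usecase. A minimal standalone sketch of that selection loop, with simplified stand-ins for the msm_bus vector/usecase types (the scaffolding here is assumed, not the driver's actual structs):

#include <stdint.h>
#include <stdio.h>

struct vec { uint64_t ab, ib; };            /* bandwidth vote, bytes/s */
struct usecase { struct vec vectors[1]; };

/* Pick the lowest usecase whose ab-derived kbps covers the request. */
static int pick_vote(const struct usecase *uc, int n, uint64_t kbps)
{
	uint64_t min_kbps = UINT64_MAX;
	int vote = -1, i;

	for (i = 0; i < n; i++) {
		uint64_t usecase_kbps = uc[i].vectors[0].ab / 1000;

		if (usecase_kbps >= kbps && usecase_kbps < min_kbps) {
			min_kbps = usecase_kbps;
			vote = i;
		}
	}
	return vote;
}

int main(void)
{
	struct usecase uc[] = {
		{ .vectors = {{ .ab = 100000000, .ib = 200000000 }} },
		{ .vectors = {{ .ab = 400000000, .ib = 800000000 }} },
	};

	/* 150000 kbps: usecase 0's ab (100000 kbps) is too small now,
	 * whereas its ib (200000 kbps) would wrongly have satisfied it. */
	printf("vote index: %d\n", pick_vote(uc, 2, 150000));
	return 0;
}
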
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index ef506f4..5d0d4f2 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -79,6 +79,8 @@
  * @num_usecases:	One usecase to vote for both QUPv3 clock and DDR paths.
  * @pdata:		To register our client handle with the ICB driver.
  * @update:		Usecase index for icb voting.
+ * @vote_for_bw:	To check if we have to vote for BW or BCM threshold
+ *			in ab/ib ICB voting.
  */
 struct geni_se_device {
 	struct device *dev;
@@ -114,6 +116,7 @@
 	int num_usecases;
 	struct msm_bus_scale_pdata *pdata;
 	int update;
+	bool vote_for_bw;
 };
 
 /* Offset of QUPV3 Hardware Version Register */
@@ -736,9 +739,11 @@
 
 	if (geni_se_dev->num_paths == 2) {
 		geni_se_dev->pdata->usecase[new_update].vectors[0].ab  =
-			CONV_TO_BW(geni_se_dev->cur_ab);
+			geni_se_dev->vote_for_bw ?
+			CONV_TO_BW(geni_se_dev->cur_ab) : geni_se_dev->cur_ab;
 		geni_se_dev->pdata->usecase[new_update].vectors[0].ib  =
-			CONV_TO_BW(geni_se_dev->cur_ib);
+			geni_se_dev->vote_for_bw ?
+			CONV_TO_BW(geni_se_dev->cur_ib) : geni_se_dev->cur_ib;
 	}
 
 	if (bus_bw_update && geni_se_dev->num_paths != 2)
@@ -887,9 +892,11 @@
 
 	if (geni_se_dev->num_paths == 2) {
 		geni_se_dev->pdata->usecase[new_update].vectors[0].ab  =
-			CONV_TO_BW(geni_se_dev->cur_ab);
+			geni_se_dev->vote_for_bw ?
+			CONV_TO_BW(geni_se_dev->cur_ab) : geni_se_dev->cur_ab;
 		geni_se_dev->pdata->usecase[new_update].vectors[0].ib  =
-			CONV_TO_BW(geni_se_dev->cur_ib);
+			geni_se_dev->vote_for_bw ?
+			CONV_TO_BW(geni_se_dev->cur_ib) : geni_se_dev->cur_ib;
 	}
 
 	if (bus_bw_update && geni_se_dev->num_paths != 2)
@@ -1803,6 +1810,8 @@
 		}
 	}
 
+	geni_se_dev->vote_for_bw = of_property_read_bool(dev->of_node,
+							"qcom,vote-for-bw");
 	geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
 							"qcom,iommu-s1-bypass");
 	geni_se_dev->bus_bw_set = default_bus_bw_set;
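
With the new "qcom,vote-for-bw" DT flag, geni_se either converts the cached ab/ib values to bandwidth via CONV_TO_BW() or votes them through unchanged as BCM thresholds. A compact standalone reading of the ternary used in both hunks above (CONV_TO_BW()'s real definition is not in this diff, so a placeholder scale factor is assumed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for the driver's CONV_TO_BW() macro; the scale is assumed. */
#define CONV_TO_BW(x) ((uint64_t)(x) * 4)

/* vote_for_bw set: convert to bandwidth; clear: raw BCM threshold. */
static uint64_t effective_vote(bool vote_for_bw, uint64_t raw)
{
	return vote_for_bw ? CONV_TO_BW(raw) : raw;
}

int main(void)
{
	printf("bw vote:  %llu\n",
	       (unsigned long long)effective_vote(true, 1000));
	printf("bcm vote: %llu\n",
	       (unsigned long long)effective_vote(false, 1000));
	return 0;
}
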
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index d326ecc..9bb0cb4 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -463,9 +463,12 @@
 	POWER_SUPPLY_ATTR(cp_switcher_en),
 	POWER_SUPPLY_ATTR(cp_die_temp),
 	POWER_SUPPLY_ATTR(cp_isns),
+	POWER_SUPPLY_ATTR(cp_isns_slave),
 	POWER_SUPPLY_ATTR(cp_toggle_switcher),
 	POWER_SUPPLY_ATTR(cp_irq_status),
 	POWER_SUPPLY_ATTR(cp_ilim),
+	POWER_SUPPLY_ATTR(irq_status),
+	POWER_SUPPLY_ATTR(parallel_output_mode),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 575ffd7..2331403 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -92,6 +92,8 @@
 	bool			cp_disabled;
 	int			taper_entry_fv;
 	int			main_fcc_max;
+	int			fcc_step_size_ua;
+	int			fcc_step_delay_ms;
 	/* debugfs directory */
 	struct dentry		*dfs_root;
 };
@@ -420,8 +422,8 @@
  *  FCC  *
  **********/
 #define EFFICIENCY_PCT	80
-#define FCC_STEP_SIZE_UA 100000
-#define FCC_STEP_UPDATE_DELAY_MS 1000
+#define DEFAULT_FCC_STEP_SIZE_UA 100000
+#define DEFAULT_FCC_STEP_UPDATE_DELAY_MS 1000
 #define STEP_UP 1
 #define STEP_DOWN -1
 static void get_fcc_split(struct pl_data *chip, int total_ua,
@@ -528,6 +530,11 @@
 	union power_supply_propval pval = {0, };
 	int rc;
 
+	if (!chip->fcc_step_size_ua) {
+		pr_err("Invalid fcc stepper step size, value 0\n");
+		return;
+	}
+
 	/* Read current FCC of main charger */
 	rc = power_supply_get_property(chip->main_psy,
 		POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
@@ -540,16 +547,16 @@
 	chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ?
 				STEP_UP : STEP_DOWN;
 	chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) /
-				FCC_STEP_SIZE_UA);
+				chip->fcc_step_size_ua);
 	chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) %
-				FCC_STEP_SIZE_UA;
+				chip->fcc_step_size_ua;
 
 	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
 				STEP_UP : STEP_DOWN;
 	chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
-				chip->slave_fcc_ua) / FCC_STEP_SIZE_UA);
+				chip->slave_fcc_ua) / chip->fcc_step_size_ua);
 	chip->parallel_step_fcc_residual = (parallel_fcc_ua -
-				chip->slave_fcc_ua) % FCC_STEP_SIZE_UA;
+				chip->slave_fcc_ua) % chip->fcc_step_size_ua;
 
 	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
 		|| chip->main_step_fcc_count || chip->main_step_fcc_residual)
@@ -753,19 +760,19 @@
 	}
 
 	if (chip->main_step_fcc_count) {
-		main_fcc += (FCC_STEP_SIZE_UA * chip->main_step_fcc_dir);
+		main_fcc += (chip->fcc_step_size_ua * chip->main_step_fcc_dir);
 		chip->main_step_fcc_count--;
-		reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+		reschedule_ms = chip->fcc_step_delay_ms;
 	} else if (chip->main_step_fcc_residual) {
 		main_fcc += chip->main_step_fcc_residual;
 		chip->main_step_fcc_residual = 0;
 	}
 
 	if (chip->parallel_step_fcc_count) {
-		parallel_fcc += (FCC_STEP_SIZE_UA *
+		parallel_fcc += (chip->fcc_step_size_ua *
 			chip->parallel_step_fcc_dir);
 		chip->parallel_step_fcc_count--;
-		reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+		reschedule_ms = chip->fcc_step_delay_ms;
 	} else if (chip->parallel_step_fcc_residual) {
 		parallel_fcc += chip->parallel_step_fcc_residual;
 		chip->parallel_step_fcc_residual = 0;
@@ -1628,7 +1635,13 @@
 
 static void pl_config_init(struct pl_data *chip, int smb_version)
 {
+	chip->fcc_step_size_ua = DEFAULT_FCC_STEP_SIZE_UA;
+	chip->fcc_step_delay_ms = DEFAULT_FCC_STEP_UPDATE_DELAY_MS;
+
 	switch (smb_version) {
+	case PM8150B_SUBTYPE:
+		chip->fcc_step_delay_ms = 100;
+		break;
 	case PMI8998_SUBTYPE:
 	case PM660_SUBTYPE:
 		chip->wa_flags = AICL_RERUN_WA_BIT | FORCE_INOV_DISABLE_BIT;
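
The stepper now splits the gap between the present FCC and the target into whole steps of fcc_step_size_ua plus one residual step, rescheduling every fcc_step_delay_ms. A worked standalone example of that arithmetic, using the defaults plus the 100 ms PM8150B delay introduced above:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int step_ua = 100000;	/* DEFAULT_FCC_STEP_SIZE_UA  */
	int delay_ms = 100;	/* PM8150B fcc_step_delay_ms */
	int current_ua = 1500000, target_ua = 1840000;
	int delta = target_ua - current_ua;

	int dir = (delta > 0) ? 1 : -1;		/* STEP_UP / STEP_DOWN */
	int count = abs(delta / step_ua);	/* whole steps         */
	int residual = delta % step_ua;		/* final partial step  */

	printf("steps=%d dir=%d residual=%d uA, ~%d ms to converge\n",
	       count, dir, residual, count * delay_ms);
	return 0;
}
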
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 98ece48..c69c47b 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -2902,6 +2902,7 @@
 	},
 	[SMB_EN_IRQ] = {
 		.name		= "smb-en",
+		.handler	= smb_en_irq_handler,
 	},
 	[IMP_TRIGGER_IRQ] = {
 		.name		= "imp-trigger",
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 093908d..6a4e82d 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -4567,6 +4567,41 @@
 	return IRQ_HANDLED;
 }
 
+irqreturn_t smb_en_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc, input_present;
+
+	if (!chg->cp_disable_votable) {
+		chg->cp_disable_votable = find_votable("CP_DISABLE");
+		if (!chg->cp_disable_votable)
+			return IRQ_HANDLED;
+	}
+
+	if (chg->pd_hard_reset) {
+		vote(chg->cp_disable_votable, BOOST_BACK_VOTER, true, 0);
+		return IRQ_HANDLED;
+	}
+
+	rc = smblib_is_input_present(chg, &input_present);
+	if (rc < 0) {
+		pr_err("Couldn't get usb presence status rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (input_present) {
+		/*
+		 * Add some delay to enable SMB1390 switcher after SMB_EN
+		 * pin goes high
+		 */
+		usleep_range(1000, 1100);
+		vote(chg->cp_disable_votable, BOOST_BACK_VOTER, false, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
 #define CHG_TERM_WA_ENTRY_DELAY_MS		300000		/* 5 min */
 #define CHG_TERM_WA_EXIT_DELAY_MS		60000		/* 1 min */
 static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
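
The ordering in smb_en_irq_handler() is the point of the patch: a PD hard reset keeps the charge pump disabled through the BOOST_BACK vote, while a present input only re-enables it after a ~1 ms settling delay on the SMB_EN pin. A stubbed standalone model of that decision flow (vote_cp_disable() and the input flag stand in for the pmic-voter and smblib helpers):

#include <stdbool.h>
#include <stdio.h>

/* Stub: true asks the CP_DISABLE votable to keep the charge pump off. */
static void vote_cp_disable(const char *voter, bool disable)
{
	printf("CP_DISABLE: %s votes %s\n", voter, disable ? "off" : "on");
}

static void smb_en_event(bool pd_hard_reset, bool input_present)
{
	if (pd_hard_reset) {
		vote_cp_disable("BOOST_BACK_VOTER", true);
		return;
	}
	if (input_present) {
		/* driver: usleep_range(1000, 1100) lets SMB_EN settle */
		vote_cp_disable("BOOST_BACK_VOTER", false);
	}
}

int main(void)
{
	smb_en_event(false, true);	/* input present: enable CP */
	smb_en_event(true, false);	/* hard reset: keep CP off  */
	return 0;
}
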
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 82fec3a..38ee099 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -574,6 +574,7 @@
 int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
 
 irqreturn_t default_irq_handler(int irq, void *data);
+irqreturn_t smb_en_irq_handler(int irq, void *data);
 irqreturn_t chg_state_change_irq_handler(int irq, void *data);
 irqreturn_t batt_temp_changed_irq_handler(int irq, void *data);
 irqreturn_t batt_psy_changed_irq_handler(int irq, void *data);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index a9dfbaf..0d2933d 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -724,8 +724,8 @@
 	}
 
 	ret = devm_request_threaded_irq(dev, irq, NULL, fw_error_fatal_handler,
-					IRQF_ONESHOT, "wlanfw-err",
-					priv);
+					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+					"wlanfw-err", priv);
 	if (ret < 0) {
 		icnss_pr_err("Unable to register for error fatal IRQ handler %d ret = %d",
 			     irq, ret);
@@ -764,9 +764,8 @@
 
 	ret = devm_request_threaded_irq(dev, irq, NULL,
 					fw_crash_indication_handler,
-					IRQF_ONESHOT,
-					"wlanfw-early-crash-ind",
-					priv);
+					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+					"wlanfw-early-crash-ind", priv);
 	if (ret < 0) {
 		icnss_pr_err("Unable to register for early crash indication IRQ handler %d ret = %d",
 			     irq, ret);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
index 3ee5a84..5cb058d 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -43,6 +43,7 @@
 	int index;
 	uint32_t clid;
 	int size;
+	int vote_count;
 	struct dentry *file;
 	struct list_head list;
 	char buffer[MAX_BUFF_SIZE];
@@ -432,6 +433,7 @@
 	cldata->clid = clid;
 	cldata->file = file;
 	cldata->size = 0;
+	cldata->vote_count = 0;
 	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
 	list_add_tail(&cldata->list, &cl_list);
 	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
@@ -486,6 +488,7 @@
 			clients, clid);
 	}
 
+	cldata->vote_count++;
 	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
 		i = cldata->size;
 	else {
@@ -767,7 +770,8 @@
 			cldata->pdata->usecase[cldata->index].vectors[j].dst,
 			cldata->pdata->usecase[cldata->index].vectors[j].ab,
 			cldata->pdata->usecase[cldata->index].vectors[j].ib,
-			cldata->pdata->active_only);
+			cldata->pdata->active_only,
+			cldata->vote_count);
 		}
 	}
 	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 7f793a9..9007fa3 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -49,7 +49,7 @@
 #define pil_memset_io(d, c, count) memset_io(d, c, count)
 #endif
 
-#define PIL_NUM_DESC		10
+#define PIL_NUM_DESC		16
 #define MAX_LEN 96
 #define NUM_OF_ENCRYPTED_KEY	3
 
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
index 0c93e1b..2080175 100644
--- a/drivers/soc/qcom/qbt_handler.c
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -26,6 +26,7 @@
 #include <linux/kfifo.h>
 #include <linux/poll.h>
 #include <uapi/linux/qbt_handler.h>
+#include <linux/input/touch_event_notify.h>
 
 #define QBT_DEV "qbt"
 #define MAX_FW_EVENTS 128
@@ -44,10 +45,24 @@
 	bool irq_enabled;
 };
 
-struct fw_event_desc {
+struct ipc_event {
 	enum qbt_fw_event ev;
 };
 
+struct fd_event {
+	struct timeval timestamp;
+	int X;
+	int Y;
+	int id;
+	int state;
+	bool touch_valid;
+};
+
+struct fd_userspace_buf {
+	uint32_t num_events;
+	struct fd_event fd_events[MAX_FW_EVENTS];
+};
+
 struct fw_ipc_info {
 	int gpio;
 	int irq;
@@ -69,11 +84,123 @@
 	struct mutex	ipc_events_mutex;
 	struct fw_ipc_info	fw_ipc;
 	struct finger_detect_gpio fd_gpio;
-	DECLARE_KFIFO(fd_events, struct fw_event_desc, MAX_FW_EVENTS);
-	DECLARE_KFIFO(ipc_events, struct fw_event_desc, MAX_FW_EVENTS);
+	DECLARE_KFIFO(fd_events, struct fd_event, MAX_FW_EVENTS);
+	DECLARE_KFIFO(ipc_events, struct ipc_event, MAX_FW_EVENTS);
 	wait_queue_head_t read_wait_queue_fd;
 	wait_queue_head_t read_wait_queue_ipc;
 	bool is_wuhb_connected;
+	struct qbt_touch_config touch_config;
+	struct fd_userspace_buf scratch_buf;
+};
+
+static struct qbt_drvdata *drvdata_g;
+
+static void qbt_add_touch_event(struct touch_event *evt)
+{
+	struct qbt_drvdata *drvdata = drvdata_g;
+	struct fd_event event;
+
+	memset(&event, 0, sizeof(event));
+	memcpy(&event.timestamp, &evt->time, sizeof(struct timeval));
+	event.X = evt->x;
+	event.Y = evt->y;
+	event.id = evt->fid;
+	event.touch_valid = true;
+	switch (evt->type) {
+	case 'D':
+		event.state = 1;
+		break;
+	case 'U':
+		event.state = 0;
+		break;
+	case 'M':
+		event.state = 2;
+		break;
+	default:
+		pr_err("Invalid touch event type\n");
+	}
+	pr_debug("Adding event id: %d state: %d x: %d y: %d\n",
+			event.id, event.state, event.X, event.Y);
+	pr_debug("timestamp: %ld.%06ld\n", event.timestamp.tv_sec,
+			event.timestamp.tv_usec);
+	if (!kfifo_put(&drvdata->fd_events, event))
+		pr_err("FD events fifo: error adding item\n");
+}
+
+static void qbt_radius_filter(struct touch_event *evt)
+{
+	struct qbt_drvdata *drvdata = drvdata_g;
+	struct fd_event event;
+	int fifo_len = 0, last_x = 0, last_y = 0, last_state = 0,
+			delta_x = 0, delta_y = 0, i = 0;
+
+	fifo_len = kfifo_len(&drvdata->fd_events);
+	for (i = 0; i < fifo_len; i++) {
+		if (!kfifo_get(&drvdata->fd_events, &event))
+			pr_err("FD events fifo: error removing item\n");
+		else {
+			if (event.id == evt->fid) {
+				last_state = event.state;
+				last_x = event.X;
+				last_y = event.Y;
+			}
+			kfifo_put(&drvdata->fd_events, event);
+		}
+	}
+	if (last_state == 1 || last_state == 3) {
+		delta_x = abs(last_x - evt->x);
+		delta_y = abs(last_y - evt->y);
+		if (delta_x > drvdata->touch_config.rad_x ||
+				delta_y > drvdata->touch_config.rad_y)
+			qbt_add_touch_event(evt);
+	} else
+		qbt_add_touch_event(evt);
+}
+
+static void qbt_filter_touch_event(struct touch_event *evt)
+{
+	struct qbt_drvdata *drvdata = drvdata_g;
+
+	pr_debug("Received event id: %d type: %c x: %d y: %d\n",
+			evt->fid, evt->type, evt->x, evt->y);
+	pr_debug("timestamp: %ld.%06ld\n", evt->time.tv_sec,
+			evt->time.tv_usec);
+
+	mutex_lock(&drvdata->fd_events_mutex);
+	switch (evt->type) {
+	case 'D':
+	case 'U':
+		qbt_add_touch_event(evt);
+		break;
+	case 'M':
+		if (drvdata->touch_config.rad_filter_enable)
+			qbt_radius_filter(evt);
+		else
+			qbt_add_touch_event(evt);
+		break;
+	default:
+		pr_err("Invalid touch event type\n");
+	}
+	mutex_unlock(&drvdata->fd_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue_fd);
+}
+
+static int qfp_touch_event_notify(struct notifier_block *self,
+			unsigned long action, void *data)
+{
+	int i = 0;
+	struct touch_event *event = (struct touch_event *)data;
+
+	while (action > 0 && i < sizeof(action)) {
+		if (__test_and_clear_bit(i, &action))
+			qbt_filter_touch_event(event);
+		i++;
+		event++;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block _input_event_notifier = {
+	.notifier_call = qfp_touch_event_notify,
 };
 
 /**
@@ -108,7 +235,8 @@
 
 	file->private_data = drvdata;
 
-	pr_debug("entry minor_no=%d\n", minor_no);
+	pr_debug("entry minor_no=%d fd_available=%d\n",
+			minor_no, drvdata->fd_available);
 
 	/* disallowing concurrent opens */
 	if (minor_no == MINOR_NUM_FD &&
@@ -121,7 +249,8 @@
 		rc = -EBUSY;
 	}
 
-	pr_debug("exit : %d\n", rc);
+	pr_debug("exit : %d  fd_available=%d\n",
+			rc, drvdata->fd_available);
 	return rc;
 }
 
@@ -144,6 +273,8 @@
 	}
 	drvdata = file->private_data;
 	minor_no = iminor(inode);
+	pr_debug("entry minor_no=%d fd_available=%d\n",
+			minor_no, drvdata->fd_available);
 	if (minor_no == MINOR_NUM_FD) {
 		atomic_inc(&drvdata->fd_available);
 	} else if (minor_no == MINOR_NUM_IPC) {
@@ -152,6 +283,7 @@
 		pr_err("Invalid minor number\n");
 		return -EINVAL;
 	}
+	pr_debug("exit : fd_available=%d\n", drvdata->fd_available);
 	return 0;
 }
 
@@ -263,6 +395,22 @@
 		input_sync(drvdata->in_dev);
 		break;
 	}
+	case QBT_CONFIGURE_TOUCH_FD:
+	{
+		if (copy_from_user(&drvdata->touch_config, priv_arg,
+			sizeof(drvdata->touch_config))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+		pr_debug("Touch FD Radius Filter enable: %d\n",
+			drvdata->touch_config.rad_filter_enable);
+		pr_debug("rad_x: %d rad_y: %d\n",
+			drvdata->touch_config.rad_x,
+			drvdata->touch_config.rad_y);
+		break;
+	}
 	default:
 		pr_err("invalid cmd %d\n", cmd);
 		rc = -ENOIOCTLCMD;
@@ -295,12 +443,15 @@
 static ssize_t qbt_read(struct file *filp, char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct fw_event_desc fw_event;
+	struct ipc_event fw_event;
+	struct fd_event *fd_evt;
 	struct qbt_drvdata *drvdata;
+	struct fd_userspace_buf *scratch_buf;
 	wait_queue_head_t *read_wait_queue = NULL;
-	int rc = 0;
+	int i = 0;
 	int minor_no = -1;
-	int fifo_len;
+	int fifo_len = 0;
+	ssize_t num_bytes = 0;
 
 	pr_debug("entry with numBytes = %zd, minor_no = %d\n", cnt, minor_no);
 
@@ -310,15 +461,21 @@
 	}
 	drvdata = filp->private_data;
 
-	if (cnt < sizeof(fw_event.ev)) {
-		pr_err("Num bytes to read is too small\n");
-		return -EINVAL;
-	}
-
 	minor_no = iminor(filp->f_path.dentry->d_inode);
+	scratch_buf = &drvdata->scratch_buf;
+	memset(scratch_buf, 0, sizeof(*scratch_buf));
+
 	if (minor_no == MINOR_NUM_FD) {
+		if (cnt < sizeof(*scratch_buf)) {
+			pr_err("Num bytes to read is too small\n");
+			return -EINVAL;
+		}
 		read_wait_queue = &drvdata->read_wait_queue_fd;
 	} else if (minor_no == MINOR_NUM_IPC) {
+		if (cnt < sizeof(fw_event.ev)) {
+			pr_err("Num bytes to read is too small\n");
+			return -EINVAL;
+		}
 		read_wait_queue = &drvdata->read_wait_queue_ipc;
 	} else {
 		pr_err("Invalid minor number\n");
@@ -341,25 +498,45 @@
 
 	if (minor_no == MINOR_NUM_FD) {
 		mutex_lock(&drvdata->fd_events_mutex);
-		rc = kfifo_get(&drvdata->fd_events, &fw_event);
+
+		scratch_buf->num_events = kfifo_len(&drvdata->fd_events);
+
+		for (i = 0; i < scratch_buf->num_events; i++) {
+			fd_evt = &scratch_buf->fd_events[i];
+			if (!kfifo_get(&drvdata->fd_events, fd_evt)) {
+				pr_err("FD event fifo: err popping item\n");
+				scratch_buf->num_events = i;
+				break;
+			}
+			pr_debug("Reading event id: %d state: %d\n",
+					fd_evt->id, fd_evt->state);
+			pr_debug("x: %d y: %d timestamp: %ld.%06ld\n",
+					fd_evt->X, fd_evt->Y,
+					fd_evt->timestamp.tv_sec,
+					fd_evt->timestamp.tv_usec);
+		}
+		pr_debug("%d FD events read at time %lu uS\n",
+				scratch_buf->num_events,
+				(unsigned long)ktime_to_us(ktime_get()));
+		num_bytes = copy_to_user(ubuf, scratch_buf,
+				sizeof(*scratch_buf));
 		mutex_unlock(&drvdata->fd_events_mutex);
 	} else if (minor_no == MINOR_NUM_IPC) {
 		mutex_lock(&drvdata->ipc_events_mutex);
-		rc = kfifo_get(&drvdata->ipc_events, &fw_event);
+		if (!kfifo_get(&drvdata->ipc_events, &fw_event))
+			pr_err("IPC events fifo: error removing item\n");
+		pr_debug("IPC event %d at minor no %d read at time %lu uS\n",
+				(int)fw_event.ev, minor_no,
+				(unsigned long)ktime_to_us(ktime_get()));
+		num_bytes = copy_to_user(ubuf, &fw_event.ev,
+				sizeof(fw_event.ev));
 		mutex_unlock(&drvdata->ipc_events_mutex);
 	} else {
 		pr_err("Invalid minor number\n");
 	}
-
-	if (!rc) {
-		pr_err("fw_events fifo: unexpectedly empty\n");
-		return -EINVAL;
-	}
-
-	pr_debug("Firmware event %d at minor no %d read at time %lu uS\n",
-			(int)fw_event.ev, minor_no,
-			(unsigned long)ktime_to_us(ktime_get()));
-	return copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev));
+	if (num_bytes != 0)
+		pr_warn("Could not copy %d bytes\n");
+	return num_bytes;
 }
 
 static unsigned int qbt_poll(struct file *filp,
@@ -546,7 +723,9 @@
 
 static void qbt_fd_report_event(struct qbt_drvdata *drvdata, int state)
 {
-	struct fw_event_desc fw_event;
+	struct fd_event event;
+
+	memset(&event, 0, sizeof(event));
 
 	if (!drvdata->is_wuhb_connected) {
 		pr_err("Skipping as WUHB_INT is disconnected\n");
@@ -564,16 +743,16 @@
 	drvdata->fd_gpio.event_reported = 1;
 	drvdata->fd_gpio.last_gpio_state = state;
 
-	fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+	event.state = state ? 1 : 2;
+	event.touch_valid = false;
+	do_gettimeofday(&event.timestamp);
 
 	mutex_lock(&drvdata->fd_events_mutex);
 
-	kfifo_reset(&drvdata->fd_events);
-
-	if (!kfifo_put(&drvdata->fd_events, fw_event)) {
+	if (!kfifo_put(&drvdata->fd_events, event)) {
 		pr_err("FD events fifo: error adding item\n");
 	} else {
-		pr_debug("FD event %d queued at time %lu uS\n", fw_event.ev,
+		pr_debug("FD event %d queued at time %lu uS\n", event.id,
 				(unsigned long)ktime_to_us(ktime_get()));
 	}
 	mutex_unlock(&drvdata->fd_events_mutex);
@@ -596,7 +775,6 @@
 			^ drvdata->fd_gpio.active_low;
 
 	qbt_fd_report_event(drvdata, state);
-
 	pm_relax(drvdata->dev);
 }
 
@@ -627,7 +805,7 @@
 static void qbt_irq_report_event(struct work_struct *work)
 {
 	struct qbt_drvdata *drvdata;
-	struct fw_event_desc fw_ev_des;
+	struct ipc_event fw_ev_des;
 
 	if (!work) {
 		pr_err("NULL pointer passed\n");
@@ -872,6 +1050,11 @@
 	if (rc < 0)
 		goto end;
 
+	rc = touch_event_register_notifier(&_input_event_notifier);
+	if (rc < 0)
+		pr_err("Touch Event Registration failed: %d\n", rc);
+	drvdata_g = drvdata;
+
 end:
 	pr_debug("exit : %d\n", rc);
 	return rc;
@@ -895,6 +1078,8 @@
 	unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
 
 	device_init_wakeup(&pdev->dev, 0);
+	touch_event_unregister_notifier(&_input_event_notifier);
+	drvdata_g = NULL;
 
 	return 0;
 }
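
qbt_radius_filter() above drops 'M' (move) events whose displacement from the last event of the same finger stays inside the configured rad_x/rad_y window, while 'D'/'U' events always pass. The accept/drop predicate, as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* A move is reported only if it escaped the jitter radius on some axis. */
static bool accept_move(int last_x, int last_y, int x, int y,
			int rad_x, int rad_y)
{
	return abs(last_x - x) > rad_x || abs(last_y - y) > rad_y;
}

int main(void)
{
	/* rad_x = rad_y = 10: 5 px jitter is dropped, a 25 px move passes */
	printf("jitter accepted: %d\n", accept_move(100, 100, 105, 103, 10, 10));
	printf("move accepted:   %d\n", accept_move(100, 100, 125, 100, 10, 10));
	return 0;
}
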
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 65c5aef..16c1dab 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -89,6 +89,7 @@
 	int proxy_clk_count;
 	int smem_id;
 	void *ramdump_dev;
+	void *minidump_dev;
 	u32 pas_id;
 	u32 bus_client;
 	bool enable_bus_scaling;
@@ -862,7 +863,7 @@
 	if (!enable)
 		return 0;
 
-	return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
+	return pil_do_ramdump(&d->desc, d->ramdump_dev, d->minidump_dev);
 }
 
 static void subsys_free_memory(const struct subsys_desc *subsys)
@@ -1065,6 +1066,7 @@
 	struct resource *res;
 	u32 proxy_timeout;
 	int len, rc;
+	char md_node[20];
 
 	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
 	if (!d)
@@ -1228,6 +1230,16 @@
 		goto err_ramdump;
 	}
 
+	scnprintf(md_node, sizeof(md_node), "md_%s", d->subsys_desc.name);
+
+	d->minidump_dev = create_ramdump_device(md_node, &pdev->dev);
+	if (!d->minidump_dev) {
+		pr_err("%s: Unable to create a %s minidump device.\n",
+				__func__, d->subsys_desc.name);
+		rc = -ENOMEM;
+		goto err_minidump;
+	}
+
 	d->subsys = subsys_register(&d->subsys_desc);
 	if (IS_ERR(d->subsys)) {
 		rc = PTR_ERR(d->subsys);
@@ -1236,6 +1248,8 @@
 
 	return 0;
 err_subsys:
+	destroy_ramdump_device(d->minidump_dev);
+err_minidump:
 	destroy_ramdump_device(d->ramdump_dev);
 err_ramdump:
 	pil_desc_release(&d->desc);
@@ -1253,6 +1267,7 @@
 
 	subsys_unregister(d->subsys);
 	destroy_ramdump_device(d->ramdump_dev);
+	destroy_ramdump_device(d->minidump_dev);
 	pil_desc_release(&d->desc);
 
 	return 0;
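
Note how the error labels above unwind in reverse creation order: a failure after the minidump device is created jumps through err_subsys/err_minidump, destroying the minidump device first and then falling through to the ramdump device. The same goto-unwind idiom in a self-contained sketch:

#include <stdio.h>
#include <stdlib.h>

static void *create(const char *name)
{
	printf("create %s\n", name);
	return malloc(1);
}

static void destroy(void *h, const char *name)
{
	printf("destroy %s\n", name);
	free(h);
}

static int probe(void)
{
	void *ramdump, *minidump;

	ramdump = create("ramdump");
	if (!ramdump)
		return -1;
	minidump = create("md_subsys");
	if (!minidump)
		goto err_minidump;
	goto err_subsys;	/* pretend subsys_register() failed */

err_subsys:
	destroy(minidump, "md_subsys");	/* newest resource first */
err_minidump:
	destroy(ramdump, "ramdump");
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}
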
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index aa0c86a..671677b 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -543,16 +543,22 @@
 		of_node_put(lmh_node);
 	}
 
-	/*
-	 * We return error if none of the CPUs have
-	 * reference to our LMH node
-	 */
-	if (cpumask_empty(&mask))
-		return -EINVAL;
-
 	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
 	if (!hw)
 		return -ENOMEM;
+	/*
+	 * We just init regulator if none of the CPUs have
+	 * reference to our LMH node
+	 */
+	if (cpumask_empty(&mask)) {
+		limits_isens_vref_ldo_init(pdev, hw);
+		mutex_lock(&lmh_dcvs_list_access);
+		INIT_LIST_HEAD(&hw->list);
+		list_add_tail(&hw->list, &lmh_dcvs_hw_list);
+		mutex_unlock(&lmh_dcvs_list_access);
+		return 0;
+	}
+
 	hw->cdev_data = devm_kcalloc(&pdev->dev, cpumask_weight(&mask),
 				   sizeof(*hw->cdev_data),
 				   GFP_KERNEL);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fe1a288..4777bde 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -226,7 +226,8 @@
 		mult = 3;
 
 	if ((dep->endpoint.maxburst > 2) &&
-			dep->endpoint.ep_type == EP_TYPE_GSI)
+			dep->endpoint.ep_type == EP_TYPE_GSI
+			&& dwc3_is_usb31(dwc))
 		mult = 6;
 
 	tmp = ((max_packet + mdwidth) * mult) + mdwidth;
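
The extra dwc3_is_usb31() check above confines the mult = 6 bump to USB 3.1 capable controllers; GSI endpoints on 3.0 cores keep mult = 3. For reference, a worked example of the FIFO sizing expression this feeds, tmp = ((max_packet + mdwidth) * mult) + mdwidth (values illustrative):

#include <stdio.h>

int main(void)
{
	int max_packet = 1024;	/* SuperSpeed bulk MPS            */
	int mdwidth = 8;	/* bus width in bytes, an example */

	for (int mult = 3; mult <= 6; mult += 3) {
		int tmp = ((max_packet + mdwidth) * mult) + mdwidth;

		printf("mult=%d -> tmp=%d bytes\n", mult, tmp);
	}
	return 0;
}
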
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index 381bd11..79820b4 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -17,16 +17,32 @@
 #define PCLK_SRC_MUX_0_CLK	7
 #define PCLK_SRC_0_CLK		8
 #define PCLK_MUX_0_CLK		9
-#define VCO_CLK_1		10
-#define PLL_OUT_DIV_1_CLK	11
-#define BITCLK_SRC_1_CLK	12
-#define BYTECLK_SRC_1_CLK	13
-#define POST_BIT_DIV_1_CLK	14
-#define POST_VCO_DIV_1_CLK	15
-#define BYTECLK_MUX_1_CLK	16
-#define PCLK_SRC_MUX_1_CLK	17
-#define PCLK_SRC_1_CLK		18
-#define PCLK_MUX_1_CLK		19
+#define SHADOW_VCO_CLK_0		10
+#define SHADOW_PLL_OUT_DIV_0_CLK	11
+#define SHADOW_BITCLK_SRC_0_CLK		12
+#define SHADOW_BYTECLK_SRC_0_CLK	13
+#define SHADOW_POST_BIT_DIV_0_CLK	14
+#define SHADOW_POST_VCO_DIV_0_CLK	15
+#define SHADOW_PCLK_SRC_MUX_0_CLK	16
+#define SHADOW_PCLK_SRC_0_CLK		17
+#define VCO_CLK_1		18
+#define PLL_OUT_DIV_1_CLK	19
+#define BITCLK_SRC_1_CLK	20
+#define BYTECLK_SRC_1_CLK	21
+#define POST_BIT_DIV_1_CLK	22
+#define POST_VCO_DIV_1_CLK	23
+#define BYTECLK_MUX_1_CLK	24
+#define PCLK_SRC_MUX_1_CLK	25
+#define PCLK_SRC_1_CLK		26
+#define PCLK_MUX_1_CLK		27
+#define SHADOW_VCO_CLK_1		28
+#define SHADOW_PLL_OUT_DIV_1_CLK	29
+#define SHADOW_BITCLK_SRC_1_CLK		30
+#define SHADOW_BYTECLK_SRC_1_CLK	31
+#define SHADOW_POST_BIT_DIV_1_CLK	32
+#define SHADOW_POST_VCO_DIV_1_CLK	33
+#define SHADOW_PCLK_SRC_MUX_1_CLK	34
+#define SHADOW_PCLK_SRC_1_CLK		35
 
 /* DP PLL clocks */
 #define	DP_VCO_CLK	0
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 9af7e19..1c49829 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -115,6 +115,18 @@
 
 extern void dump_tasks(struct mem_cgroup *memcg,
 		       const nodemask_t *nodemask);
+
+#ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
+extern bool should_ulmk_retry(void);
+extern void ulmk_update_last_kill(void);
+#else
+static inline bool should_ulmk_retry(void)
+{
+	return false;
+}
+static inline void ulmk_update_last_kill(void) {}
+#endif
+
 /* sysctls */
 extern int sysctl_oom_dump_tasks;
 extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 08f5435..8709be4 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -139,6 +139,13 @@
 	POWER_SUPPLY_PL_NON_STACKED_BATFET,
 };
 
+/* Parallel output connection topology */
+enum {
+	POWER_SUPPLY_PL_OUTPUT_NONE,
+	POWER_SUPPLY_PL_OUTPUT_VPH,
+	POWER_SUPPLY_PL_OUTPUT_VBAT,
+};
+
 enum {
 	POWER_SUPPLY_PD_INACTIVE = 0,
 	POWER_SUPPLY_PD_ACTIVE,
@@ -336,9 +343,12 @@
 	POWER_SUPPLY_PROP_CP_SWITCHER_EN,
 	POWER_SUPPLY_PROP_CP_DIE_TEMP,
 	POWER_SUPPLY_PROP_CP_ISNS,
+	POWER_SUPPLY_PROP_CP_ISNS_SLAVE,
 	POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER,
 	POWER_SUPPLY_PROP_CP_IRQ_STATUS,
 	POWER_SUPPLY_PROP_CP_ILIM,
+	POWER_SUPPLY_PROP_IRQ_STATUS,
+	POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 97ec1e6..15604ed 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,9 +40,8 @@
 extern unsigned int sysctl_sched_walt_rotate_big_tasks;
 extern unsigned int sysctl_sched_min_task_util_for_boost;
 extern unsigned int sysctl_sched_min_task_util_for_colocation;
-extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
 extern unsigned int sysctl_sched_asym_cap_sibling_freq_match_pct;
-
+extern unsigned int sysctl_sched_coloc_downmigrate_ns;
 extern int
 walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp,
@@ -120,17 +119,10 @@
 extern int sysctl_schedstats(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);
-
-#ifdef CONFIG_SCHED_WALT
-extern int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
-					int write, void __user *buffer,
-					size_t *lenp, loff_t *ppos);
-#endif
-
 #define LIB_PATH_LENGTH 512
 extern char sched_lib_name[LIB_PATH_LENGTH];
 extern unsigned int sched_lib_mask_force;
 extern bool is_sched_lib_based_app(pid_t pid);
-extern unsigned long *sched_busy_hysteresis_cpubits;
+extern unsigned int sysctl_sched_busy_hysteresis_enable_cpus;
 
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 4b61295..23bd164 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -274,14 +274,6 @@
 enum sde_rsc_state get_sde_rsc_current_state(int rsc_index);
 
 /**
- * get_sde_rsc_primary_crtc - gets the primary crtc for the sde rsc.
- * @rsc_index:   A client will be created on this RSC. As of now only
- *		 SDE_RSC_INDEX is valid rsc index.
- * Returns: crtc id of primary crtc ; 0 for all other cases.
- */
-int get_sde_rsc_primary_crtc(int rsc_index);
-
-/**
  * sde_rsc_client_trigger_vote() - triggers ab/ib vote for rsc client
  *
  * @client:	 Client pointer provided by sde_rsc_client_create().
@@ -357,11 +349,6 @@
 	return SDE_RSC_IDLE_STATE;
 }
 
-static inline int get_sde_rsc_primary_crtc(int rsc_index)
-{
-	return 0;
-}
-
 static inline int sde_rsc_client_trigger_vote(
 	struct sde_rsc_client *caller_client, bool delta_vote)
 {
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 0b49740..316efa3 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -45,12 +45,16 @@
 struct vmap_area {
 	unsigned long va_start;
 	unsigned long va_end;
+
+	/*
+	 * Largest available free size in subtree.
+	 */
+	unsigned long subtree_max_size;
 	unsigned long flags;
 	struct rb_node rb_node;         /* address sorted rbtree */
 	struct list_head list;          /* address sorted list */
 	struct llist_node purge_list;    /* "lazy purge" list */
 	struct vm_struct *vm;
-	struct rcu_head rcu_head;
 };
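
subtree_max_size caches, at each node, the largest free block anywhere in that node's subtree, which lets a best-fit search prune whole subtrees of the augmented rbtree. A minimal illustration of the invariant on a plain binary tree (the kernel maintains this incrementally via rbtree_augmented callbacks; the recursive recompute here is only for clarity):

#include <stdio.h>

struct node {
	unsigned long size;		/* free block size at this node */
	unsigned long subtree_max_size;	/* max size within the subtree  */
	struct node *left, *right;
};

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;

	return m > c ? m : c;
}

static unsigned long update(struct node *n)
{
	if (!n)
		return 0;
	n->subtree_max_size = max3(n->size, update(n->left), update(n->right));
	return n->subtree_max_size;
}

int main(void)
{
	struct node l = { .size = 32 }, r = { .size = 128 };
	struct node root = { .size = 64, .left = &l, .right = &r };

	update(&root);
	/* A request for 100 bytes can skip the left subtree (max 32). */
	printf("root subtree max: %lu\n", root.subtree_max_size);
	return 0;
}
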
 
 /*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 2187fe5..c2c05c9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -992,6 +992,9 @@
 	/* This DAI link can route to other DAI links at runtime (Frontend)*/
 	unsigned int dynamic:1;
 
+	/* This DAI link can be reconfigured at runtime (Backend) */
+	unsigned int dynamic_be:1;
+
 	/*
 	 * This DAI can support no host IO (no pcm data is
 	 * copied to from host)
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index dad9a5c..b666893 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -576,8 +576,8 @@
 	    TP_PROTO(int cpu,
 		     unsigned long util, unsigned long avg_cap,
 		     unsigned long max_cap, unsigned long nl, unsigned long pl,
-		     unsigned int flags),
-	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, flags),
+		     unsigned int rtgb, unsigned int flags),
+	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, rtgb, flags),
 	    TP_STRUCT__entry(
 		    __field(int, cpu)
 		    __field(unsigned long, util)
@@ -585,6 +585,7 @@
 		    __field(unsigned long, max_cap)
 		    __field(unsigned long, nl)
 		    __field(unsigned long, pl)
+		    __field(unsigned int, rtgb)
 		    __field(unsigned int, flags)
 	    ),
 	    TP_fast_assign(
@@ -594,12 +595,13 @@
 		    __entry->max_cap = max_cap;
 		    __entry->nl = nl;
 		    __entry->pl = pl;
+		    __entry->rtgb = rtgb;
 		    __entry->flags = flags;
 	    ),
-	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu flags=0x%x",
+	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu rtgb=%u flags=0x%x",
 		      __entry->cpu, __entry->util, __entry->avg_cap,
 		      __entry->max_cap, __entry->nl,
-		      __entry->pl, __entry->flags)
+		      __entry->pl, __entry->rtgb, __entry->flags)
 );
 
 TRACE_EVENT(sugov_next_freq,
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
index 4ae8448..285f967 100644
--- a/include/trace/events/trace_msm_bus.h
+++ b/include/trace/events/trace_msm_bus.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2019, The Linux Foundation. All rights reserved.
  */
 
 #undef TRACE_SYSTEM
@@ -167,9 +167,10 @@
 TRACE_EVENT(bus_client_status,
 
 	TP_PROTO(const char *name, int src, int dest,
-		unsigned long long ab, unsigned long long ib, int active_only),
+		unsigned long long ab, unsigned long long ib,
+		int active_only, int vote_count),
 
-	TP_ARGS(name, src, dest, ab, ib, active_only),
+	TP_ARGS(name, src, dest, ab, ib, active_only, vote_count),
 
 	TP_STRUCT__entry(
 		__string(name, name)
@@ -178,6 +179,7 @@
 		__field(u64, ab)
 		__field(u64, ib)
 		__field(int, active_only)
+		__field(int, vote_count)
 	),
 
 	TP_fast_assign(
@@ -187,15 +189,17 @@
 		__entry->ab = ab;
 		__entry->ib = ib;
 		__entry->active_only = active_only;
+		__entry->vote_count = vote_count;
 	),
 
-	TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d",
+	TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d vote_count=%d",
 		__get_str(name),
 		__entry->src,
 		__entry->dest,
 		(unsigned long long)__entry->ab,
 		(unsigned long long)__entry->ib,
-		__entry->active_only)
+		__entry->active_only,
+		__entry->vote_count)
 );
 
 TRACE_EVENT(bus_agg_bw,
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 1d5620a..1b7123f 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -495,12 +495,9 @@
 
 	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
 		int freq_aggr, u64 load, int policy,
-		int big_task_rotation,
-		unsigned int sysctl_sched_little_cluster_coloc_fmin_khz,
-		u64 coloc_boost_load),
+		int big_task_rotation),
 	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
-		big_task_rotation, sysctl_sched_little_cluster_coloc_fmin_khz,
-		coloc_boost_load),
+		big_task_rotation),
 
 	TP_STRUCT__entry(
 		__field(int,	cpu)
@@ -516,9 +513,6 @@
 		__field(u64,	pl)
 		__field(u64,    load)
 		__field(int,    big_task_rotation)
-		__field(unsigned int,
-				sysctl_sched_little_cluster_coloc_fmin_khz)
-		__field(u64,	coloc_boost_load)
 	),
 
 	TP_fast_assign(
@@ -536,18 +530,13 @@
 					rq->walt_stats.pred_demands_sum_scaled;
 		__entry->load		= load;
 		__entry->big_task_rotation = big_task_rotation;
-		__entry->sysctl_sched_little_cluster_coloc_fmin_khz =
-				sysctl_sched_little_cluster_coloc_fmin_khz;
-		__entry->coloc_boost_load = coloc_boost_load;
 	),
 
-	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d sysctl_sched_little_cluster_coloc_fmin_khz=%u coloc_boost_load=%llu",
+	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
 		__entry->cpu, __entry->policy, __entry->ed_task_pid,
 		__entry->aggr_grp_load, __entry->freq_aggr,
 		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
 		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
-		__entry->big_task_rotation,
-		__entry->sysctl_sched_little_cluster_coloc_fmin_khz,
-		__entry->coloc_boost_load)
+		__entry->big_task_rotation)
 );
 #endif
diff --git a/include/uapi/linux/qbt_handler.h b/include/uapi/linux/qbt_handler.h
index 8ebbf1f..f8ffefd 100644
--- a/include/uapi/linux/qbt_handler.h
+++ b/include/uapi/linux/qbt_handler.h
@@ -14,6 +14,7 @@
 #define QBT_DISABLE_IPC          103
 #define QBT_ENABLE_FD            104
 #define QBT_DISABLE_FD           105
+#define QBT_CONFIGURE_TOUCH_FD   106
 
 /*
  * enum qbt_fw_event -
@@ -48,4 +49,17 @@
 	int value;
 };
 
+/*
+ * struct qbt_touch_config -
+ *		used to configure touch finger detect
+ * @rad_filter_enable - flag to enable/disable radius based filtering
+ * @rad_x: movement radius in x direction
+ * @rad_y: movement radius in y direction
+ */
+struct qbt_touch_config {
+	bool rad_filter_enable;
+	int rad_x;
+	int rad_y;
+};
+
 #endif /* _UAPI_QBT_HANDLER_H_ */
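
A hypothetical userspace snippet showing how a client could program the new radius filter through QBT_CONFIGURE_TOUCH_FD; the driver copies a struct qbt_touch_config from the ioctl argument. The device node path below is an assumption for illustration:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define QBT_CONFIGURE_TOUCH_FD 106	/* from uapi/linux/qbt_handler.h */

struct qbt_touch_config {
	bool rad_filter_enable;
	int rad_x;
	int rad_y;
};

int main(void)
{
	struct qbt_touch_config cfg = {
		.rad_filter_enable = true,
		.rad_x = 10,
		.rad_y = 10,
	};
	int fd = open("/dev/qbt_fd", O_RDWR);	/* node name assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, QBT_CONFIGURE_TOUCH_FD, &cfg) < 0)
		perror("QBT_CONFIGURE_TOUCH_FD");
	close(fd);
	return 0;
}
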
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index ad59af7..322e2b5 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1079,6 +1079,7 @@
 #define V4L2_BUF_FLAG_LAST			0x00100000
 /* Vendor extensions */
 #define V4L2_BUF_FLAG_CODECCONFIG		0x00020000
+#define V4L2_BUF_FLAG_END_OF_SUBFRAME		0x00000080
 #define V4L2_BUF_FLAG_DATA_CORRUPT		0x00400000
 #define V4L2_BUF_INPUT_UNSUPPORTED		0x01000000
 #define V4L2_BUF_FLAG_EOS			0x02000000
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3091896..6572ee2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -23,6 +23,7 @@
 	unsigned int		down_rate_limit_us;
 	unsigned int		hispeed_load;
 	unsigned int		hispeed_freq;
+	unsigned int		rtg_boost_freq;
 	bool			pl;
 };
 
@@ -36,6 +37,7 @@
 	struct sugov_tunables	*tunables;
 	struct list_head	tunables_hook;
 	unsigned long hispeed_util;
+	unsigned long rtg_boost_util;
 	unsigned long max;
 
 	raw_spinlock_t		update_lock;	/* For shared policies */
@@ -551,11 +553,15 @@
 
 #define NL_RATIO 75
 #define DEFAULT_HISPEED_LOAD 90
+#define DEFAULT_CPU0_RTG_BOOST_FREQ 1000000
+#define DEFAULT_CPU4_RTG_BOOST_FREQ 0
+#define DEFAULT_CPU7_RTG_BOOST_FREQ 0
 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 			      unsigned long *max)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
+	bool is_rtg_boost = sg_cpu->walt_load.rtgb_active;
 	unsigned long nl = sg_cpu->walt_load.nl;
 	unsigned long cpu_util = sg_cpu->util;
 	bool is_hiload;
@@ -563,6 +569,9 @@
 	if (use_pelt())
 		return;
 
+	if (is_rtg_boost)
+		*util = max(*util, sg_policy->rtg_boost_util);
+
 	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
 					   sg_policy->tunables->hispeed_load,
 					   100));
@@ -587,12 +596,22 @@
 		sg_policy->need_freq_update = true;
 }
 
+static inline unsigned long target_util(struct sugov_policy *sg_policy,
+				  unsigned int freq)
+{
+	unsigned long util;
+
+	util = freq_to_util(sg_policy, freq);
+	util = mult_frac(util, TARGET_LOAD, 100);
+	return util;
+}
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
 				unsigned int flags)
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max, hs_util;
+	unsigned long util, max, hs_util, boost_util;
 	unsigned int next_f;
 	bool busy;
 
@@ -615,10 +634,13 @@
 
 	if (sg_policy->max != max) {
 		sg_policy->max = max;
-		hs_util = freq_to_util(sg_policy,
+		hs_util = target_util(sg_policy,
 				       sg_policy->tunables->hispeed_freq);
-		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
 		sg_policy->hispeed_util = hs_util;
+
+		boost_util = target_util(sg_policy,
+				    sg_policy->tunables->rtg_boost_freq);
+		sg_policy->rtg_boost_util = boost_util;
 	}
 
 	util = sugov_iowait_apply(sg_cpu, time, util, max);
@@ -627,7 +649,8 @@
 
 	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
 				sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
-				sg_cpu->walt_load.pl, flags);
+				sg_cpu->walt_load.pl,
+				sg_cpu->walt_load.rtgb_active, flags);
 
 	sugov_walt_adjust(sg_cpu, &util, &max);
 	next_f = get_next_freq(sg_policy, util, max);
@@ -710,7 +733,7 @@
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long hs_util;
+	unsigned long hs_util, boost_util;
 	unsigned int next_f;
 
 	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
@@ -722,10 +745,13 @@
 
 	if (sg_policy->max != sg_cpu->max) {
 		sg_policy->max = sg_cpu->max;
-		hs_util = freq_to_util(sg_policy,
+		hs_util = target_util(sg_policy,
 					sg_policy->tunables->hispeed_freq);
-		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
 		sg_policy->hispeed_util = hs_util;
+
+		boost_util = target_util(sg_policy,
+				    sg_policy->tunables->rtg_boost_freq);
+		sg_policy->rtg_boost_util = boost_util;
 	}
 
 	sugov_iowait_boost(sg_cpu, time, flags);
@@ -737,7 +763,8 @@
 
 	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
 				sg_cpu->max, sg_cpu->walt_load.nl,
-				sg_cpu->walt_load.pl, flags);
+				sg_cpu->walt_load.pl,
+				sg_cpu->walt_load.rtgb_active, flags);
 
 	if (sugov_should_update_freq(sg_policy, time) &&
 	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
@@ -909,9 +936,8 @@
 	tunables->hispeed_freq = val;
 	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
 		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
-		hs_util = freq_to_util(sg_policy,
+		hs_util = target_util(sg_policy,
 					sg_policy->tunables->hispeed_freq);
-		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
 		sg_policy->hispeed_util = hs_util;
 		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 	}
@@ -919,6 +945,37 @@
 	return count;
 }
 
+static ssize_t rtg_boost_freq_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rtg_boost_freq);
+}
+
+static ssize_t rtg_boost_freq_store(struct gov_attr_set *attr_set,
+				    const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+	unsigned int val;
+	struct sugov_policy *sg_policy;
+	unsigned long boost_util;
+	unsigned long flags;
+
+	if (kstrtouint(buf, 10, &val))
+		return -EINVAL;
+
+	tunables->rtg_boost_freq = val;
+	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+		boost_util = target_util(sg_policy,
+					  sg_policy->tunables->rtg_boost_freq);
+		sg_policy->rtg_boost_util = boost_util;
+		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
+	}
+
+	return count;
+}
+
 static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
 {
 	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
@@ -939,6 +996,7 @@
 
 static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
+static struct governor_attr rtg_boost_freq = __ATTR_RW(rtg_boost_freq);
 static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
@@ -946,6 +1004,7 @@
 	&down_rate_limit_us.attr,
 	&hispeed_load.attr,
 	&hispeed_freq.attr,
+	&rtg_boost_freq.attr,
 	&pl.attr,
 	NULL
 };
@@ -1059,6 +1118,7 @@
 
 	cached->pl = tunables->pl;
 	cached->hispeed_load = tunables->hispeed_load;
+	cached->rtg_boost_freq = tunables->rtg_boost_freq;
 	cached->hispeed_freq = tunables->hispeed_freq;
 	cached->up_rate_limit_us = tunables->up_rate_limit_us;
 	cached->down_rate_limit_us = tunables->down_rate_limit_us;
@@ -1083,6 +1143,7 @@
 
 	tunables->pl = cached->pl;
 	tunables->hispeed_load = cached->hispeed_load;
+	tunables->rtg_boost_freq = cached->rtg_boost_freq;
 	tunables->hispeed_freq = cached->hispeed_freq;
 	tunables->up_rate_limit_us = cached->up_rate_limit_us;
 	tunables->down_rate_limit_us = cached->down_rate_limit_us;
@@ -1092,6 +1153,7 @@
 {
 	struct sugov_policy *sg_policy;
 	struct sugov_tunables *tunables;
+	unsigned long util;
 	int ret = 0;
 
 	/* State should be equivalent to EXIT */
@@ -1135,8 +1197,25 @@
 	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
 	tunables->hispeed_freq = 0;
 
+	switch (policy->cpu) {
+	default:
+	case 0:
+		tunables->rtg_boost_freq = DEFAULT_CPU0_RTG_BOOST_FREQ;
+		break;
+	case 4:
+		tunables->rtg_boost_freq = DEFAULT_CPU4_RTG_BOOST_FREQ;
+		break;
+	case 7:
+		tunables->rtg_boost_freq = DEFAULT_CPU7_RTG_BOOST_FREQ;
+		break;
+	}
+
 	policy->governor_data = sg_policy;
 	sg_policy->tunables = tunables;
+
+	util = target_util(sg_policy, sg_policy->tunables->rtg_boost_freq);
+	sg_policy->rtg_boost_util = util;
+
 	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);
 
 	sugov_tunables_restore(policy);
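
target_util() folds the old freq_to_util() + mult_frac(TARGET_LOAD) pair into one helper, so the hispeed and the new rtg_boost floors are derived identically. A worked standalone version, with an assumed linear freq-to-util mapping and an assumed TARGET_LOAD of 80 (neither definition appears in this hunk):

#include <stdio.h>

#define TARGET_LOAD 80	/* percent; assumed, not shown in this diff */

/* Assumed linear mapping of frequency to capacity-scale utilization. */
static unsigned long freq_to_util(unsigned long freq,
				  unsigned long max_freq,
				  unsigned long max_cap)
{
	return max_cap * freq / max_freq;
}

static unsigned long target_util(unsigned long freq,
				 unsigned long max_freq,
				 unsigned long max_cap)
{
	unsigned long util = freq_to_util(freq, max_freq, max_cap);

	return util * TARGET_LOAD / 100; /* mult_frac(util, TARGET_LOAD, 100) */
}

int main(void)
{
	/* DEFAULT_CPU0_RTG_BOOST_FREQ = 1000000 kHz on a 1.8 GHz little CPU */
	printf("rtg boost util floor: %lu\n",
	       target_util(1000000, 1804800, 1024));
	return 0;
}
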
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cfaf11..c74dd1f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6950,6 +6950,13 @@
 			}
 
 			/*
+			 * Skip processing placement further if we are visiting
+			 * cpus with lower capacity than start cpu
+			 */
+			if (capacity_orig < capacity_orig_of(start_cpu))
+				continue;
+
+			/*
 			 * Case B) Non latency sensitive tasks on IDLE CPUs.
 			 *
 			 * Find an optimal backup IDLE CPU for non latency
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 410d29c..7261a43 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -143,7 +143,6 @@
 	unsigned int max_possible_freq;
 	bool freq_init_done;
 	u64 aggr_grp_load;
-	u64 coloc_boost_load;
 };
 
 extern cpumask_t asym_cap_sibling_cpus;
@@ -2149,6 +2148,7 @@
 	unsigned long prev_window_util;
 	unsigned long nl;
 	unsigned long pl;
+	bool rtgb_active;
 	u64 ws;
 };
 
@@ -2176,6 +2176,7 @@
 u64 freq_policy_load(struct rq *rq);
 
 extern u64 walt_load_reported_window;
+extern bool rtgb_active;
 
 static inline unsigned long
 __cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -2209,6 +2210,7 @@
 		walt_load->nl = nl;
 		walt_load->pl = pl;
 		walt_load->ws = walt_load_reported_window;
+		walt_load->rtgb_active = rtgb_active;
 	}
 
 	return (util >= capacity) ? capacity : util;
@@ -2748,6 +2750,7 @@
 	struct sched_cluster *preferred_cluster;
 	struct rcu_head rcu;
 	u64 last_update;
+	u64 downmigrate_ts;
 };
 
 extern struct sched_cluster *sched_cluster[NR_CPUS];
@@ -3129,7 +3132,6 @@
 	return policy;
 }
 
-extern void walt_map_freq_to_load(void);
 extern void walt_update_min_max_capacity(void);
 
 static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
@@ -3283,7 +3285,6 @@
 #endif
 
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void walt_map_freq_to_load(void) { }
 static inline void walt_update_min_max_capacity(void) { }
 #endif	/* CONFIG_SCHED_WALT */
 
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 7555dc9..5fb03f9 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -26,9 +26,7 @@
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
-static struct cpumask sched_busy_hysteresis_cpumask;
-unsigned long *sched_busy_hysteresis_cpubits =
-				cpumask_bits(&sched_busy_hysteresis_cpumask);
+unsigned int sysctl_sched_busy_hysteresis_enable_cpus;
 static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
 
 #define NR_THRESHOLD_PCT		15
@@ -107,7 +105,7 @@
 {
 	bool nr_run_trigger = false, load_trigger = false;
 
-	if (!cpumask_test_cpu(cpu, &sched_busy_hysteresis_cpumask))
+	if (!(BIT(cpu) & sysctl_sched_busy_hysteresis_enable_cpus))
 		return;
 
 	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
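
The sysctl is now a plain per-CPU bitmask: bit N enables busy hysteresis on CPU N, with writes clamped to 0-255 (CPUs 0-7) in the kernel/sysctl.c hunk below. A tiny sketch of the BIT(cpu) membership test used above:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int enable_cpus = 0x0f;	/* CPUs 0-3, e.g. the littles */

	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu%d: %s\n", cpu,
		       (BIT(cpu) & enable_cpus) ? "hysteresis" : "skip");
	return 0;
}
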
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index c4a3a2c..d7967a9 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -485,7 +485,6 @@
 	struct sched_cluster *cluster = rq->cluster;
 	u64 aggr_grp_load = cluster->aggr_grp_load;
 	u64 load, tt_load = 0;
-	u64 coloc_boost_load = cluster->coloc_boost_load;
 
 	if (rq->ed_task != NULL) {
 		load = sched_ravg_window;
@@ -497,9 +496,6 @@
 	else
 		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
 
-	if (coloc_boost_load)
-		load = max_t(u64, load, coloc_boost_load);
-
 	tt_load = top_task_load(rq);
 	switch (reporting_policy) {
 	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
@@ -516,9 +512,7 @@
 
 done:
 	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
-				load, reporting_policy, walt_rotation_enabled,
-				sysctl_sched_little_cluster_coloc_fmin_khz,
-				coloc_boost_load);
+				load, reporting_policy, walt_rotation_enabled);
 	return load;
 }
 
@@ -2081,7 +2075,7 @@
 
 #define pct_to_min_scaled(tunable) \
 		div64_u64(((u64)sched_ravg_window * tunable) << 10, \
-			   (u64)sched_cluster[0]->load_scale_factor)
+			   (u64)sched_cluster[0]->load_scale_factor * 100)
 
 static inline void walt_update_group_thresholds(void)
 {
@@ -2331,7 +2325,6 @@
 	.max_possible_freq	=	1,
 	.exec_scale_factor	=	1024,
 	.aggr_grp_load		=	0,
-	.coloc_boost_load	=	0,
 };
 
 void init_clusters(void)
@@ -2494,6 +2487,7 @@
  * The children inherits the group id from the parent.
  */
 unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;
 
 struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
 static LIST_HEAD(active_related_thread_groups);
@@ -2515,30 +2509,35 @@
 unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
 
 static inline
-struct sched_cluster *best_cluster(struct related_thread_group *grp,
+void update_best_cluster(struct related_thread_group *grp,
 				   u64 demand, bool boost)
 {
-	struct sched_cluster *cluster = sched_cluster[0];
-	unsigned int threshold;
-
 	if (boost) {
-		cluster = sched_cluster[1];
-		goto out;
+		grp->preferred_cluster = sched_cluster[1];
+		return;
 	}
 
-	if (!demand)
-		goto out;
-
-	if (grp->preferred_cluster == sched_cluster[1])
-		threshold = sched_group_downmigrate;
-	else
-		threshold = sched_group_upmigrate;
-
-	if (demand >= threshold)
-		cluster = sched_cluster[1];
-
-out:
-	return cluster;
+	if (grp->preferred_cluster == sched_cluster[0]) {
+		if (demand >= sched_group_upmigrate)
+			grp->preferred_cluster = sched_cluster[1];
+		return;
+	}
+	if (demand < sched_group_downmigrate) {
+		if (!sysctl_sched_coloc_downmigrate_ns) {
+			grp->preferred_cluster = sched_cluster[0];
+			return;
+		}
+		if (!grp->downmigrate_ts) {
+			grp->downmigrate_ts = grp->last_update;
+			return;
+		}
+		if (grp->last_update - grp->downmigrate_ts >
+				sysctl_sched_coloc_downmigrate_ns) {
+			grp->preferred_cluster = sched_cluster[0];
+			grp->downmigrate_ts = 0;
+		}
+	} else if (grp->downmigrate_ts)
+		grp->downmigrate_ts = 0;
 }
 
 int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
@@ -2597,8 +2596,7 @@
 	}
 
 	grp->last_update = wallclock;
-	grp->preferred_cluster = best_cluster(grp, combined_demand,
-					      group_boost);
+	update_best_cluster(grp, combined_demand, group_boost);
 	trace_sched_set_preferred_cluster(grp, combined_demand);
 }
 
@@ -3078,69 +3076,21 @@
 	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
 }
 
-/* Set to 1GHz by default */
-unsigned int sysctl_sched_little_cluster_coloc_fmin_khz = 1000000;
-static u64 coloc_boost_load;
+bool rtgb_active;
 
-void walt_map_freq_to_load(void)
-{
-	struct sched_cluster *cluster;
-
-	for_each_sched_cluster(cluster) {
-		if (is_min_capacity_cluster(cluster)) {
-			int fcpu = cluster_first_cpu(cluster);
-
-			coloc_boost_load = div64_u64(
-				((u64)sched_ravg_window *
-				arch_scale_cpu_capacity(NULL, fcpu) *
-				sysctl_sched_little_cluster_coloc_fmin_khz),
-				(u64)1024 * cpu_max_possible_freq(fcpu));
-			coloc_boost_load = div64_u64(coloc_boost_load << 2, 5);
-			break;
-		}
-	}
-}
-
-static void walt_update_coloc_boost_load(void)
+static bool is_rtgb_active(void)
 {
 	struct related_thread_group *grp;
-	struct sched_cluster *cluster;
 
-	if (!sysctl_sched_little_cluster_coloc_fmin_khz ||
-			sched_boost() == CONSERVATIVE_BOOST)
-		return;
+	if (sched_boost() == CONSERVATIVE_BOOST)
+		return false;
 
 	grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
 	if (!grp || !grp->preferred_cluster ||
 			is_min_capacity_cluster(grp->preferred_cluster))
-		return;
+		return false;
 
-	for_each_sched_cluster(cluster) {
-		if (is_min_capacity_cluster(cluster)) {
-			cluster->coloc_boost_load = coloc_boost_load;
-			break;
-		}
-	}
-}
-
-int sched_little_cluster_coloc_fmin_khz_handler(struct ctl_table *table,
-				int write, void __user *buffer, size_t *lenp,
-				loff_t *ppos)
-{
-	int ret;
-	static DEFINE_MUTEX(mutex);
-
-	mutex_lock(&mutex);
-
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		goto done;
-
-	walt_map_freq_to_load();
-
-done:
-	mutex_unlock(&mutex);
-	return ret;
+	return true;
 }
 
 /*
@@ -3193,7 +3143,6 @@
 
 		cluster->aggr_grp_load = aggr_grp_load;
 		total_grp_load += aggr_grp_load;
-		cluster->coloc_boost_load = 0;
 
 		if (is_min_capacity_cluster(cluster))
 			min_cluster_grp_load = aggr_grp_load;
@@ -3208,7 +3157,9 @@
 			for_each_cpu(cpu, &asym_cap_sibling_cpus)
 				cpu_cluster(cpu)->aggr_grp_load = big_grp_load;
 		}
-		walt_update_coloc_boost_load();
+		rtgb_active = is_rtgb_active();
+	} else {
+		rtgb_active = false;
 	}
 
 	for_each_sched_cluster(cluster) {
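
update_best_cluster() now demotes the colocation group only after its demand has stayed below sched_group_downmigrate for a full sysctl_sched_coloc_downmigrate_ns window, timed against grp->last_update; any excursion back above the threshold resets the timer. A condensed standalone model of that hysteresis:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t downmigrate_ts;

/* Returns true once the group may move back to the little cluster. */
static bool may_downmigrate(uint64_t now_ns, uint64_t demand,
			    uint64_t down_thresh, uint64_t window_ns)
{
	if (demand >= down_thresh) {	/* busy again: reset the timer */
		downmigrate_ts = 0;
		return false;
	}
	if (!window_ns)			/* hysteresis disabled         */
		return true;
	if (!downmigrate_ts) {		/* open the quiet window       */
		downmigrate_ts = now_ns;
		return false;
	}
	if (now_ns - downmigrate_ts > window_ns) {
		downmigrate_ts = 0;
		return true;
	}
	return false;
}

int main(void)
{
	/* demand below threshold at t=10ms and t=70ms with a 50ms window */
	printf("%d\n", may_downmigrate(10000000ULL, 10, 100, 50000000ULL));
	printf("%d\n", may_downmigrate(70000000ULL, 10, 100, 50000000ULL));
	return 0;
}
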
diff --git a/kernel/signal.c b/kernel/signal.c
index e3bb6c1..3271bc6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1370,8 +1370,10 @@
 
 	if (!ret && sig) {
 		ret = do_send_sig_info(sig, info, p, type);
-		if (capable(CAP_KILL) && sig == SIGKILL)
+		if (capable(CAP_KILL) && sig == SIGKILL) {
 			add_to_oom_reaper(p);
+			ulmk_update_last_kill();
+		}
 	}
 
 	return ret;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6983bc7..9fa6c53 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -132,9 +132,6 @@
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
 static int one_thousand = 1000;
-#ifdef CONFIG_SCHED_WALT
-static int two_million = 2000000;
-#endif
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
@@ -407,16 +404,6 @@
 		.extra2		= &one_thousand,
 	},
 	{
-		.procname	= "sched_little_cluster_coloc_fmin_khz",
-		.data		= &sysctl_sched_little_cluster_coloc_fmin_khz,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_little_cluster_coloc_fmin_khz_handler,
-		.extra1		= &zero,
-		.extra2		= &two_million,
-	},
-
-	{
 		.procname       = "sched_asym_cap_sibling_freq_match_pct",
 		.data           = &sysctl_sched_asym_cap_sibling_freq_match_pct,
 		.maxlen         = sizeof(unsigned int),
@@ -425,6 +412,14 @@
 		.extra1         = &one,
 		.extra2         = &one_hundred,
 	},
+	{
+		.procname	= "sched_coloc_downmigrate_ns",
+		.data		= &sysctl_sched_coloc_downmigrate_ns,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax,
+	},
+
 #endif
 #ifdef CONFIG_SMP
 	{
@@ -443,10 +438,12 @@
 	},
 	{
 		.procname	= "sched_busy_hysteresis_enable_cpus",
-		.data		= &sched_busy_hysteresis_cpubits,
-		.maxlen		= NR_CPUS,
+		.data		= &sysctl_sched_busy_hysteresis_enable_cpus,
+		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_do_large_bitmap,
+		.proc_handler	= proc_douintvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two_hundred_fifty_five,
 	},
 #endif
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/mm/Kconfig b/mm/Kconfig
index 18b988a..64f11e1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -803,6 +803,15 @@
 	 allocating, it is failing its processing and a classic page fault
 	 is then tried.
 
+config HAVE_USERSPACE_LOW_MEMORY_KILLER
+	bool "Have userspace low memory killer"
+	default n
+	help
+	  The page allocator cannot call directly into a userspace
+	  low memory killer the way it can call into the kernel OOM
+	  killer. This option therefore adds a timeout mechanism that
+	  gives the userspace low memory killer a chance to run before
+	  the OOM killer is invoked.
+
 config GUP_BENCHMARK
 	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
 	default n
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index e1d306b..41c7fea 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -41,6 +41,14 @@
 	  investigating the root cause which may be rooted within cache
 	  or memory.
 
+config DEBUG_PANIC_ON_OOM
+	bool "Enable to Panic on OOM detection"
+	help
+	  Android primarily uses an alternative mechanism to detect low
+	  memory situations and kill processes as required. The kernel
+	  oom-killer can mask problems with that mechanism, which may be
+	  undesirable in a debug environment.
+
 config DEBUG_PAGEALLOC_ENABLE_DEFAULT
 	bool "Enable debug page memory allocations by default?"
 	default n
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index e5f1714..dbe7f1c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -51,7 +51,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
 
-int sysctl_panic_on_oom;
+int sysctl_panic_on_oom =
+	IS_ENABLED(CONFIG_DEBUG_PANIC_ON_OOM) ? 2 : 0;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
 int sysctl_reap_mem_on_sigkill;
@@ -66,6 +67,27 @@
  */
 DEFINE_MUTEX(oom_lock);
 
+/*
+ * If ULMK has killed a process recently,
+ * we are making progress.
+ */
+
+#ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
+static atomic64_t ulmk_kill_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
+
+bool should_ulmk_retry(void)
+{
+	unsigned long j = atomic64_read(&ulmk_kill_jiffies);
+
+	return time_before(jiffies, j + 2 * HZ);
+}
+
+void ulmk_update_last_kill(void)
+{
+	atomic64_set(&ulmk_kill_jiffies, jiffies);
+}
+#endif
+
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligiblity for kill
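
Aside: should_ulmk_retry() above pairs with the kernel/signal.c hunk earlier in this patch — a SIGKILL sent by a CAP_KILL-capable process (in practice lmkd, the userspace low memory killer) calls ulmk_update_last_kill(), and the page allocator then keeps retrying for up to two seconds afterwards. The comparison stays correct across jiffies wrap-around because of time_before(); a standalone sketch of that trick, with illustrative values:

#include <stdio.h>

/* The kernel's time_before(a, b) expands (modulo type checking) to
 * ((long)((a) - (b)) < 0): signed subtraction keeps the comparison
 * correct even after the jiffies counter wraps around. */
#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long last = -300UL;	/* last ULMK kill: just before wrap */
	unsigned long win  = 200;	/* retry window, think "2 * HZ" */
	unsigned long now  = 50;	/* current jiffies: after the wrap */

	/* 350 ticks really elapsed, so the window has expired. */
	printf("naive compare: %d\n", now < last + win);	     /* 1, wrong */
	printf("time_before : %d\n", time_before(now, last + win)); /* 0, right */
	return 0;
}
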
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2c31f6..a3dc699 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4365,6 +4365,9 @@
 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
 		goto nopage;
 
+	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry())
+		goto retry;
+
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
 				 did_some_progress > 0, &no_progress_loops))
 		goto retry;
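
Aside: PAGE_ALLOC_COSTLY_ORDER is 3 in this kernel, so the new retry only covers allocations of up to eight contiguous pages. A quick sanity check of the sizes involved, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	const int costly_order = 3;	/* PAGE_ALLOC_COSTLY_ORDER */
	const long page_size = 4096;	/* assuming 4 KiB pages */
	int order;

	for (order = 0; order <= costly_order; order++)
		printf("order %d: %ld KiB\n", order,
		       (page_size << order) / 1024);	/* 4, 8, 16, 32 KiB */
	return 0;
}
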
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index aba9b13..8721360 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,6 +31,7 @@
 #include <linux/compiler.h>
 #include <linux/llist.h>
 #include <linux/bitops.h>
+#include <linux/rbtree_augmented.h>
 
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>
@@ -323,6 +324,9 @@
 
 /*** Global kva allocator ***/
 
+#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
+#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
+
 #define VM_LAZY_FREE	0x02
 #define VM_VM_AREA	0x04
 
@@ -331,14 +335,67 @@
 LIST_HEAD(vmap_area_list);
 static LLIST_HEAD(vmap_purge_list);
 static struct rb_root vmap_area_root = RB_ROOT;
+static bool vmap_initialized __read_mostly;
 
-/* The vmap cache globals are protected by vmap_area_lock */
-static struct rb_node *free_vmap_cache;
-static unsigned long cached_hole_size;
-static unsigned long cached_vstart;
-static unsigned long cached_align;
+/*
+ * This kmem_cache is used for vmap_area objects. Instead of
+ * allocating from slab we reuse an object from this cache to
+ * make things faster, especially in the "no edge" splitting
+ * of a free block.
+ */
+static struct kmem_cache *vmap_area_cachep;
 
-static unsigned long vmap_area_pcpu_hole;
+/*
+ * This linked list is used together with free_vmap_area_root.
+ * It gives O(1) access to prev/next to perform fast coalescing.
+ */
+static LIST_HEAD(free_vmap_area_list);
+
+/*
+ * This augmented red-black tree represents the free vmap space.
+ * All vmap_area objects in this tree are sorted by va->va_start
+ * address. It is used for allocation and merging when a vmap
+ * object is released.
+ *
+ * Each vmap_area node contains the maximum available free block
+ * of its sub-tree, right or left. Therefore it is possible to
+ * find the lowest match of a free area.
+ */
+static struct rb_root free_vmap_area_root = RB_ROOT;
+
+static __always_inline unsigned long
+va_size(struct vmap_area *va)
+{
+	return (va->va_end - va->va_start);
+}
+
+static __always_inline unsigned long
+get_subtree_max_size(struct rb_node *node)
+{
+	struct vmap_area *va;
+
+	va = rb_entry_safe(node, struct vmap_area, rb_node);
+	return va ? va->subtree_max_size : 0;
+}
+
+/*
+ * Gets called when we remove the node and rotate.
+ */
+static __always_inline unsigned long
+compute_subtree_max_size(struct vmap_area *va)
+{
+	return max3(va_size(va),
+		get_subtree_max_size(va->rb_node.rb_left),
+		get_subtree_max_size(va->rb_node.rb_right));
+}
+
+RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
+	struct vmap_area, rb_node, unsigned long, subtree_max_size,
+	compute_subtree_max_size)
+
+static void purge_vmap_area_lazy(void);
+static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
+static unsigned long lazy_max_pages(void);
 
 static atomic_long_t nr_vmalloc_pages;
 
@@ -366,41 +423,610 @@
 	return NULL;
 }
 
-static void __insert_vmap_area(struct vmap_area *va)
+/*
+ * This function returns the addresses of the parent node
+ * and its left or right link for further processing.
+ */
+static __always_inline struct rb_node **
+find_va_links(struct vmap_area *va,
+	struct rb_root *root, struct rb_node *from,
+	struct rb_node **parent)
 {
-	struct rb_node **p = &vmap_area_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct rb_node *tmp;
+	struct vmap_area *tmp_va;
+	struct rb_node **link;
 
-	while (*p) {
-		struct vmap_area *tmp_va;
-
-		parent = *p;
-		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
-		if (va->va_start < tmp_va->va_end)
-			p = &(*p)->rb_left;
-		else if (va->va_end > tmp_va->va_start)
-			p = &(*p)->rb_right;
-		else
-			BUG();
+	if (root) {
+		link = &root->rb_node;
+		if (unlikely(!*link)) {
+			*parent = NULL;
+			return link;
+		}
+	} else {
+		link = &from;
 	}
 
-	rb_link_node(&va->rb_node, parent, p);
-	rb_insert_color(&va->rb_node, &vmap_area_root);
+	/*
+	 * Go to the bottom of the tree. When we hit the last point
+	 * we end up with the parent rb_node and the correct direction,
+	 * called "link" here, where the new va->rb_node will be attached.
+	 */
+	do {
+		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 
-	/* address-sort this list */
-	tmp = rb_prev(&va->rb_node);
-	if (tmp) {
-		struct vmap_area *prev;
-		prev = rb_entry(tmp, struct vmap_area, rb_node);
-		list_add_rcu(&va->list, &prev->list);
-	} else
-		list_add_rcu(&va->list, &vmap_area_list);
+		/*
+		 * During the traversal we also do some sanity checks.
+		 * Trigger the BUG() if there are side (left/right)
+		 * or full overlaps.
+		 */
+		if (va->va_start < tmp_va->va_end &&
+				va->va_end <= tmp_va->va_start)
+			link = &(*link)->rb_left;
+		else if (va->va_end > tmp_va->va_start &&
+				va->va_start >= tmp_va->va_end)
+			link = &(*link)->rb_right;
+		else
+			BUG();
+	} while (*link);
+
+	*parent = &tmp_va->rb_node;
+	return link;
 }
 
-static void purge_vmap_area_lazy(void);
+static __always_inline struct list_head *
+get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
+{
+	struct list_head *list;
 
-static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
+	if (unlikely(!parent))
+		/*
+		 * The red-black tree where we try to find VA neighbors
+		 * before merging or inserting is empty, i.e. there is
+		 * no free vmap space. Normally this does not happen,
+		 * but we handle the case anyway.
+		 */
+		return NULL;
+
+	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
+	return (&parent->rb_right == link ? list->next : list);
+}
+
+static __always_inline void
+link_va(struct vmap_area *va, struct rb_root *root,
+	struct rb_node *parent, struct rb_node **link, struct list_head *head)
+{
+	/*
+	 * VA is still not in the list, but we can
+	 * identify its future previous list_head node.
+	 */
+	if (likely(parent)) {
+		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
+		if (&parent->rb_right != link)
+			head = head->prev;
+	}
+
+	/* Insert to the rb-tree */
+	rb_link_node(&va->rb_node, parent, link);
+	if (root == &free_vmap_area_root) {
+		/*
+		 * Some explanation here. Just perform a simple insertion
+		 * into the tree. We do not set va->subtree_max_size to
+		 * its current size before calling rb_insert_augmented().
+		 * This is because we populate the tree from the bottom
+		 * to parent levels when the node _is_ in the tree.
+		 *
+		 * Therefore we set subtree_max_size to zero after insertion,
+		 * to let __augment_tree_propagate_from() put everything in
+		 * the correct order later on.
+		 */
+		rb_insert_augmented(&va->rb_node,
+			root, &free_vmap_area_rb_augment_cb);
+		va->subtree_max_size = 0;
+	} else {
+		rb_insert_color(&va->rb_node, root);
+	}
+
+	/* Address-sort this list */
+	list_add(&va->list, head);
+}
+
+static __always_inline void
+unlink_va(struct vmap_area *va, struct rb_root *root)
+{
+	/*
+	 * During merging a VA node can be empty, therefore
+	 * not linked to the tree or the list. Just check for that.
+	 */
+	if (!RB_EMPTY_NODE(&va->rb_node)) {
+		if (root == &free_vmap_area_root)
+			rb_erase_augmented(&va->rb_node,
+				root, &free_vmap_area_rb_augment_cb);
+		else
+			rb_erase(&va->rb_node, root);
+
+		list_del(&va->list);
+		RB_CLEAR_NODE(&va->rb_node);
+	}
+}
+
+#if DEBUG_AUGMENT_PROPAGATE_CHECK
+static void
+augment_tree_propagate_check(struct rb_node *n)
+{
+	struct vmap_area *va;
+	struct rb_node *node;
+	unsigned long size;
+	bool found = false;
+
+	if (n == NULL)
+		return;
+
+	va = rb_entry(n, struct vmap_area, rb_node);
+	size = va->subtree_max_size;
+	node = n;
+
+	while (node) {
+		va = rb_entry(node, struct vmap_area, rb_node);
+
+		if (get_subtree_max_size(node->rb_left) == size) {
+			node = node->rb_left;
+		} else {
+			if (va_size(va) == size) {
+				found = true;
+				break;
+			}
+
+			node = node->rb_right;
+		}
+	}
+
+	if (!found) {
+		va = rb_entry(n, struct vmap_area, rb_node);
+		pr_emerg("tree is corrupted: %lu, %lu\n",
+			va_size(va), va->subtree_max_size);
+	}
+
+	augment_tree_propagate_check(n->rb_left);
+	augment_tree_propagate_check(n->rb_right);
+}
+#endif
+
+/*
+ * This function populates subtree_max_size from bottom to upper
+ * levels starting from the VA point. The propagation must be done
+ * when the VA size is modified by changing its va_start/va_end, or
+ * when a new VA is inserted into the tree.
+ *
+ * It means that __augment_tree_propagate_from() must be called:
+ * - after a VA has been inserted into the tree (free path);
+ * - after a VA has been shrunk (allocation path);
+ * - after a VA has been increased (merging path).
+ *
+ * Please note that it does not mean that upper parent nodes
+ * and their subtree_max_size are recalculated all the time up
+ * to the root node.
+ *
+ *       4--8
+ *        /\
+ *       /  \
+ *      /    \
+ *    2--2  8--8
+ *
+ * For example, if we modify node 4, shrinking it to 2, then no
+ * modification is required. If we shrink node 2 to 1, only its
+ * subtree_max_size is updated, and set to 1. If we shrink node
+ * 8 to 6, then its subtree_max_size is set to 6 and the parent
+ * node becomes 4--6.
+ */
+static __always_inline void
+augment_tree_propagate_from(struct vmap_area *va)
+{
+	struct rb_node *node = &va->rb_node;
+	unsigned long new_va_sub_max_size;
+
+	while (node) {
+		va = rb_entry(node, struct vmap_area, rb_node);
+		new_va_sub_max_size = compute_subtree_max_size(va);
+
+		/*
+		 * If the newly calculated maximum available size of the
+		 * subtree is equal to the current one, then it means that
+		 * the tree is propagated correctly. So we have to stop at
+		 * this point to save cycles.
+		 */
+		if (va->subtree_max_size == new_va_sub_max_size)
+			break;
+
+		va->subtree_max_size = new_va_sub_max_size;
+		node = rb_parent(&va->rb_node);
+	}
+
+#if DEBUG_AUGMENT_PROPAGATE_CHECK
+	augment_tree_propagate_check(free_vmap_area_root.rb_node);
+#endif
+}
+
+static void
+insert_vmap_area(struct vmap_area *va,
+	struct rb_root *root, struct list_head *head)
+{
+	struct rb_node **link;
+	struct rb_node *parent;
+
+	link = find_va_links(va, root, NULL, &parent);
+	link_va(va, root, parent, link, head);
+}
+
+static void
+insert_vmap_area_augment(struct vmap_area *va,
+	struct rb_node *from, struct rb_root *root,
+	struct list_head *head)
+{
+	struct rb_node **link;
+	struct rb_node *parent;
+
+	if (from)
+		link = find_va_links(va, NULL, from, &parent);
+	else
+		link = find_va_links(va, root, NULL, &parent);
+
+	link_va(va, root, parent, link, head);
+	augment_tree_propagate_from(va);
+}
+
+/*
+ * Merge a de-allocated chunk of VA memory with previous
+ * and next free blocks. If no coalescing is done, a new
+ * free area is inserted. If the VA has been merged, it is
+ * freed.
+ */
+static __always_inline void
+merge_or_add_vmap_area(struct vmap_area *va,
+	struct rb_root *root, struct list_head *head)
+{
+	struct vmap_area *sibling;
+	struct list_head *next;
+	struct rb_node **link;
+	struct rb_node *parent;
+	bool merged = false;
+
+	/*
+	 * Find a place in the tree where VA potentially will be
+	 * inserted, unless it is merged with its sibling/siblings.
+	 */
+	link = find_va_links(va, root, NULL, &parent);
+
+	/*
+	 * Get next node of VA to check if merging can be done.
+	 */
+	next = get_va_next_sibling(parent, link);
+	if (unlikely(next == NULL))
+		goto insert;
+
+	/*
+	 * start            end
+	 * |                |
+	 * |<------VA------>|<-----Next----->|
+	 *                  |                |
+	 *                  start            end
+	 */
+	if (next != head) {
+		sibling = list_entry(next, struct vmap_area, list);
+		if (sibling->va_start == va->va_end) {
+			sibling->va_start = va->va_start;
+
+			/* Check and update the tree if needed. */
+			augment_tree_propagate_from(sibling);
+
+			/* Remove this VA, it has been merged. */
+			unlink_va(va, root);
+
+			/* Free vmap_area object. */
+			kmem_cache_free(vmap_area_cachep, va);
+
+			/* Point to the new merged area. */
+			va = sibling;
+			merged = true;
+		}
+	}
+
+	/*
+	 * start            end
+	 * |                |
+	 * |<-----Prev----->|<------VA------>|
+	 *                  |                |
+	 *                  start            end
+	 */
+	if (next->prev != head) {
+		sibling = list_entry(next->prev, struct vmap_area, list);
+		if (sibling->va_end == va->va_start) {
+			sibling->va_end = va->va_end;
+
+			/* Check and update the tree if needed. */
+			augment_tree_propagate_from(sibling);
+
+			/* Remove this VA, it has been merged. */
+			unlink_va(va, root);
+
+			/* Free vmap_area object. */
+			kmem_cache_free(vmap_area_cachep, va);
+
+			return;
+		}
+	}
+
+insert:
+	if (!merged) {
+		link_va(va, root, parent, link, head);
+		augment_tree_propagate_from(va);
+	}
+}
+
+static __always_inline bool
+is_within_this_va(struct vmap_area *va, unsigned long size,
+	unsigned long align, unsigned long vstart)
+{
+	unsigned long nva_start_addr;
+
+	if (va->va_start > vstart)
+		nva_start_addr = ALIGN(va->va_start, align);
+	else
+		nva_start_addr = ALIGN(vstart, align);
+
+	/* Can overflow due to a big size or alignment. */
+	if (nva_start_addr + size < nva_start_addr ||
+			nva_start_addr < vstart)
+		return false;
+
+	return (nva_start_addr + size <= va->va_end);
+}
+
+/*
+ * Find the first free block (lowest start address) in the tree
+ * that can satisfy the request described by the passed
+ * parameters.
+ */
+static __always_inline struct vmap_area *
+find_vmap_lowest_match(unsigned long size,
+	unsigned long align, unsigned long vstart)
+{
+	struct vmap_area *va;
+	struct rb_node *node;
+	unsigned long length;
+
+	/* Start from the root. */
+	node = free_vmap_area_root.rb_node;
+
+	/* Adjust the search size for alignment overhead. */
+	length = size + align - 1;
+
+	while (node) {
+		va = rb_entry(node, struct vmap_area, rb_node);
+
+		if (get_subtree_max_size(node->rb_left) >= length &&
+				vstart < va->va_start) {
+			node = node->rb_left;
+		} else {
+			if (is_within_this_va(va, size, align, vstart))
+				return va;
+
+			/*
+			 * It does not make sense to go deeper into the right
+			 * sub-tree if it does not have a free block that is
+			 * equal to or bigger than the requested search length.
+			 */
+			if (get_subtree_max_size(node->rb_right) >= length) {
+				node = node->rb_right;
+				continue;
+			}
+
+			/*
+			 * OK. We roll back and find the first right sub-tree
+			 * that satisfies the search criteria. It can happen
+			 * only once due to the "vstart" restriction.
+			 */
+			while ((node = rb_parent(node))) {
+				va = rb_entry(node, struct vmap_area, rb_node);
+				if (is_within_this_va(va, size, align, vstart))
+					return va;
+
+				if (get_subtree_max_size(node->rb_right) >= length &&
+						vstart <= va->va_start) {
+					node = node->rb_right;
+					break;
+				}
+			}
+		}
+	}
+
+	return NULL;
+}
+
+#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
+#include <linux/random.h>
+
+static struct vmap_area *
+find_vmap_lowest_linear_match(unsigned long size,
+	unsigned long align, unsigned long vstart)
+{
+	struct vmap_area *va;
+
+	list_for_each_entry(va, &free_vmap_area_list, list) {
+		if (!is_within_this_va(va, size, align, vstart))
+			continue;
+
+		return va;
+	}
+
+	return NULL;
+}
+
+static void
+find_vmap_lowest_match_check(unsigned long size)
+{
+	struct vmap_area *va_1, *va_2;
+	unsigned long vstart;
+	unsigned int rnd;
+
+	get_random_bytes(&rnd, sizeof(rnd));
+	vstart = VMALLOC_START + rnd;
+
+	va_1 = find_vmap_lowest_match(size, 1, vstart);
+	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
+
+	if (va_1 != va_2)
+		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
+			va_1, va_2, vstart);
+}
+#endif
+
+enum fit_type {
+	NOTHING_FIT = 0,
+	FL_FIT_TYPE = 1,	/* full fit */
+	LE_FIT_TYPE = 2,	/* left edge fit */
+	RE_FIT_TYPE = 3,	/* right edge fit */
+	NE_FIT_TYPE = 4		/* no edge fit */
+};
+
+static __always_inline enum fit_type
+classify_va_fit_type(struct vmap_area *va,
+	unsigned long nva_start_addr, unsigned long size)
+{
+	enum fit_type type;
+
+	/* Check if it is within VA. */
+	if (nva_start_addr < va->va_start ||
+			nva_start_addr + size > va->va_end)
+		return NOTHING_FIT;
+
+	/* Now classify. */
+	if (va->va_start == nva_start_addr) {
+		if (va->va_end == nva_start_addr + size)
+			type = FL_FIT_TYPE;
+		else
+			type = LE_FIT_TYPE;
+	} else if (va->va_end == nva_start_addr + size) {
+		type = RE_FIT_TYPE;
+	} else {
+		type = NE_FIT_TYPE;
+	}
+
+	return type;
+}
+
+static __always_inline int
+adjust_va_to_fit_type(struct vmap_area *va,
+	unsigned long nva_start_addr, unsigned long size,
+	enum fit_type type)
+{
+	struct vmap_area *lva;
+
+	if (type == FL_FIT_TYPE) {
+		/*
+		 * No need to split VA, it fully fits.
+		 *
+		 * |               |
+		 * V      NVA      V
+		 * |---------------|
+		 */
+		unlink_va(va, &free_vmap_area_root);
+		kmem_cache_free(vmap_area_cachep, va);
+	} else if (type == LE_FIT_TYPE) {
+		/*
+		 * Split left edge of fit VA.
+		 *
+		 * |       |
+		 * V  NVA  V   R
+		 * |-------|-------|
+		 */
+		va->va_start += size;
+	} else if (type == RE_FIT_TYPE) {
+		/*
+		 * Split right edge of fit VA.
+		 *
+		 *         |       |
+		 *     L   V  NVA  V
+		 * |-------|-------|
+		 */
+		va->va_end = nva_start_addr;
+	} else if (type == NE_FIT_TYPE) {
+		/*
+		 * Split no edge of fit VA.
+		 *
+		 *     |       |
+		 *   L V  NVA  V R
+		 * |---|-------|---|
+		 */
+		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+		if (unlikely(!lva))
+			return -1;
+
+		/*
+		 * Build the remainder.
+		 */
+		lva->va_start = va->va_start;
+		lva->va_end = nva_start_addr;
+
+		/*
+		 * Shrink this VA to remaining size.
+		 */
+		va->va_start = nva_start_addr + size;
+	} else {
+		return -1;
+	}
+
+	if (type != FL_FIT_TYPE) {
+		augment_tree_propagate_from(va);
+
+		if (type == NE_FIT_TYPE)
+			insert_vmap_area_augment(lva, &va->rb_node,
+				&free_vmap_area_root, &free_vmap_area_list);
+	}
+
+	return 0;
+}
+
+/*
+ * Returns the start address of the newly allocated area on success.
+ * Otherwise "vend" is returned to indicate failure.
+ */
+static __always_inline unsigned long
+__alloc_vmap_area(unsigned long size, unsigned long align,
+	unsigned long vstart, unsigned long vend, int node)
+{
+	unsigned long nva_start_addr;
+	struct vmap_area *va;
+	enum fit_type type;
+	int ret;
+
+	va = find_vmap_lowest_match(size, align, vstart);
+	if (unlikely(!va))
+		return vend;
+
+	if (va->va_start > vstart)
+		nva_start_addr = ALIGN(va->va_start, align);
+	else
+		nva_start_addr = ALIGN(vstart, align);
+
+	/* Check the "vend" restriction. */
+	if (nva_start_addr + size > vend)
+		return vend;
+
+	/* Classify what we have found. */
+	type = classify_va_fit_type(va, nva_start_addr, size);
+	if (WARN_ON_ONCE(type == NOTHING_FIT))
+		return vend;
+
+	/* Update the free vmap_area. */
+	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
+	if (ret)
+		return vend;
+
+#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
+	find_vmap_lowest_match_check(size);
+#endif
+
+	return nva_start_addr;
+}
 
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
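
Aside: the heart of the rework above is the subtree_max_size invariant — every node caches the largest free block anywhere in its subtree, so find_vmap_lowest_match() can prune whole subtrees. A self-contained toy version of the propagation step, mirroring the 4--8 example in the comment above (illustrative only, not the patch's code):

#include <stdio.h>

struct node {
	unsigned long size;		/* va_end - va_start */
	unsigned long subtree_max;	/* largest size in this subtree */
	struct node *left, *right;
};

static unsigned long subtree_max(struct node *n)
{
	return n ? n->subtree_max : 0;
}

/* Toy equivalent of compute_subtree_max_size() plus one propagation step. */
static void propagate(struct node *n)
{
	unsigned long m = n->size;

	if (subtree_max(n->left) > m)
		m = subtree_max(n->left);
	if (subtree_max(n->right) > m)
		m = subtree_max(n->right);
	n->subtree_max = m;
}

int main(void)
{
	struct node l = { .size = 2, .subtree_max = 2 };
	struct node r = { .size = 8, .subtree_max = 8 };
	struct node root = { .size = 4, .left = &l, .right = &r };

	propagate(&root);
	printf("root: %lu--%lu\n", root.size, root.subtree_max); /* 4--8 */

	r.size = 6;		/* shrink node 8 to 6 ... */
	propagate(&r);
	propagate(&root);	/* ... and the parent becomes 4--6 */
	printf("root: %lu--%lu\n", root.size, root.subtree_max); /* 4--6 */
	return 0;
}
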
@@ -412,18 +1038,19 @@
 				int node, gfp_t gfp_mask)
 {
 	struct vmap_area *va;
-	struct rb_node *n;
 	unsigned long addr;
 	int purged = 0;
-	struct vmap_area *first;
 
 	BUG_ON(!size);
 	BUG_ON(offset_in_page(size));
 	BUG_ON(!is_power_of_2(align));
 
+	if (unlikely(!vmap_initialized))
+		return ERR_PTR(-EBUSY);
+
 	might_sleep();
 
-	va = kmalloc_node(sizeof(struct vmap_area),
+	va = kmem_cache_alloc_node(vmap_area_cachep,
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
@@ -436,87 +1063,20 @@
 
 retry:
 	spin_lock(&vmap_area_lock);
+
 	/*
-	 * Invalidate cache if we have more permissive parameters.
-	 * cached_hole_size notes the largest hole noticed _below_
-	 * the vmap_area cached in free_vmap_cache: if size fits
-	 * into that hole, we want to scan from vstart to reuse
-	 * the hole instead of allocating above free_vmap_cache.
-	 * Note that __free_vmap_area may update free_vmap_cache
-	 * without updating cached_hole_size or cached_align.
+	 * If an allocation fails, the "vend" address is
+	 * returned; in that case, trigger the overflow path.
 	 */
-	if (!free_vmap_cache ||
-			size < cached_hole_size ||
-			vstart < cached_vstart ||
-			align < cached_align) {
-nocache:
-		cached_hole_size = 0;
-		free_vmap_cache = NULL;
-	}
-	/* record if we encounter less permissive parameters */
-	cached_vstart = vstart;
-	cached_align = align;
-
-	/* find starting point for our search */
-	if (free_vmap_cache) {
-		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
-		addr = ALIGN(first->va_end, align);
-		if (addr < vstart)
-			goto nocache;
-		if (addr + size < addr)
-			goto overflow;
-
-	} else {
-		addr = ALIGN(vstart, align);
-		if (addr + size < addr)
-			goto overflow;
-
-		n = vmap_area_root.rb_node;
-		first = NULL;
-
-		while (n) {
-			struct vmap_area *tmp;
-			tmp = rb_entry(n, struct vmap_area, rb_node);
-			if (tmp->va_end >= addr) {
-				first = tmp;
-				if (tmp->va_start <= addr)
-					break;
-				n = n->rb_left;
-			} else
-				n = n->rb_right;
-		}
-
-		if (!first)
-			goto found;
-	}
-
-	/* from the starting point, walk areas until a suitable hole is found */
-	while (addr + size > first->va_start && addr + size <= vend) {
-		if (addr + cached_hole_size < first->va_start)
-			cached_hole_size = first->va_start - addr;
-		addr = ALIGN(first->va_end, align);
-		if (addr + size < addr)
-			goto overflow;
-
-		if (list_is_last(&first->list, &vmap_area_list))
-			goto found;
-
-		first = list_next_entry(first, list);
-	}
-
-found:
-	/*
-	 * Check also calculated address against the vstart,
-	 * because it can be 0 because of big align request.
-	 */
-	if (addr + size > vend || addr < vstart)
+	addr = __alloc_vmap_area(size, align, vstart, vend, node);
+	if (unlikely(addr == vend))
 		goto overflow;
 
 	va->va_start = addr;
 	va->va_end = addr + size;
 	va->flags = 0;
-	__insert_vmap_area(va);
-	free_vmap_cache = &va->rb_node;
+	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+
 	spin_unlock(&vmap_area_lock);
 
 	BUG_ON(!IS_ALIGNED(va->va_start, align));
@@ -545,7 +1105,8 @@
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
 			size);
-	kfree(va);
+
+	kmem_cache_free(vmap_area_cachep, va);
 	return ERR_PTR(-EBUSY);
 }
 
@@ -565,35 +1126,16 @@
 {
 	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
 
-	if (free_vmap_cache) {
-		if (va->va_end < cached_vstart) {
-			free_vmap_cache = NULL;
-		} else {
-			struct vmap_area *cache;
-			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
-			if (va->va_start <= cache->va_start) {
-				free_vmap_cache = rb_prev(&va->rb_node);
-				/*
-				 * We don't try to update cached_hole_size or
-				 * cached_align, but it won't go very wrong.
-				 */
-			}
-		}
-	}
-	rb_erase(&va->rb_node, &vmap_area_root);
-	RB_CLEAR_NODE(&va->rb_node);
-	list_del_rcu(&va->list);
+	/*
+	 * Remove from the busy tree/list.
+	 */
+	unlink_va(va, &vmap_area_root);
 
 	/*
-	 * Track the highest possible candidate for pcpu area
-	 * allocation.  Areas outside of vmalloc area can be returned
-	 * here too, consider only end addresses which fall inside
-	 * vmalloc area proper.
+	 * Merge VA with its neighbors, otherwise just add it.
 	 */
-	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
-		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
-
-	kfree_rcu(va, rcu_head);
+	merge_or_add_vmap_area(va,
+		&free_vmap_area_root, &free_vmap_area_list);
 }
 
 /*
@@ -639,7 +1181,7 @@
 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
 }
 
-static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
 
 /*
  * Serialize vmap purging.  There is no actual criticial section protected
@@ -657,7 +1199,7 @@
  */
 void set_iounmap_nonlazy(void)
 {
-	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
 }
 
 /*
@@ -665,34 +1207,40 @@
  */
 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
+	unsigned long resched_threshold;
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	bool do_free = false;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
+	if (unlikely(valist == NULL))
+		return false;
+
+	/*
+	 * TODO: calculate the flush range without looping.
+	 * The list can be up to lazy_max_pages() elements long.
+	 */
 	llist_for_each_entry(va, valist, purge_list) {
 		if (va->va_start < start)
 			start = va->va_start;
 		if (va->va_end > end)
 			end = va->va_end;
-		do_free = true;
 	}
 
-	if (!do_free)
-		return false;
-
 	flush_tlb_kernel_range(start, end);
+	resched_threshold = lazy_max_pages() << 1;
 
 	spin_lock(&vmap_area_lock);
 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
-		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
 		__free_vmap_area(va);
-		atomic_sub(nr, &vmap_lazy_nr);
-		cond_resched_lock(&vmap_area_lock);
+		atomic_long_sub(nr, &vmap_lazy_nr);
+
+		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
+			cond_resched_lock(&vmap_area_lock);
 	}
 	spin_unlock(&vmap_area_lock);
 	return true;
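
Aside: resched_threshold above is twice lazy_max_pages(), so the purge loop only yields the lock once the backlog has dropped below that level, which lets very large purges finish faster. For scale, lazy_max_pages() multiplies 32 MiB worth of pages by log (fls(num_online_cpus()) in mainline); a quick reproduction of the arithmetic, assuming eight online CPUs and 4 KiB pages:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): index of the highest set bit. */
static unsigned int fls_(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	const unsigned long page_size = 4096;	/* assuming 4 KiB pages */
	const unsigned int cpus = 8;		/* assuming 8 online CPUs */
	unsigned long lazy_max = fls_(cpus) * (32UL * 1024 * 1024 / page_size);

	printf("lazy_max_pages    = %lu pages (%lu MiB)\n",
	       lazy_max, lazy_max * page_size >> 20);	/* 32768 pages, 128 MiB */
	printf("resched_threshold = %lu pages\n", lazy_max << 1);
	return 0;
}
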
@@ -728,10 +1276,10 @@
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
-	int nr_lazy;
+	unsigned long nr_lazy;
 
-	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
-				    &vmap_lazy_nr);
+	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
+				PAGE_SHIFT, &vmap_lazy_nr);
 
 	/* After this point, we may free va at any time */
 	llist_add(&va->purge_list, &vmap_purge_list);
@@ -794,8 +1342,6 @@
 
 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
 
-static bool vmap_initialized __read_mostly = false;
-
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -1249,12 +1795,58 @@
 	vm_area_add_early(vm);
 }
 
+static void vmap_init_free_space(void)
+{
+	unsigned long vmap_start = 1;
+	const unsigned long vmap_end = ULONG_MAX;
+	struct vmap_area *busy, *free;
+
+	/*
+	 *     B     F     B     B     B     F
+	 * -|-----|.....|-----|-----|-----|.....|-
+	 *  |           The KVA space           |
+	 *  |<--------------------------------->|
+	 */
+	list_for_each_entry(busy, &vmap_area_list, list) {
+		if (busy->va_start - vmap_start > 0) {
+			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+			if (!WARN_ON_ONCE(!free)) {
+				free->va_start = vmap_start;
+				free->va_end = busy->va_start;
+
+				insert_vmap_area_augment(free, NULL,
+					&free_vmap_area_root,
+						&free_vmap_area_list);
+			}
+		}
+
+		vmap_start = busy->va_end;
+	}
+
+	if (vmap_end - vmap_start > 0) {
+		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+		if (!WARN_ON_ONCE(!free)) {
+			free->va_start = vmap_start;
+			free->va_end = vmap_end;
+
+			insert_vmap_area_augment(free, NULL,
+				&free_vmap_area_root,
+					&free_vmap_area_list);
+		}
+	}
+}
+
 void __init vmalloc_init(void)
 {
 	struct vmap_area *va;
 	struct vm_struct *tmp;
 	int i;
 
+	/*
+	 * Create the cache for vmap_area objects.
+	 */
+	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
+
 	for_each_possible_cpu(i) {
 		struct vmap_block_queue *vbq;
 		struct vfree_deferred *p;
@@ -1269,16 +1861,21 @@
 
 	/* Import existing vmlist entries. */
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
-		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
+		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+		if (WARN_ON_ONCE(!va))
+			continue;
+
 		va->flags = VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;
 		va->vm = tmp;
-		__insert_vmap_area(va);
+		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 	}
 
-	vmap_area_pcpu_hole = VMALLOC_END;
-
+	/*
+	 * Now we can initialize the free vmap space.
+	 */
+	vmap_init_free_space();
 	vmap_initialized = true;
 }
 
@@ -2381,81 +2978,64 @@
 }
 
 /**
- * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
- * @end: target address
- * @pnext: out arg for the next vmap_area
- * @pprev: out arg for the previous vmap_area
+ * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
+ * @addr: target address
  *
- * Returns: %true if either or both of next and prev are found,
- *	    %false if no vmap_area exists
- *
- * Find vmap_areas end addresses of which enclose @end.  ie. if not
- * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
+ * Returns: the vmap_area if it is found. If there is no such area,
+ *   the highest preceding (reverse order) vmap_area is returned,
+ *   i.e. va->va_start < addr && va->va_end < addr, or NULL
+ *   if there are no areas before @addr.
  */
-static bool pvm_find_next_prev(unsigned long end,
-			       struct vmap_area **pnext,
-			       struct vmap_area **pprev)
+static struct vmap_area *
+pvm_find_va_enclose_addr(unsigned long addr)
 {
-	struct rb_node *n = vmap_area_root.rb_node;
-	struct vmap_area *va = NULL;
+	struct vmap_area *va, *tmp;
+	struct rb_node *n;
+
+	n = free_vmap_area_root.rb_node;
+	va = NULL;
 
 	while (n) {
-		va = rb_entry(n, struct vmap_area, rb_node);
-		if (end < va->va_end)
-			n = n->rb_left;
-		else if (end > va->va_end)
+		tmp = rb_entry(n, struct vmap_area, rb_node);
+		if (tmp->va_start <= addr) {
+			va = tmp;
+			if (tmp->va_end >= addr)
+				break;
+
 			n = n->rb_right;
-		else
-			break;
+		} else {
+			n = n->rb_left;
+		}
 	}
 
-	if (!va)
-		return false;
-
-	if (va->va_end > end) {
-		*pnext = va;
-		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
-	} else {
-		*pprev = va;
-		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
-	}
-	return true;
+	return va;
 }
 
 /**
- * pvm_determine_end - find the highest aligned address between two vmap_areas
- * @pnext: in/out arg for the next vmap_area
- * @pprev: in/out arg for the previous vmap_area
- * @align: alignment
+ * pvm_determine_end_from_reverse - find the highest aligned address
+ * of a free block below VMALLOC_END
+ * @va:
+ *   in - the VA from which we start the search (reverse order);
+ *   out - the VA with the highest aligned end address.
  *
- * Returns: determined end address
- *
- * Find the highest aligned address between *@pnext and *@pprev below
- * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
- * down address is between the end addresses of the two vmap_areas.
- *
- * Please note that the address returned by this function may fall
- * inside *@pnext vmap_area.  The caller is responsible for checking
- * that.
+ * Returns: determined end address within vmap_area
  */
-static unsigned long pvm_determine_end(struct vmap_area **pnext,
-				       struct vmap_area **pprev,
-				       unsigned long align)
+static unsigned long
+pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
 {
-	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
+	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
 	unsigned long addr;
 
-	if (*pnext)
-		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
-	else
-		addr = vmalloc_end;
-
-	while (*pprev && (*pprev)->va_end > addr) {
-		*pnext = *pprev;
-		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
+	if (likely(*va)) {
+		list_for_each_entry_from_reverse((*va),
+				&free_vmap_area_list, list) {
+			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
+			if ((*va)->va_start < addr)
+				return addr;
+		}
 	}
 
-	return addr;
+	return 0;
 }
 
 /**
@@ -2475,12 +3055,12 @@
  * to gigabytes.  To avoid interacting with regular vmallocs, these
  * areas are allocated from top.
  *
- * Despite its complicated look, this allocator is rather simple.  It
- * does everything top-down and scans areas from the end looking for
- * matching slot.  While scanning, if any of the areas overlaps with
- * existing vmap_area, the base address is pulled down to fit the
- * area.  Scanning is repeated till all the areas fit and then all
- * necessary data structures are inserted and the result is returned.
+ * Despite its complicated look, this allocator is rather simple. It
+ * does everything top-down and scans free blocks from the end looking
+ * for a matching base. While scanning, if any of the areas do not fit,
+ * the base address is pulled down to fit the area. Scanning is repeated
+ * till all the areas fit, and then all necessary data structures are
+ * inserted and the result is returned.
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
@@ -2488,11 +3068,12 @@
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
-	struct vmap_area **vas, *prev, *next;
+	struct vmap_area **vas, *va;
 	struct vm_struct **vms;
 	int area, area2, last_area, term_area;
-	unsigned long base, start, end, last_end;
+	unsigned long base, start, size, end, last_end;
 	bool purged = false;
+	enum fit_type type;
 
 	/* verify parameters and allocate data structures */
 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -2528,7 +3109,7 @@
 		goto err_free2;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
@@ -2541,49 +3122,29 @@
 	start = offsets[area];
 	end = start + sizes[area];
 
-	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
-		base = vmalloc_end - last_end;
-		goto found;
-	}
-	base = pvm_determine_end(&next, &prev, align) - end;
+	va = pvm_find_va_enclose_addr(vmalloc_end);
+	base = pvm_determine_end_from_reverse(&va, align) - end;
 
 	while (true) {
-		BUG_ON(next && next->va_end <= base + end);
-		BUG_ON(prev && prev->va_end > base + end);
-
 		/*
 		 * base might have underflowed, add last_end before
 		 * comparing.
 		 */
-		if (base + last_end < vmalloc_start + last_end) {
-			spin_unlock(&vmap_area_lock);
-			if (!purged) {
-				purge_vmap_area_lazy();
-				purged = true;
-				goto retry;
-			}
-			goto err_free;
-		}
+		if (base + last_end < vmalloc_start + last_end)
+			goto overflow;
 
 		/*
-		 * If next overlaps, move base downwards so that it's
-		 * right below next and then recheck.
+		 * Fitting base has not been found.
 		 */
-		if (next && next->va_start < base + end) {
-			base = pvm_determine_end(&next, &prev, align) - end;
-			term_area = area;
-			continue;
-		}
+		if (va == NULL)
+			goto overflow;
 
 		/*
-		 * If prev overlaps, shift down next and prev and move
-		 * base so that it's right below new next and then
-		 * recheck.
+		 * If this VA does not fit, move base downwards and recheck.
 		 */
-		if (prev && prev->va_end > base + start)  {
-			next = prev;
-			prev = node_to_va(rb_prev(&next->rb_node));
-			base = pvm_determine_end(&next, &prev, align) - end;
+		if (base + start < va->va_start || base + end > va->va_end) {
+			va = node_to_va(rb_prev(&va->rb_node));
+			base = pvm_determine_end_from_reverse(&va, align) - end;
 			term_area = area;
 			continue;
 		}
@@ -2595,22 +3156,41 @@
 		area = (area + nr_vms - 1) % nr_vms;
 		if (area == term_area)
 			break;
+
 		start = offsets[area];
 		end = start + sizes[area];
-		pvm_find_next_prev(base + end, &next, &prev);
+		va = pvm_find_va_enclose_addr(base + end);
 	}
-found:
+
 	/* we've found a fitting base, insert all va's */
 	for (area = 0; area < nr_vms; area++) {
-		struct vmap_area *va = vas[area];
+		int ret;
 
-		va->va_start = base + offsets[area];
-		va->va_end = va->va_start + sizes[area];
-		__insert_vmap_area(va);
+		start = base + offsets[area];
+		size = sizes[area];
+
+		va = pvm_find_va_enclose_addr(start);
+		if (WARN_ON_ONCE(va == NULL))
+			/* It is a BUG(), but trigger recovery instead. */
+			goto recovery;
+
+		type = classify_va_fit_type(va, start, size);
+		if (WARN_ON_ONCE(type == NOTHING_FIT))
+			/* It is a BUG(), but trigger recovery instead. */
+			goto recovery;
+
+		ret = adjust_va_to_fit_type(va, start, size, type);
+		if (unlikely(ret))
+			goto recovery;
+
+		/* Allocated area. */
+		va = vas[area];
+		va->va_start = start;
+		va->va_end = start + size;
+
+		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 	}
 
-	vmap_area_pcpu_hole = base + offsets[last_area];
-
 	spin_unlock(&vmap_area_lock);
 
 	/* insert all vm's */
@@ -2621,9 +3201,38 @@
 	kfree(vas);
 	return vms;
 
+recovery:
+	/* Remove previously inserted areas. */
+	while (area--) {
+		__free_vmap_area(vas[area]);
+		vas[area] = NULL;
+	}
+
+overflow:
+	spin_unlock(&vmap_area_lock);
+	if (!purged) {
+		purge_vmap_area_lazy();
+		purged = true;
+
+		/* Before "retry", check if we recover. */
+		for (area = 0; area < nr_vms; area++) {
+			if (vas[area])
+				continue;
+
+			vas[area] = kmem_cache_zalloc(
+				vmap_area_cachep, GFP_KERNEL);
+			if (!vas[area])
+				goto err_free;
+		}
+
+		goto retry;
+	}
+
 err_free:
 	for (area = 0; area < nr_vms; area++) {
-		kfree(vas[area]);
+		if (vas[area])
+			kmem_cache_free(vmap_area_cachep, vas[area]);
+
 		kfree(vms[area]);
 	}
 err_free2:
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
index 501ff67..cf08d8c 100644
--- a/security/pfe/pfk_ice.c
+++ b/security/pfe/pfk_ice.c
@@ -15,6 +15,7 @@
 #include <linux/device-mapper.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
+#include <soc/qcom/qtee_shmbridge.h>
 #include <crypto/ice.h>
 #include "pfk_ice.h"
 
@@ -52,8 +53,6 @@
 
 #define ICE_BUFFER_SIZE 64
 
-static uint8_t ice_buffer[ICE_BUFFER_SIZE];
-
 enum {
 	ICE_CIPHER_MODE_XTS_128 = 0,
 	ICE_CIPHER_MODE_CBC_128 = 1,
@@ -67,22 +66,26 @@
 	struct scm_desc desc = {0};
 	int ret = 0;
 	uint32_t smc_id = 0;
-	char *tzbuf = (char *)ice_buffer;
-	uint32_t size = ICE_BUFFER_SIZE / 2;
-
-	memset(tzbuf, 0, ICE_BUFFER_SIZE);
-
-	memcpy(ice_buffer, key, size);
-	memcpy(ice_buffer+size, salt, size);
-
-	dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
+	char *tzbuf = NULL;
+	uint32_t key_size = ICE_BUFFER_SIZE / 2;
+	struct qtee_shm shm;
 
 	smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
 
+	ret = qtee_shmbridge_allocate_shm(ICE_BUFFER_SIZE, &shm);
+	if (ret)
+		return -ENOMEM;
+
+	tzbuf = shm.vaddr;
+
+	memcpy(tzbuf, key, key_size);
+	memcpy(tzbuf+key_size, salt, key_size);
+	dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
+
 	desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
 	desc.args[0] = index;
-	desc.args[1] = virt_to_phys(tzbuf);
-	desc.args[2] = ICE_BUFFER_SIZE;
+	desc.args[1] = shm.paddr;
+	desc.args[2] = shm.size;
 	desc.args[3] = ICE_CIPHER_MODE_XTS_256;
 	desc.args[4] = data_unit;
 
@@ -90,6 +93,7 @@
 	if (ret)
 		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
 
+	qtee_shmbridge_free_shm(&shm);
 	return ret;
 }
 
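
Aside: the change above also removes the long-lived static ice_buffer, so key and salt now only ever live in a per-call shmbridge allocation that is freed on every path, including after an SCM failure. A condensed sketch of that lifecycle, using only the qtee_shmbridge API and the struct qtee_shm fields (vaddr, paddr, size) that appear in the patch; the surrounding function is illustrative:

#include <linux/types.h>
#include <linux/string.h>
#include <soc/qcom/qtee_shmbridge.h>

/* Illustrative only: allocate, fill, hand to TZ, then always free. */
static int example_tz_key_call(const u8 *key, const u8 *salt, u32 half)
{
	struct qtee_shm shm;
	int ret;

	ret = qtee_shmbridge_allocate_shm(2 * half, &shm);
	if (ret)
		return -ENOMEM;

	memcpy(shm.vaddr, key, half);
	memcpy((u8 *)shm.vaddr + half, salt, half);

	/* ... set up the scm_desc with shm.paddr / shm.size and call TZ ... */

	qtee_shmbridge_free_shm(&shm);	/* freed on every path */
	return ret;
}
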
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 635e5c1..2dff571 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -366,26 +366,27 @@
 	struct avc_xperms_decision_node *xpd_node;
 	struct extended_perms_decision *xpd;
 
-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xpd_node)
 		return NULL;
 
 	xpd = &xpd_node->xpd;
 	if (which & XPERMS_ALLOWED) {
 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->allowed)
 			goto error;
 	}
 	if (which & XPERMS_AUDITALLOW) {
 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->auditallow)
 			goto error;
 	}
 	if (which & XPERMS_DONTAUDIT) {
 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_NOWAIT);
+						GFP_NOWAIT | __GFP_NOWARN);
 		if (!xpd->dontaudit)
 			goto error;
 	}
@@ -413,7 +414,8 @@
 {
 	struct avc_xperms_node *xp_node;
 
-	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+			GFP_NOWAIT | __GFP_NOWARN);
 	if (!xp_node)
 		return xp_node;
 	INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -569,7 +571,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
 	if (!node)
 		goto out;
 
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index db2a278..721716f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -4231,7 +4231,8 @@
 		 * dynamic FE links have no fixed DAI mapping.
 		 * CODEC<->CODEC links have no direct connection.
 		 */
-		if (rtd->dai_link->dynamic || rtd->dai_link->params)
+		if (rtd->dai_link->dynamic || rtd->dai_link->params ||
+		    rtd->dai_link->dynamic_be)
 			continue;
 
 		dapm_connect_dai_link_widgets(card, rtd);
diff --git a/techpack/Kbuild b/techpack/Kbuild
index 9cf37f8..aef74e6 100644
--- a/techpack/Kbuild
+++ b/techpack/Kbuild
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
+TECHPACK?=y
+
 techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -type d -not -name ".*")
-obj-y += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
+obj-${TECHPACK} += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
 
 techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
-header-y += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))
+header-${TECHPACK} += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))
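
Aside: TECHPACK defaults to y, so the default build is unchanged; overriding it on the command line (for example, make TECHPACK=n) moves every techpack object into obj-n and its UAPI headers into header-n, neither of which Kbuild processes, giving a one-variable switch for building the kernel without the techpack drivers.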