Merge "dt-bindings: clk: rpmh: Add support for QLINK clock IDs"
diff --git a/arch/arm/configs/vendor/bengal-perf_defconfig b/arch/arm/configs/vendor/bengal-perf_defconfig
index 2dacf3f..9cb1803 100644
--- a/arch/arm/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm/configs/vendor/bengal-perf_defconfig
@@ -52,7 +52,6 @@
 CONFIG_ARM_PSCI=y
 CONFIG_HIGHMEM=y
 CONFIG_SECCOMP=y
-CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
diff --git a/arch/arm/configs/vendor/bengal_defconfig b/arch/arm/configs/vendor/bengal_defconfig
index 46918bd6..c588b8d 100644
--- a/arch/arm/configs/vendor/bengal_defconfig
+++ b/arch/arm/configs/vendor/bengal_defconfig
@@ -55,7 +55,6 @@
 CONFIG_ARM_PSCI=y
 CONFIG_HIGHMEM=y
 CONFIG_SECCOMP=y
-CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_EFI=y
 CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index 2358115..e9c3a94 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -18,6 +18,8 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
@@ -632,7 +634,7 @@
 CONFIG_PAGE_OWNER=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_PANIC_TIMEOUT=-1
+CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index f90e430..649781ca 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -17,6 +17,8 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_DEBUG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
@@ -675,7 +677,7 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=-1
+CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index d7d763d..6f33015 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -223,6 +223,8 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
 CONFIG_IP_SCTP=y
@@ -424,8 +426,7 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_DVB_MPQ=m
 CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index b3d2663..485594a 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -230,6 +230,8 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
 CONFIG_IP_SCTP=y
@@ -440,8 +442,7 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_DVB_MPQ=m
 CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index f6df776..43418e5 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -674,7 +674,7 @@
 CONFIG_PAGE_OWNER=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_PANIC_TIMEOUT=-1
+CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 8e025ea..9b4fe41 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -714,7 +714,7 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=-1
+CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 817a274..97369b1 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1070,6 +1070,10 @@
 		}
 		trace_fastrpc_dma_map(fl->cid, fd, map->phys, map->size,
 			len, mflags, map->attach->dma_map_attrs);
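+		/* Fail the map if the DMA buffer is smaller than requested */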
+		if (map->size < len) {
+			err = -EFAULT;
+			goto bail;
+		}
 
 		vmid = fl->apps->channel[fl->cid].vmid;
 		if (!sess->smmu.enabled && !vmid) {
@@ -4114,6 +4118,9 @@
 			fl->ws_timeout = cp->pm.timeout;
 		fastrpc_pm_awake(fl);
 		break;
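+	/* User-initiated cleanup of the DSP-side process; result ignored */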
+	case FASTRPC_CONTROL_DSPPROCESS_CLEAN:
+		(void)fastrpc_release_current_dsp_process(fl);
+		break;
 	default:
 		err = -EBADRQC;
 		break;
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index bcc63c8..7501b1c 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 #ifndef ADSPRPC_SHARED_H
 #define ADSPRPC_SHARED_H
@@ -250,6 +250,8 @@
 	FASTRPC_CONTROL_KALLOC		=	3,
 	FASTRPC_CONTROL_WAKELOCK	=	4,
 	FASTRPC_CONTROL_PM		=	5,
+	/* Clean process on DSP */
+	FASTRPC_CONTROL_DSPPROCESS_CLEAN	=	6,
 };
 
 struct fastrpc_ctrl_latency {
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 088e449..019e203 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -893,7 +893,7 @@
 		if (src_len < sizeof(struct diag_build_mask_req_sub_t))
 			goto fail;
 		req_sub = (struct diag_build_mask_req_sub_t *)src_buf;
-		rsp_sub.header.cmd_code = DIAG_CMD_MSG_CONFIG;
+		rsp_sub.header.cmd_code = req_sub->header.cmd_code;
 		rsp_sub.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
 		rsp_sub.ssid_first = req_sub->ssid_first;
 		rsp_sub.ssid_last = req_sub->ssid_last;
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 9e72243..35e67e7 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -860,7 +860,7 @@
 DEFINE_CLK_SMD_RPM_XO_BUFFER(scuba, ln_bb_clk2, ln_bb_clk2_a, 0x2);
 DEFINE_CLK_SMD_RPM_XO_BUFFER(scuba, rf_clk3, rf_clk3_a, 6);
 
-DEFINE_CLK_SMD_RPM(scuba, qpic_clk, qpic_a_clk, RPM_SMD_QPIC_CLK, 0);
+DEFINE_CLK_SMD_RPM(scuba, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
 
 /* Scuba */
 static struct clk_hw *scuba_clks[] = {
@@ -950,7 +950,7 @@
 
 static const struct rpm_smd_clk_desc rpm_clk_scuba = {
 	.clks = scuba_clks,
-	.num_rpm_clks = RPM_SMD_QPIC_A_CLK,
+	.num_rpm_clks = RPM_SMD_CE1_A_CLK,
 	.num_clks = ARRAY_SIZE(scuba_clks),
 };
 
diff --git a/drivers/clk/qcom/debugcc-bengal.c b/drivers/clk/qcom/debugcc-bengal.c
index bf3a92f..2bb282b 100644
--- a/drivers/clk/qcom/debugcc-bengal.c
+++ b/drivers/clk/qcom/debugcc-bengal.c
@@ -165,7 +165,6 @@
 	"gcc_gpu_memnoc_gfx_clk",
 	"gcc_gpu_snoc_dvm_gfx_clk",
 	"gcc_gpu_throttle_core_clk",
-	"gcc_gpu_throttle_xo_clk",
 	"gcc_pdm2_clk",
 	"gcc_pdm_ahb_clk",
 	"gcc_pdm_xo4_clk",
@@ -270,7 +269,6 @@
 	0xE8,		/* gcc_gpu_memnoc_gfx_clk */
 	0xEA,		/* gcc_gpu_snoc_dvm_gfx_clk */
 	0xEF,		/* gcc_gpu_throttle_core_clk */
-	0xEE,		/* gcc_gpu_throttle_xo_clk */
 	0x73,		/* gcc_pdm2_clk */
 	0x71,		/* gcc_pdm_ahb_clk */
 	0x72,		/* gcc_pdm_xo4_clk */
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index 39a58ac..3cd103b 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
  */
 #include <linux/errno.h>
 #include <linux/module.h>
@@ -470,11 +470,14 @@
 	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
 	int i, out, ret;
 	unsigned int version;
+	struct msm_adreno_extended_profile *gpu_profile;
 
-	struct msm_adreno_extended_profile *gpu_profile = container_of(
-					(devfreq->profile),
-					struct msm_adreno_extended_profile,
-					profile);
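+	/* Non-NULL partner_gpu_profile means a governor instance exists */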
+	if (partner_gpu_profile)
+		return -EEXIST;
+
+	gpu_profile = container_of(devfreq->profile,
+			struct msm_adreno_extended_profile,
+			profile);
 
 	/*
 	 * Assuming that we have only one instance of the adreno device
@@ -495,6 +498,7 @@
 		tz_pwrlevels[0] = i;
 	} else {
 		pr_err(TAG "tz_pwrlevels[] is too short\n");
+		partner_gpu_profile = NULL;
 		return -EINVAL;
 	}
 
@@ -511,6 +515,7 @@
 				sizeof(version));
 	if (ret != 0 || version > MAX_TZ_VERSION) {
 		pr_err(TAG "tz_init failed\n");
+		partner_gpu_profile = NULL;
 		return ret;
 	}
 
@@ -606,7 +611,7 @@
 		break;
 	}
 
-	if (partner_gpu_profile && partner_gpu_profile->bus_devfreq)
+	if (!result && partner_gpu_profile && partner_gpu_profile->bus_devfreq)
 		switch (event) {
 		case DEVFREQ_GOV_START:
 			queue_work(workqueue,
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index e238df7..48ca137 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -340,6 +340,8 @@
 			goto out;
 		}
 
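+		/* Re-arm the completion before each USB request is queued */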
+		init_completion(&usb_req->write_done);
+
 		actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
 					req_size, &usb_req->buf);
 		usb_req->length = actual;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 3d41b3a..e07cadd 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1513,12 +1513,13 @@
 	    (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
 	     && drvdata->byte_cntr->sw_usb)) {
 		ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
-		if (!ret) {
-			drvdata->mode = CS_MODE_SYSFS;
-			atomic_inc(csdev->refcnt);
-		}
+		if (ret)
+			goto out;
 	}
 
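+	/* Common to all out modes: mark sysfs mode and take a refcount */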
+	drvdata->mode = CS_MODE_SYSFS;
+	atomic_inc(csdev->refcnt);
+
 	drvdata->enable = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1527,11 +1528,11 @@
 	if (free_buf)
 		tmc_etr_free_sysfs_buf(free_buf);
 
-	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
-		tmc_etr_byte_cntr_start(drvdata->byte_cntr);
-
-	if (!ret)
+	if (!ret) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+			tmc_etr_byte_cntr_start(drvdata->byte_cntr);
 		dev_info(drvdata->dev, "TMC-ETR enabled\n");
+	}
 
 	return ret;
 }
diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c
index e0a8890..fdb903d 100644
--- a/drivers/leds/leds-qti-flash.c
+++ b/drivers/leds/leds-qti-flash.c
@@ -19,6 +19,8 @@
 
 #include "leds.h"
 
+#define FLASH_PERPH_SUBTYPE		0x05
+
 #define FLASH_LED_STATUS1		0x06
 
 #define FLASH_LED_STATUS2		0x07
@@ -59,6 +61,9 @@
 #define  FLASH_LED_ENABLE(id)			BIT(id)
 #define  FLASH_LED_DISABLE		0
 
+#define FORCE_TORCH_MODE		0x68
+#define FORCE_TORCH			BIT(0)
+
 #define MAX_IRES_LEVELS		2
 #define IRES_12P5_MAX_CURR_MA	1500
 #define IRES_5P0_MAX_CURR_MA		640
@@ -139,6 +144,7 @@
 	u16			base;
 	u8		max_channels;
 	u8		ref_count;
+	u8		subtype;
 };
 
 static const u32 flash_led_max_ires_values[MAX_IRES_LEVELS] = {
@@ -336,6 +342,13 @@
 			goto out;
 	}
 
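+	/* Flash subtype 0x6 uses a force-torch bit while torch is active */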
+	if (fnode->type == FLASH_LED_TYPE_TORCH && led->subtype == 0x6) {
+		rc = qti_flash_led_masked_write(led, FORCE_TORCH_MODE,
+					FORCE_TORCH, FORCE_TORCH);
+		if (rc < 0)
+			goto out;
+	}
+
 	fnode->configured = true;
 
 	if ((fnode->strobe_sel == HW_STROBE) &&
@@ -368,6 +381,13 @@
 	if (rc < 0)
 		goto out;
 
+	if (fnode->type == FLASH_LED_TYPE_TORCH && led->subtype == 0x6) {
+		rc = qti_flash_led_masked_write(led, FORCE_TORCH_MODE,
+						FORCE_TORCH, 0);
+		if (rc < 0)
+			goto out;
+	}
+
 	fnode->current_ma = 0;
 
 out:
@@ -1203,6 +1223,12 @@
 		return rc;
 	}
 
+	rc = qti_flash_led_read(led, FLASH_PERPH_SUBTYPE, &led->subtype, 1);
+	if (rc < 0) {
+		pr_err("Failed to read flash-perph subtype rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = qti_flash_led_setup(led);
 	if (rc < 0) {
 		pr_err("Failed to initialize flash LED, rc=%d\n", rc);
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index beb1795..f31cead 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -1355,12 +1355,6 @@
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 	int rc = 0;
 
-	if (host_ctx->network_num > 0) {
-		NPU_ERR("Need to unload network first\n");
-		mutex_unlock(&npu_dev->dev_lock);
-		return -EINVAL;
-	}
-
 	if (enable) {
 		NPU_DBG("enable fw\n");
 		rc = enable_fw(npu_dev);
@@ -1370,9 +1364,6 @@
 			host_ctx->npu_init_cnt++;
 			NPU_DBG("npu_init_cnt %d\n",
 				host_ctx->npu_init_cnt);
-			/* set npu to lowest power level */
-			if (npu_set_uc_power_level(npu_dev, 1))
-				NPU_WARN("Failed to set uc power level\n");
 		}
 	} else if (host_ctx->npu_init_cnt > 0) {
 		NPU_DBG("disable fw\n");
@@ -1469,7 +1460,7 @@
 	default:
 		ret = npu_host_get_fw_property(client->npu_dev, &prop);
 		if (ret) {
-			NPU_ERR("npu_host_set_fw_property failed\n");
+			NPU_ERR("npu_host_get_fw_property failed\n");
 			return ret;
 		}
 		break;
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 5381e2c..e0b681b 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -85,6 +85,7 @@
 	struct npu_misc_cmd *cmd);
 static struct npu_misc_cmd *npu_find_misc_cmd(struct npu_host_ctx *ctx,
 	uint32_t trans_id);
+static int npu_get_fw_caps(struct npu_device *npu_dev);
 
 /* -------------------------------------------------------------------------
  * Function Definitions - Init / Deinit
@@ -211,6 +212,37 @@
 	return ret;
 }
 
+static int npu_get_fw_caps(struct npu_device *npu_dev)
+{
+	int ret = 0, i;
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+	if (host_ctx->fw_caps_valid) {
+		NPU_DBG("cached fw caps available\n");
+		return ret;
+	}
+
+	memset(&host_ctx->fw_caps, 0, sizeof(host_ctx->fw_caps));
+	host_ctx->fw_caps.prop_id = MSM_NPU_PROP_ID_FW_GETCAPS;
+	host_ctx->fw_caps.num_of_params = PROP_PARAM_MAX_SIZE;
+
+	ret = npu_host_get_fw_property(npu_dev, &host_ctx->fw_caps);
+	if (!ret) {
+		NPU_DBG("Get fw caps successfully\n");
+		host_ctx->fw_caps_valid = true;
+
+		for (i = 0; i < host_ctx->fw_caps.num_of_params; i++)
+			NPU_INFO("fw caps %d:%x\n", i,
+				host_ctx->fw_caps.prop_param[i]);
+	} else {
+		/* save the return code */
+		host_ctx->fw_caps_err_code = ret;
+		NPU_ERR("get fw caps failed %d\n", ret);
+	}
+
+	return ret;
+}
+
 static void npu_load_fw_work(struct work_struct *work)
 {
 	int ret;
@@ -224,8 +256,12 @@
 	ret = load_fw_nolock(npu_dev, false);
 	mutex_unlock(&host_ctx->lock);
 
-	if (ret)
+	if (ret) {
 		NPU_ERR("load fw failed %d\n", ret);
+		return;
+	}
+
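+	/* Prime the fw capability cache now that firmware is loaded */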
+	npu_get_fw_caps(npu_dev);
 }
 
 int load_fw(struct npu_device *npu_dev)
@@ -265,6 +301,8 @@
 
 	subsystem_put_local(host_ctx->subsystem_handle);
 	host_ctx->fw_state = FW_UNLOADED;
+	host_ctx->fw_caps_valid = false;
+	host_ctx->fw_caps_err_code = 0;
 	NPU_DBG("fw is unloaded\n");
 	mutex_unlock(&host_ctx->lock);
 
@@ -736,6 +774,8 @@
 
 	INIT_LIST_HEAD(&host_ctx->misc_cmd_list);
 	host_ctx->auto_pil_disable = false;
+	host_ctx->fw_caps_valid = false;
+	host_ctx->fw_caps_err_code = 0;
 
 	return 0;
 
@@ -2125,7 +2165,13 @@
 		break;
 	default:
 		NPU_ERR("unsupported property %d\n", property->prop_id);
-		goto set_prop_exit;
+		goto free_prop_packet;
+	}
+
+	ret = enable_fw(npu_dev);
+	if (ret) {
+		NPU_ERR("failed to enable fw\n");
+		goto free_prop_packet;
 	}
 
 	prop_packet->header.cmd_type = NPU_IPC_CMD_SET_PROPERTY;
@@ -2140,16 +2186,17 @@
 	for (i = 0; i < num_of_params; i++)
 		prop_packet->prop_param[i] = property->prop_param[i];
 
-	mutex_lock(&host_ctx->lock);
 	misc_cmd = npu_alloc_misc_cmd(host_ctx);
 	if (!misc_cmd) {
 		NPU_ERR("Can't allocate misc_cmd\n");
 		ret = -ENOMEM;
-		goto set_prop_exit;
+		goto disable_fw;
 	}
 
 	misc_cmd->cmd_type = NPU_IPC_CMD_SET_PROPERTY;
 	misc_cmd->trans_id = prop_packet->header.trans_id;
+
+	mutex_lock(&host_ctx->lock);
 	npu_queue_misc_cmd(host_ctx, misc_cmd);
 
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
@@ -2183,10 +2230,13 @@
 
 free_misc_cmd:
 	npu_dequeue_misc_cmd(host_ctx, misc_cmd);
-	npu_free_misc_cmd(host_ctx, misc_cmd);
-set_prop_exit:
 	mutex_unlock(&host_ctx->lock);
+	npu_free_misc_cmd(host_ctx, misc_cmd);
+disable_fw:
+	disable_fw(npu_dev);
+free_prop_packet:
 	kfree(prop_packet);
+
 	return ret;
 }
 
@@ -2204,6 +2254,15 @@
 		NPU_ERR("Not supproted fw property id %x\n",
 			property->prop_id);
 		return -EINVAL;
+	} else if (property->prop_id == MSM_NPU_PROP_ID_FW_GETCAPS) {
+		if (host_ctx->fw_caps_valid) {
+			NPU_DBG("return cached fw_caps\n");
+			memcpy(property, &host_ctx->fw_caps, sizeof(*property));
+			return 0;
+		} else if (host_ctx->fw_caps_err_code) {
+			NPU_DBG("return cached error code\n");
+			return host_ctx->fw_caps_err_code;
+		}
 	}
 
 	num_of_params = min_t(uint32_t, property->num_of_params,
@@ -2214,6 +2273,12 @@
 	if (!prop_packet)
 		return -ENOMEM;
 
+	ret = enable_fw(npu_dev);
+	if (ret) {
+		NPU_ERR("failed to enable fw\n");
+		goto free_prop_packet;
+	}
+
 	prop_packet->header.cmd_type = NPU_IPC_CMD_GET_PROPERTY;
 	prop_packet->header.size = pkt_size;
 	prop_packet->header.trans_id =
@@ -2226,16 +2291,17 @@
 	for (i = 0; i < num_of_params; i++)
 		prop_packet->prop_param[i] = property->prop_param[i];
 
-	mutex_lock(&host_ctx->lock);
 	misc_cmd = npu_alloc_misc_cmd(host_ctx);
 	if (!misc_cmd) {
 		NPU_ERR("Can't allocate misc_cmd\n");
 		ret = -ENOMEM;
-		goto get_prop_exit;
+		goto disable_fw;
 	}
 
 	misc_cmd->cmd_type = NPU_IPC_CMD_GET_PROPERTY;
 	misc_cmd->trans_id = prop_packet->header.trans_id;
+
+	mutex_lock(&host_ctx->lock);
 	npu_queue_misc_cmd(host_ctx, misc_cmd);
 
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
@@ -2264,26 +2330,43 @@
 	}
 
 	ret = misc_cmd->ret_status;
+	prop_from_fw = &misc_cmd->u.prop;
 	if (!ret) {
 		/* Return prop data retrieved from fw to user */
-		prop_from_fw = &misc_cmd->u.prop;
 		if (property->prop_id == prop_from_fw->prop_id &&
 			property->network_hdl == prop_from_fw->network_hdl) {
+			num_of_params = min_t(uint32_t,
+				prop_from_fw->num_of_params,
+				(uint32_t)PROP_PARAM_MAX_SIZE);
 			property->num_of_params = num_of_params;
 			for (i = 0; i < num_of_params; i++)
 				property->prop_param[i] =
 					prop_from_fw->prop_param[i];
+		} else {
+			NPU_WARN("Not Match: id %x:%x hdl %x:%x\n",
+				property->prop_id, prop_from_fw->prop_id,
+				property->network_hdl,
+				prop_from_fw->network_hdl);
+			property->num_of_params = 0;
 		}
 	} else {
 		NPU_ERR("get fw property failed %d\n", ret);
+		NPU_ERR("prop_id: %x\n", prop_from_fw->prop_id);
+		NPU_ERR("network_hdl: %x\n", prop_from_fw->network_hdl);
+		NPU_ERR("param_num: %x\n", prop_from_fw->num_of_params);
+		for (i = 0; i < prop_from_fw->num_of_params; i++)
+			NPU_ERR("%x\n", prop_from_fw->prop_param[i]);
 	}
 
 free_misc_cmd:
 	npu_dequeue_misc_cmd(host_ctx, misc_cmd);
-	npu_free_misc_cmd(host_ctx, misc_cmd);
-get_prop_exit:
 	mutex_unlock(&host_ctx->lock);
+	npu_free_misc_cmd(host_ctx, misc_cmd);
+disable_fw:
+	disable_fw(npu_dev);
+free_prop_packet:
 	kfree(prop_packet);
+
 	return ret;
 }
 
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 397d450..5cc0e58 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -138,6 +138,10 @@
 	bool bridge_mbox_pwr_on;
 	void *ipc_msg_buf;
 	struct list_head misc_cmd_list;
+
+	struct msm_npu_property fw_caps;
+	bool fw_caps_valid;
+	uint32_t fw_caps_err_code;
 };
 
 struct npu_device;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b013b84..d88832a 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1569,6 +1569,8 @@
 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
 	int err = 0;
 
 	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
@@ -1576,9 +1578,27 @@
 
 	mmc_deferred_scaling(mq->card->host);
 	mmc_cqe_clk_scaling_start_busy(mq, mq->card->host, true);
+	/*
+	 * When the voltage corner is at LSVS under low load and a sudden
+	 * burst of requests fills every device queue slot, frequency
+	 * scaling has to wait until all outstanding requests complete.
+	 * That delays scaling up and hurts performance. Avoid it by
+	 * allowing only one request in the device queue while the device
+	 * runs in a lower speed mode.
+	 */
+	if (host->clk_scaling.state == MMC_LOAD_LOW) {
+		err = host->cqe_ops->cqe_wait_for_idle(host);
+		if (err) {
+			pr_err("%s: %s: CQE went in recovery path.\n",
+				mmc_hostname(host), __func__);
+			goto stop_scaling;
+		}
+	}
 
 	err =  mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
 
+stop_scaling:
 	if (err)
 		mmc_cqe_clk_scaling_stop_busy(mq->card->host, true, false);
 
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index c590f53..94e0a4d 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -418,6 +418,21 @@
 	}
 }
 
+int cnss_bus_check_link_status(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_check_link_status(plat_priv->bus_priv);
+	default:
+		cnss_pr_dbg("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return 0;
+	}
+}
+
 int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
 			    u32 *val)
 {
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 4b9e91f..1e7cc0f 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -48,6 +48,7 @@
 int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_status status);
 int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv);
+int cnss_bus_check_link_status(struct cnss_plat_data *plat_priv);
 int cnss_bus_debug_reg_read(struct cnss_plat_data *plat_priv, u32 offset,
 			    u32 *val);
 int cnss_bus_debug_reg_write(struct cnss_plat_data *plat_priv, u32 offset,
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 8efd309..9a348db 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1024,6 +1024,10 @@
 
 	switch (reason) {
 	case CNSS_REASON_LINK_DOWN:
+		if (!cnss_bus_check_link_status(plat_priv)) {
+			cnss_pr_dbg("Skip link down recovery as link is already up\n");
+			return 0;
+		}
 		if (test_bit(LINK_DOWN_SELF_RECOVERY,
 			     &plat_priv->ctrl_params.quirks))
 			goto self_recovery;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 992ec20..ced6ff2 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -356,7 +356,7 @@
 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
 
-static int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
+int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
 {
 	u16 device_id;
 
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index ab1d8cb..5ffb1ff 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -162,6 +162,7 @@
 	return atomic_read(&pci_priv->drv_connected);
 }
 
+int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv);
 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index 71ebe4c..b30331f 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -4741,7 +4741,7 @@
 		struct msm_pcie_notify client_notify;
 
 		client_notify.event = event;
-		client_notify.user = notify->user;
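+		/* Report the user cookie from this client's registration */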
+		client_notify.user = dev->event_reg->user;
 		client_notify.data = notify->data;
 		client_notify.options = notify->options;
 		PCIE_DUMP(dev, "PCIe: callback RC%d for event %d\n",
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 7ee84e6..6392315 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -3740,8 +3740,8 @@
 EXPORT_SYMBOL(ipa_get_prot_id);
 
 static const struct dev_pm_ops ipa_pm_ops = {
-	.suspend = ipa_ap_suspend,
-	.resume_noirq = ipa_ap_resume,
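+	/* Suspend IPA late and resume it early relative to other devices */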
+	.suspend_late = ipa_ap_suspend,
+	.resume_early = ipa_ap_resume,
 };
 
 static struct platform_driver ipa_plat_drv = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 5c90b0e..f2cdd9b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -2779,12 +2779,7 @@
 			IPA_MPM_DBG("Already out of lpm\n");
 		}
 		break;
-	case MHI_CB_EE_RDDM:
-	case MHI_CB_PENDING_DATA:
-	case MHI_CB_SYS_ERROR:
-	case MHI_CB_FATAL_ERROR:
-	case MHI_CB_EE_MISSION_MODE:
-	case MHI_CB_DTR_SIGNAL:
+	default:
 		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
 		break;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 26511ed..a5f7a4c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2697,8 +2697,8 @@
 MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
 
 static const struct dev_pm_ops rmnet_ipa_pm_ops = {
-	.suspend = rmnet_ipa_ap_suspend,
-	.resume_noirq = rmnet_ipa_ap_resume,
+	.suspend_late = rmnet_ipa_ap_suspend,
+	.resume_early = rmnet_ipa_ap_resume,
 };
 
 static struct platform_driver rmnet_ipa_driver = {
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index 8a54554..170ca87 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -455,6 +455,15 @@
 		return rc;
 	}
 
+	if (last_ibat == FIFO_I_RESET_VAL) {
+		/* First FIFO is not complete, read instantaneous IBAT */
+		rc = qg_get_battery_current(chip, ibat_ua);
+		if (rc < 0)
+			pr_err("Failed to read inst. IBAT rc=%d\n", rc);
+
+		return rc;
+	}
+
 	last_ibat = sign_extend32(last_ibat, 15);
 	*ibat_ua = qg_iraw_to_ua(chip, last_ibat);
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 1811d4d..d6dc65b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2204,6 +2204,7 @@
 {
 	struct icnss_priv *priv = dev_get_drvdata(dev);
 	unsigned long iova;
+	int prop_len = 0;
 	size_t len;
 	int ret = 0;
 
@@ -2222,7 +2223,8 @@
 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
 	iova = roundup(penv->smmu_iova_ipa_current, PAGE_SIZE);
 
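+	/* Enforce the IOVA limit only when DT provides qcom,iommu-geometry */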
-	if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
+	if (of_get_property(dev->of_node, "qcom,iommu-geometry", &prop_len) &&
+	    iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
 		icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
 			     iova,
 			     &priv->smmu_iova_ipa_start,
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index 0a38c45..dd41636 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -3,6 +3,8 @@
  * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
+#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__
+
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/device.h>
@@ -400,8 +402,10 @@
 	struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked(
 			TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj);
 
-	if (!mem_obj)
+	if (!mem_obj) {
+		pr_err("memory object not found\n");
 		return OBJECT_ERROR_BADOBJ;
+	}
 
 	if (is_mem_regn_obj)
 		kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
@@ -432,8 +436,10 @@
 	struct smcinvoke_cbobj *obj = NULL;
 	struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id);
 
-	if (!server)
+	if (!server) {
+		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
 		return OBJECT_ERROR_BADOBJ;
+	}
 
 	head = &server->pending_cbobjs;
 	list_for_each_entry(cbobj, head, list)
@@ -471,8 +477,10 @@
 	struct list_head *head = NULL;
 	struct smcinvoke_cbobj *cbobj = NULL;
 
-	if (!srvr_info)
+	if (!srvr_info) {
+		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
 		return ret;
+	}
 
 	head = &srvr_info->pending_cbobjs;
 	list_for_each_entry(cbobj, head, list)
@@ -784,8 +792,10 @@
 {
 	struct smcinvoke_tzcb_req *msg = buf;
 
-	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0))
+	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) {
+		pr_err("Invalid object count in %s\n", __func__);
 		return OBJECT_ERROR_INVALID;
+	}
 
 	return release_tzhandle_locked(msg->hdr.tzhandle);
 }
@@ -805,9 +815,10 @@
 	struct sg_table *sgt = NULL;
 
 	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
-		(buf_len - msg->args[0].b.offset <  msg->args[0].b.size))
+		(buf_len - msg->args[0].b.offset <  msg->args[0].b.size)) {
+		pr_err("Invalid counts received for mapping mem obj\n");
 		return OBJECT_ERROR_INVALID;
-
+	}
 	/* args[0] = BO, args[1] = OI, args[2] = OO */
 	ob = buf + msg->args[0].b.offset;
 	oo =  &msg->args[2].handle;
@@ -817,6 +828,7 @@
 						SMCINVOKE_MEM_RGN_OBJ);
 	if (!mem_obj) {
 		mutex_unlock(&g_smcinvoke_lock);
+		pr_err("Memory object not found\n");
 		return OBJECT_ERROR_BADOBJ;
 	}
 
@@ -826,6 +838,7 @@
 					&smcinvoke_pdev->dev);
 		if (IS_ERR(buf_attach)) {
 			ret = OBJECT_ERROR_KMEM;
+			pr_err("dma buf attach failed, ret: %d\n", ret);
 			goto out;
 		}
 		mem_obj->buf_attach = buf_attach;
@@ -833,6 +846,7 @@
 		sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
 		if (IS_ERR(sgt)) {
 			ret = OBJECT_ERROR_KMEM;
+			pr_err("mapping dma buffers failed, ret: %d\n", ret);
 			goto out;
 		}
 		mem_obj->sgt = sgt;
@@ -840,12 +854,14 @@
 		/* contiguous only => nents=1 */
 		if (sgt->nents != 1) {
 			ret = OBJECT_ERROR_INVALID;
+			pr_err("sg enries are not contigous, ret: %d\n", ret);
 			goto out;
 		}
 		mem_obj->p_addr = sg_dma_address(sgt->sgl);
 		mem_obj->p_addr_len = sgt->sgl->length;
 		if (!mem_obj->p_addr) {
 			ret = OBJECT_ERROR_INVALID;
+			pr_err("invalid physical address, ret: %d\n", ret);
 			goto out;
 		}
 		mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
@@ -875,6 +891,7 @@
 		cb_req->result = OBJECT_OK;
 		break;
 	default:
+		pr_err(" invalid operation for tz kernel object\n");
 		cb_req->result = OBJECT_ERROR_INVALID;
 		break;
 	}
@@ -902,8 +919,10 @@
 	struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
 	struct smcinvoke_server_info *srvr_info = NULL;
 
-	if (buf_len < sizeof(struct smcinvoke_tzcb_req))
+	if (buf_len < sizeof(struct smcinvoke_tzcb_req)) {
+		pr_err("smaller buffer length : %u\n", buf_len);
 		return;
+	}
 
 	cb_req = buf;
 
@@ -913,6 +932,7 @@
 	} else if (TZHANDLE_IS_MEM_OBJ(cb_req->hdr.tzhandle)) {
 		return process_mem_obj(buf, buf_len);
 	} else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) {
+		pr_err("Request object is not a callback object\n");
 		cb_req->result = OBJECT_ERROR_INVALID;
 		return;
 	}
@@ -926,12 +946,16 @@
 	if (!tmp_cb_req) {
 		/* we need to return error to caller so fill up result */
 		cb_req->result = OBJECT_ERROR_KMEM;
+		pr_err("failed to create copy of request, set result: %d\n",
+							cb_req->result);
 		return;
 	}
 
 	cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
 	if (!cb_txn) {
 		cb_req->result = OBJECT_ERROR_KMEM;
+		pr_err("failed to allocate memory for request, result: %d\n",
+							cb_req->result);
 		kfree(tmp_cb_req);
 		return;
 	}
@@ -950,6 +974,7 @@
 				TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle));
 	if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
 		/* ret equals Object_ERROR_DEFUNCT, at this point go to out */
+		pr_err("sever is either invalid or defunct\n");
 		mutex_unlock(&g_smcinvoke_lock);
 		goto out;
 	}
@@ -983,6 +1008,7 @@
 	} else if (!srvr_info ||
 		srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
 		cb_req->result = OBJECT_ERROR_DEFUNCT;
+		pr_err("server invalid, res: %d\n", cb_req->result);
 	} else {
 		pr_debug("%s wait_event interrupted ret = %d\n", __func__, ret);
 		cb_req->result = OBJECT_ERROR_ABORT;
@@ -1460,14 +1486,16 @@
 	struct smcinvoke_server server_req = {0};
 	struct smcinvoke_server_info *server_info = NULL;
 
-	if (_IOC_SIZE(cmd) != sizeof(server_req))
+	if (_IOC_SIZE(cmd) != sizeof(server_req)) {
+		pr_err("invalid command size received for server request\n");
 		return -EINVAL;
-
+	}
 	ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg,
 					sizeof(server_req));
-	if (ret)
+	if (ret) {
+		pr_err("copying server request from user failed\n");
 		return -EFAULT;
-
+	}
 	server_info = kzalloc(sizeof(*server_info), GFP_KERNEL);
 	if (!server_info)
 		return -ENOMEM;
@@ -1507,25 +1535,36 @@
 	struct smcinvoke_cb_txn *cb_txn = NULL;
 	struct smcinvoke_server_info *server_info = NULL;
 
-	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept))
+	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) {
+		pr_err("command size invalid for accept request\n");
 		return -EINVAL;
+	}
 
 	if (copy_from_user(&user_args, (void __user *)arg,
-					sizeof(struct smcinvoke_accept)))
+					sizeof(struct smcinvoke_accept))) {
+		pr_err("copying accept request from user failed\n");
 		return -EFAULT;
+	}
 
-	if (user_args.argsize != sizeof(union smcinvoke_arg))
+	if (user_args.argsize != sizeof(union smcinvoke_arg)) {
+		pr_err("arguments size is invalid for accept thread\n");
 		return -EINVAL;
+	}
 
 	/* ACCEPT is available only on server obj */
-	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER)
+	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) {
+		pr_err("invalid object type received for accept req\n");
 		return -EPERM;
+	}
 
 	mutex_lock(&g_smcinvoke_lock);
 	server_info = get_cb_server_locked(server_obj->server_id);
 	mutex_unlock(&g_smcinvoke_lock);
-	if (!server_info)
+	if (!server_info) {
+		pr_err("No matching server with server id : %u found\n",
+						server_obj->server_id);
 		return -EINVAL;
+	}
 
 	if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
 		server_info->state = 0;
@@ -1602,6 +1641,7 @@
 			ret = marshal_in_tzcb_req(cb_txn, &user_args,
 							server_obj->server_id);
 			if (ret) {
+				pr_err("failed to marshal in the callback request\n");
 				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
 				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
 				kref_put(&cb_txn->ref_cnt, delete_cb_txn);
@@ -1620,6 +1660,10 @@
 out:
 	if (server_info)
 		kref_put(&server_info->ref_cnt, destroy_cb_server);
+
+	if (ret && ret != -ERESTARTSYS)
+		pr_err("accept thread returning with ret: %d\n", ret);
+
 	return ret;
 }
 
@@ -1645,18 +1689,26 @@
 	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
 	bool tz_acked = false;
 
-	if (_IOC_SIZE(cmd) != sizeof(req))
+	if (_IOC_SIZE(cmd) != sizeof(req)) {
+		pr_err("command size for invoke req is invalid\n");
 		return -EINVAL;
+	}
 
-	if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ)
+	if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
+		pr_err("object type for invoke req is invalid\n");
 		return -EPERM;
+	}
 
 	ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
-	if (ret)
+	if (ret) {
+		pr_err("copying invoke req failed\n");
 		return -EFAULT;
+	}
 
-	if (req.argsize != sizeof(union smcinvoke_arg))
+	if (req.argsize != sizeof(union smcinvoke_arg)) {
+		pr_err("arguments size for invoke req is invalid\n");
 		return -EINVAL;
+	}
 
 	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
 			OBJECT_COUNTS_NUM_objects(req.counts);
@@ -1679,6 +1731,7 @@
 	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
 	if (ret) {
 		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for in msg in invoke req\n");
 		goto out;
 	}
 	in_msg = in_shm.vaddr;
@@ -1689,14 +1742,17 @@
 	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
 	if (ret) {
 		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for out msg in invoke req\n");
 		goto out;
 	}
 	out_msg = out_shm.vaddr;
 
 	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
 			inmsg_size, filp_to_release, tzhandles_to_release);
-	if (ret)
+	if (ret) {
+		pr_err("failed to marshal in invoke req, ret :%d\n", ret);
 		goto out;
+	}
 
 	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
 					out_msg, out_shm.paddr, outmsg_size,
@@ -1706,8 +1762,10 @@
 	 * If scm_call is success, TZ owns responsibility to release
 	 * refs for local objs.
 	 */
-	if (!tz_acked)
+	if (!tz_acked) {
+		pr_debug("scm call successful\n");
 		goto out;
+	}
 	memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release));
 
 	/*
@@ -1738,6 +1796,10 @@
 	qtee_shmbridge_free_shm(&in_shm);
 	qtee_shmbridge_free_shm(&out_shm);
 	kfree(args_buf);
+
+	if (ret)
+		pr_err("invoke thread returning with ret = %d\n", ret);
+
 	return ret;
 }
 
@@ -1818,12 +1880,14 @@
 	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
 	if (ret) {
 		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for in msg in release\n");
 		goto out;
 	}
 
 	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
 	if (ret) {
 		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for out msg in release\n");
 		goto out;
 	}
 
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 7888648..b5638df 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2012-2013, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2018-2020 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/hwspinlock.h>
@@ -192,6 +192,19 @@
 	__le32 offset_free_cached;
 	__le32 reserved[3];
 };
+
+/**
+ * struct smem_partition_desc - descriptor for partition
+ * @virt_base:	starting virtual address of partition
+ * @phys_base:	starting physical address of partition
+ * @cacheline:	alignment for "cached" entries
+ * @size:	size of partition
+ */
+struct smem_partition_desc {
+	void __iomem *virt_base;
+	u32 phys_base;
+	u32 cacheline;
+	u32 size;
+};
 
 static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
 
@@ -248,9 +261,9 @@
  * struct qcom_smem - device data for the smem device
  * @dev:	device pointer
  * @hwlock:	reference to a hwspinlock
- * @global_partition_entry: pointer to global partition entry when in use
- * @ptable_entries: list of pointers to partitions table entry of current
- *		processor/host
+ * @ptable_base: virtual base of partition table
+ * @global_partition_desc: descriptor for global partition when in use
+ * @partition_desc: list of partition descriptor of current processor/host
  * @item_count: max accepted item number
  * @num_regions: number of @regions
  * @regions:	list of the memory regions defining the shared memory
@@ -260,9 +273,10 @@
 
 	struct hwspinlock *hwlock;
 
-	struct smem_ptable_entry *global_partition_entry;
-	struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
 	u32 item_count;
+	struct smem_ptable *ptable_base;
+	struct smem_partition_desc global_partition_desc;
+	struct smem_partition_desc partition_desc[SMEM_HOST_COUNT];
 
 	unsigned num_regions;
 	struct smem_region regions[0];
@@ -274,12 +288,6 @@
 /* Timeout (ms) for the trylock of remote spinlocks */
 #define HWSPINLOCK_TIMEOUT	1000
 
-static struct smem_partition_header *
-ptable_entry_to_phdr(struct smem_ptable_entry *entry)
-{
-	return __smem->regions[0].virt_base + le32_to_cpu(entry->offset);
-}
-
 static struct smem_private_entry *
 phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
 {
@@ -346,7 +354,7 @@
 }
 
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-				   struct smem_ptable_entry *entry,
+				   struct smem_partition_desc *p_desc,
 				   unsigned item,
 				   size_t size)
 {
@@ -356,8 +364,8 @@
 	void *cached;
 	void *p_end;
 
-	phdr = ptable_entry_to_phdr(entry);
-	p_end = (void *)phdr + le32_to_cpu(entry->size);
+	phdr = p_desc->virt_base;
+	p_end = (void *)phdr + p_desc->size;
 
 	hdr = phdr_to_first_uncached_entry(phdr);
 	end = phdr_to_last_uncached_entry(phdr);
@@ -450,7 +458,7 @@
  */
 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
-	struct smem_ptable_entry *entry;
+	struct smem_partition_desc *p_desc;
 	unsigned long flags;
 	int ret;
 
@@ -472,12 +480,12 @@
 	if (ret)
 		return ret;
 
-	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
-		entry = __smem->ptable_entries[host];
-		ret = qcom_smem_alloc_private(__smem, entry, item, size);
-	} else if (__smem->global_partition_entry) {
-		entry = __smem->global_partition_entry;
-		ret = qcom_smem_alloc_private(__smem, entry, item, size);
+	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
+		p_desc = &__smem->partition_desc[host];
+		ret = qcom_smem_alloc_private(__smem, p_desc, item, size);
+	} else if (__smem->global_partition_desc.virt_base) {
+		p_desc = &__smem->global_partition_desc;
+		ret = qcom_smem_alloc_private(__smem, p_desc, item, size);
 	} else {
 		ret = qcom_smem_alloc_global(__smem, item, size);
 	}
@@ -528,22 +536,20 @@
 }
 
 static void *qcom_smem_get_private(struct qcom_smem *smem,
-				   struct smem_ptable_entry *entry,
+				   struct smem_partition_desc *p_desc,
 				   unsigned item,
 				   size_t *size)
 {
 	struct smem_private_entry *e, *end;
 	struct smem_partition_header *phdr;
 	void *item_ptr, *p_end;
-	u32 partition_size;
 	size_t cacheline;
 	u32 padding_data;
 	u32 e_size;
 
-	phdr = ptable_entry_to_phdr(entry);
-	partition_size = le32_to_cpu(entry->size);
-	p_end = (void *)phdr + partition_size;
-	cacheline = le32_to_cpu(entry->cacheline);
+	phdr = p_desc->virt_base;
+	p_end = (void *)phdr + p_desc->size;
+	cacheline = p_desc->cacheline;
 
 	e = phdr_to_first_uncached_entry(phdr);
 	end = phdr_to_last_uncached_entry(phdr);
@@ -560,7 +566,7 @@
 				e_size = le32_to_cpu(e->size);
 				padding_data = le16_to_cpu(e->padding_data);
 
-				if (e_size < partition_size
+				if (e_size < p_desc->size
 				    && padding_data < e_size)
 					*size = e_size - padding_data;
 				else
@@ -596,7 +602,7 @@
 				e_size = le32_to_cpu(e->size);
 				padding_data = le16_to_cpu(e->padding_data);
 
-				if (e_size < partition_size
+				if (e_size < p_desc->size
 				    && padding_data < e_size)
 					*size = e_size - padding_data;
 				else
@@ -635,7 +641,7 @@
  */
 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 {
-	struct smem_ptable_entry *entry;
+	struct smem_partition_desc *p_desc;
 	unsigned long flags;
 	int ret;
 	void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -652,12 +658,12 @@
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
-		entry = __smem->ptable_entries[host];
-		ptr = qcom_smem_get_private(__smem, entry, item, size);
-	} else if (__smem->global_partition_entry) {
-		entry = __smem->global_partition_entry;
-		ptr = qcom_smem_get_private(__smem, entry, item, size);
+	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
+		p_desc = &__smem->partition_desc[host];
+		ptr = qcom_smem_get_private(__smem, p_desc, item, size);
+	} else if (__smem->global_partition_desc.virt_base) {
+		p_desc = &__smem->global_partition_desc;
+		ptr = qcom_smem_get_private(__smem, p_desc, item, size);
 	} else {
 		ptr = qcom_smem_get_global(__smem, item, size);
 	}
@@ -679,30 +685,30 @@
 int qcom_smem_get_free_space(unsigned host)
 {
 	struct smem_partition_header *phdr;
-	struct smem_ptable_entry *entry;
+	struct smem_partition_desc *p_desc;
 	struct smem_header *header;
 	unsigned ret;
 
 	if (!__smem)
 		return -EPROBE_DEFER;
 
-	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
-		entry = __smem->ptable_entries[host];
-		phdr = ptable_entry_to_phdr(entry);
+	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
+		p_desc = &__smem->partition_desc[host];
+		phdr = p_desc->virt_base;
 
 		ret = le32_to_cpu(phdr->offset_free_cached) -
 		      le32_to_cpu(phdr->offset_free_uncached);
 
-		if (ret > le32_to_cpu(entry->size))
+		if (ret > p_desc->size)
 			return -EINVAL;
-	} else if (__smem->global_partition_entry) {
-		entry = __smem->global_partition_entry;
-		phdr = ptable_entry_to_phdr(entry);
+	} else if (__smem->global_partition_desc.virt_base) {
+		p_desc = &__smem->global_partition_desc;
+		phdr = p_desc->virt_base;
 
 		ret = le32_to_cpu(phdr->offset_free_cached) -
 		      le32_to_cpu(phdr->offset_free_uncached);
 
-		if (ret > le32_to_cpu(entry->size))
+		if (ret > p_desc->size)
 			return -EINVAL;
 	} else {
 		header = __smem->regions[0].virt_base;
@@ -716,6 +722,15 @@
 }
 EXPORT_SYMBOL(qcom_smem_get_free_space);
 
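+/* Return 1 if addr lies in [virt_base, virt_base + size), else 0 */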
+static int addr_in_range(void *virt_base, unsigned int size, void *addr)
+{
+	if (virt_base && addr >= virt_base &&
+			addr < virt_base + size)
+		return 1;
+
+	return 0;
+}
+
 /**
  * qcom_smem_virt_to_phys() - return the physical address associated
  * with an smem item pointer (previously returned by qcom_smem_get()
@@ -725,17 +740,36 @@
  */
 phys_addr_t qcom_smem_virt_to_phys(void *p)
 {
-	unsigned i;
+	struct smem_partition_desc *p_desc;
+	struct smem_region *area;
+	u64 offset;
+	u32 i;
+
+	for (i = 0; i < SMEM_HOST_COUNT; i++) {
+		p_desc = &__smem->partition_desc[i];
+
+		if (addr_in_range(p_desc->virt_base, p_desc->size, p)) {
+			offset = p - p_desc->virt_base;
+
+			return (phys_addr_t)p_desc->phys_base + offset;
+		}
+	}
+
+	p_desc = &__smem->global_partition_desc;
+
+	if (addr_in_range(p_desc->virt_base, p_desc->size, p)) {
+		offset = p - p_desc->virt_base;
+
+		return (phys_addr_t)p_desc->phys_base + offset;
+	}
 
 	for (i = 0; i < __smem->num_regions; i++) {
-		struct smem_region *region = &__smem->regions[i];
+		area = &__smem->regions[i];
 
-		if (p < region->virt_base)
-			continue;
-		if (p < region->virt_base + region->size) {
-			u64 offset = p - region->virt_base;
+		if (addr_in_range(area->virt_base, area->size, p)) {
+			offset = p - area->virt_base;
 
-			return (phys_addr_t)region->aux_base + offset;
+			return (phys_addr_t)area->aux_base + offset;
 		}
 	}
 
@@ -759,7 +793,7 @@
 	struct smem_ptable *ptable;
 	u32 version;
 
-	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+	ptable = smem->ptable_base;
 	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
 		return ERR_PTR(-ENOENT);
 
@@ -793,11 +827,12 @@
 	struct smem_partition_header *header;
 	struct smem_ptable_entry *entry;
 	struct smem_ptable *ptable;
+	u32 phys_addr;
 	u32 host0, host1, size;
 	bool found = false;
 	int i;
 
-	if (smem->global_partition_entry) {
+	if (smem->global_partition_desc.virt_base) {
 		dev_err(smem->dev, "Already found the global partition\n");
 		return -EINVAL;
 	}
@@ -827,7 +862,12 @@
 		return -EINVAL;
 	}
 
-	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+	header = devm_ioremap_wc(smem->dev,
+				  phys_addr, le32_to_cpu(entry->size));
+	if (!header)
+		return -ENOMEM;
+
 	host0 = le16_to_cpu(header->host0);
 	host1 = le16_to_cpu(header->host1);
 
@@ -853,7 +893,10 @@
 		return -EINVAL;
 	}
 
-	smem->global_partition_entry = entry;
+	smem->global_partition_desc.virt_base = (void __iomem *)header;
+	smem->global_partition_desc.phys_base = phys_addr;
+	smem->global_partition_desc.size = le32_to_cpu(entry->size);
+	smem->global_partition_desc.cacheline = le32_to_cpu(entry->cacheline);
 
 	return 0;
 }
@@ -864,6 +907,7 @@
 	struct smem_partition_header *header;
 	struct smem_ptable_entry *entry;
 	struct smem_ptable *ptable;
+	u32 phys_addr;
 	unsigned int remote_host;
 	u32 host0, host1;
 	int i;
@@ -898,14 +942,20 @@
 			return -EINVAL;
 		}
 
-		if (smem->ptable_entries[remote_host]) {
+		if (smem->partition_desc[remote_host].virt_base) {
 			dev_err(smem->dev,
 				"Already found a partition for host %d\n",
 				remote_host);
 			return -EINVAL;
 		}
 
-		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+		phys_addr = smem->regions[0].aux_base +
+				le32_to_cpu(entry->offset);
+		header = devm_ioremap_wc(smem->dev,
+					  phys_addr, le32_to_cpu(entry->size));
+		if (!header)
+			return -ENOMEM;
+
 		host0 = le16_to_cpu(header->host0);
 		host1 = le16_to_cpu(header->host1);
 
@@ -940,7 +990,13 @@
 			return -EINVAL;
 		}
 
-		smem->ptable_entries[remote_host] = entry;
+		smem->partition_desc[remote_host].virt_base =
+						(void __iomem *)header;
+		smem->partition_desc[remote_host].phys_base = phys_addr;
+		smem->partition_desc[remote_host].size =
+						le32_to_cpu(entry->size);
+		smem->partition_desc[remote_host].cacheline =
+						le32_to_cpu(entry->cacheline);
 	}
 
 	return 0;
@@ -973,6 +1029,61 @@
 	return 0;
 }
 
+static int qcom_smem_map_toc(struct qcom_smem *smem, struct device *dev,
+				const char *name, int i)
+{
+	struct device_node *np;
+	struct resource r;
+	int ret;
+
+	np = of_parse_phandle(dev->of_node, name, 0);
+	if (!np) {
+		dev_err(dev, "No %s specified\n", name);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	smem->regions[i].aux_base = (u32)r.start;
+	smem->regions[i].size = resource_size(&r);
+	/* map starting 4K for smem header */
+	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, SZ_4K);
+	/* map last 4k for toc */
+	smem->ptable_base = devm_ioremap_wc(dev,
+				r.start + resource_size(&r) - SZ_4K, SZ_4K);
+
+	if (!smem->regions[i].virt_base || !smem->ptable_base)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int qcom_smem_map_legacy(struct qcom_smem *smem)
+{
+	struct smem_header *header;
+	u32 phys_addr;
+	u32 p_size;
+
+	phys_addr = smem->regions[0].aux_base;
+	header = smem->regions[0].virt_base;
+	p_size = header->available;
+
+	/* unmap previously mapped starting 4k for smem header */
+	devm_iounmap(smem->dev, smem->regions[0].virt_base);
+
+	smem->regions[0].size = p_size;
+	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev,
+						      phys_addr, p_size);
+
+	if (!smem->regions[0].virt_base)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int qcom_smem_probe(struct platform_device *pdev)
 {
 	struct smem_header *header;
@@ -995,7 +1106,7 @@
 	smem->dev = &pdev->dev;
 	smem->num_regions = num_regions;
 
-	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
+	ret = qcom_smem_map_toc(smem, &pdev->dev, "memory-region", 0);
 	if (ret)
 		return ret;
 
@@ -1019,6 +1130,7 @@
 		smem->item_count = qcom_smem_get_item_count(smem);
 		break;
 	case SMEM_GLOBAL_HEAP_VERSION:
+		qcom_smem_map_legacy(smem);
 		smem->item_count = SMEM_ITEM_COUNT;
 		break;
 	default:
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 00dc4cb..19a6511 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1592,7 +1592,7 @@
 	}
 
 	seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
-						min, max, sum / iteration);
+				min, max, (iteration ? (sum / iteration) : 0));
 	min = max = sum = iteration = 0;
 	seq_puts(s, "\n=======================\n");
 	seq_puts(s, "MTP Read Stats:\n");
@@ -1614,7 +1614,7 @@
 	}
 
 	seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
-						min, max, sum / iteration);
+				min, max, (iteration ? (sum / iteration) : 0));
 	spin_unlock_irqrestore(&dev->lock, flags);
 	return 0;
 }
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index 79820b4..bb146d7 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __MDSS_7NM_PLL_CLK_H
@@ -25,24 +25,36 @@
 #define SHADOW_POST_VCO_DIV_0_CLK	15
 #define SHADOW_PCLK_SRC_MUX_0_CLK	16
 #define SHADOW_PCLK_SRC_0_CLK		17
-#define VCO_CLK_1		18
-#define PLL_OUT_DIV_1_CLK	19
-#define BITCLK_SRC_1_CLK	20
-#define BYTECLK_SRC_1_CLK	21
-#define POST_BIT_DIV_1_CLK	22
-#define POST_VCO_DIV_1_CLK	23
-#define BYTECLK_MUX_1_CLK	24
-#define PCLK_SRC_MUX_1_CLK	25
-#define PCLK_SRC_1_CLK		26
-#define PCLK_MUX_1_CLK		27
-#define SHADOW_VCO_CLK_1		28
-#define SHADOW_PLL_OUT_DIV_1_CLK	29
-#define SHADOW_BITCLK_SRC_1_CLK		30
-#define SHADOW_BYTECLK_SRC_1_CLK	31
-#define SHADOW_POST_BIT_DIV_1_CLK	32
-#define SHADOW_POST_VCO_DIV_1_CLK	33
-#define SHADOW_PCLK_SRC_MUX_1_CLK	34
-#define SHADOW_PCLK_SRC_1_CLK		35
+/* CPHY clocks for DSI-0 PLL */
+#define CPHY_BYTECLK_SRC_0_CLK	18
+#define POST_VCO_DIV3_5_0_CLK	19
+#define CPHY_PCLK_SRC_MUX_0_CLK	20
+#define CPHY_PCLK_SRC_0_CLK	21
+
+#define VCO_CLK_1		22
+#define PLL_OUT_DIV_1_CLK	23
+#define BITCLK_SRC_1_CLK	24
+#define BYTECLK_SRC_1_CLK	25
+#define POST_BIT_DIV_1_CLK	26
+#define POST_VCO_DIV_1_CLK	27
+#define BYTECLK_MUX_1_CLK	28
+#define PCLK_SRC_MUX_1_CLK	29
+#define PCLK_SRC_1_CLK		30
+#define PCLK_MUX_1_CLK		31
+#define SHADOW_VCO_CLK_1		32
+#define SHADOW_PLL_OUT_DIV_1_CLK	33
+#define SHADOW_BITCLK_SRC_1_CLK		34
+#define SHADOW_BYTECLK_SRC_1_CLK	35
+#define SHADOW_POST_BIT_DIV_1_CLK	36
+#define SHADOW_POST_VCO_DIV_1_CLK	37
+#define SHADOW_PCLK_SRC_MUX_1_CLK	38
+#define SHADOW_PCLK_SRC_1_CLK		39
+/* CPHY clocks for DSI-1 PLL */
+#define CPHY_BYTECLK_SRC_1_CLK	40
+#define POST_VCO_DIV3_5_1_CLK	41
+#define CPHY_PCLK_SRC_MUX_1_CLK	42
+#define CPHY_PCLK_SRC_1_CLK	43
 
 /* DP PLL clocks */
 #define	DP_VCO_CLK	0
diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h
index bd68c53..d55f475 100644
--- a/include/uapi/linux/msm_npu.h
+++ b/include/uapi/linux/msm_npu.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _UAPI_MSM_NPU_H_
@@ -87,6 +87,7 @@
 #define MSM_NPU_PROP_ID_CLK_GATING_MODE (MSM_NPU_FW_PROP_ID_START + 2)
 #define MSM_NPU_PROP_ID_HW_VERSION (MSM_NPU_FW_PROP_ID_START + 3)
 #define MSM_NPU_PROP_ID_FW_VERSION (MSM_NPU_FW_PROP_ID_START + 4)
+#define MSM_NPU_PROP_ID_FW_GETCAPS (MSM_NPU_FW_PROP_ID_START + 5)
 
 /* features supported by driver */
 #define MSM_NPU_FEATURE_MULTI_EXECUTE  0x1
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1cc20ed..b0e8970 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2029,6 +2029,9 @@
 	struct sk_buff *skb, *next;
 
 	skb = tcp_send_head(sk);
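+	/* The write queue may be empty; return before dereferencing skb */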
+	if (!skb)
+		return false;
+
 	tcp_for_write_queue_from_safe(skb, next, sk) {
 		if (len <= skb->len)
 			break;