Merge "mhi: core: Add checks for bhi and bhie offsets"
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 74b58921..5713457 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -330,7 +330,6 @@
 CONFIG_CNSS2_QMI=y
 CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
-CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
 CONFIG_NVM=y
 CONFIG_NVM_PBLK=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 46b77b9..5033e4a 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -344,7 +344,6 @@
 CONFIG_CNSS2_QMI=y
 CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
-CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
 CONFIG_NVM=y
 CONFIG_NVM_PBLK=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 7548051..d043592 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -281,6 +281,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -607,6 +608,7 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
 CONFIG_ICNSS=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 9c80d86..ba806f7 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -287,6 +287,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -626,6 +627,7 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
 CONFIG_ICNSS=y
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a337b63..8e9ee2c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -1378,8 +1378,8 @@
 			}
 		}
 		mutex_unlock(&driver->md_session_lock);
-		diag_update_md_clients(HDLC_SUPPORT_TYPE);
 		mutex_unlock(&driver->hdlc_disable_mutex);
+		diag_update_md_clients(HDLC_SUPPORT_TYPE);
 		return 0;
 	}
 #endif
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f..bc0c4cf 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 812ba67..9f19508 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -2308,7 +2308,7 @@
 
 	_debug_dent = debugfs_create_dir("qcedev", NULL);
 	if (IS_ERR(_debug_dent)) {
-		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
 				PTR_ERR(_debug_dent));
 		return PTR_ERR(_debug_dent);
 	}
@@ -2318,7 +2318,7 @@
 	dent = debugfs_create_file(name, 0644, _debug_dent,
 			&_debug_qcedev, &_debug_stats_ops);
 	if (dent == NULL) {
-		pr_err("qcedev debugfs_create_file fail, error %ld\n",
+		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
 				PTR_ERR(dent));
 		rc = PTR_ERR(dent);
 		goto err;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 6a8e0d2..9578c3a 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -5523,7 +5523,7 @@
 
 	_debug_dent = debugfs_create_dir("qcrypto", NULL);
 	if (IS_ERR(_debug_dent)) {
-		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+		pr_debug("qcrypto debugfs_create_dir fail, error %ld\n",
 				PTR_ERR(_debug_dent));
 		return PTR_ERR(_debug_dent);
 	}
@@ -5533,7 +5533,7 @@
 	dent = debugfs_create_file(name, 0644, _debug_dent,
 				&_debug_qcrypto, &_debug_stats_ops);
 	if (dent == NULL) {
-		pr_err("qcrypto debugfs_create_file fail, error %ld\n",
+		pr_debug("qcrypto debugfs_create_file fail, error %ld\n",
 				PTR_ERR(dent));
 		rc = PTR_ERR(dent);
 		goto err;
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 9be7cc4..06125b1 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -881,6 +881,17 @@
 }
 EXPORT_SYMBOL_GPL(extcon_set_property_capability);
 
+int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+				const u32 *exclusive)
+{
+	if (!edev)
+		return -EINVAL;
+
+	edev->mutually_exclusive = exclusive;
+	return 0;
+}
+EXPORT_SYMBOL(extcon_set_mutually_exclusive);
+
 /**
  * extcon_get_extcon_dev() - Get the extcon device instance from the name.
  * @extcon_name:	the extcon name provided with extcon_dev_register()
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
index e37e770..3a40d05 100644
--- a/drivers/gpu/drm/bridge/lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -893,7 +893,7 @@
 		gpio_set_value(pdata->reset_gpio, 0);
 		msleep(20);
 		gpio_set_value(pdata->reset_gpio, 1);
-		msleep(300);
+		msleep(180);
 	} else {
 		gpio_set_value(pdata->reset_gpio, 0);
 	}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index d8e69f8..5e98878 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1570,6 +1570,8 @@
 		}
 
 		for (i = 0; i < csdev->nr_outport; i++) {
+			if (desc->pdata->child_names[i] == NULL)
+				continue;
 			conns[i].outport = desc->pdata->outports[i];
 			conns[i].child_name = desc->pdata->child_names[i];
 			conns[i].child_port = desc->pdata->child_ports[i];
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 32ac255..36c62dc 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -50,16 +50,22 @@
 {
 	struct device_node *ep = NULL;
 	int in = 0, out = 0;
+	struct of_endpoint endpoint;
 
 	do {
 		ep = of_graph_get_next_endpoint(node, ep);
 		if (!ep)
 			break;
 
+		if (of_graph_parse_endpoint(ep, &endpoint))
+			continue;
+
 		if (of_property_read_bool(ep, "slave-mode"))
-			in++;
+			in = (endpoint.port + 1 > in) ?
+				endpoint.port + 1 : in;
 		else
-			out++;
+			out = (endpoint.port + 1) > out ?
+				endpoint.port + 1 : out;
 
 	} while (ep);
 
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index d9f111e..b278778 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -274,6 +274,7 @@
 	struct workqueue_struct *hj_wq;
 	struct work_struct hj_wd;
 	struct wakeup_source hj_wl;
+	struct pinctrl_state *i3c_gpio_disable;
 };
 
 struct geni_i3c_i2c_dev_data {
@@ -735,7 +736,7 @@
 {
 	dma_addr_t tx_dma = 0;
 	dma_addr_t rx_dma = 0;
-	int ret, time_remaining = 0;
+	int ret = 0, time_remaining = 0;
 	enum i3c_trans_dir rnw = gi3c->cur_rnw;
 	u32 len = gi3c->cur_len;
 
@@ -816,10 +817,12 @@
 		geni_se_tx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
 				tx_dma, len);
 	}
-	ret = gi3c->err;
-	if (gi3c->err)
+
+	if (gi3c->err) {
+		ret = (gi3c->err == -EBUSY) ? I3C_ERROR_M2 : gi3c->err;
 		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
 			"I3C transaction error :%d\n", gi3c->err);
+	}
 
 	gi3c->cur_buf = NULL;
 	gi3c->cur_len = gi3c->cur_idx = 0;
@@ -1523,10 +1526,8 @@
 	return -ENOSPC;
 }
 
-static int qcom_deallocate_ibi_table_entry(struct i3c_dev_desc *dev)
+static int qcom_deallocate_ibi_table_entry(struct geni_i3c_dev *gi3c)
 {
-	struct i3c_master_controller *m = i3c_dev_get_master(dev);
-	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
 	u32 i, timeout;
 
 	for (i = 0; i < gi3c->ibi.num_slots; i++) {
@@ -1558,16 +1559,14 @@
 	return 0;
 }
 
-static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
+static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c)
 {
-	struct i3c_master_controller *m = i3c_dev_get_master(dev);
-	struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
 	u32 val, timeout;
 	int ret = 0;
 
 	val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0));
 	if (val) {
-		ret = qcom_deallocate_ibi_table_entry(dev);
+		ret = qcom_deallocate_ibi_table_entry(gi3c);
 		if (ret)
 			return;
 	}
@@ -1619,7 +1618,7 @@
 	if (!gi3c->ibi.hw_support && !gi3c->ibi.is_init)
 		return;
 
-	qcom_geni_i3c_ibi_unconf(dev);
+	qcom_geni_i3c_ibi_unconf(gi3c);
 
 	spin_lock_irqsave(&gi3c->ibi.lock, flags);
 	gi3c->ibi.slots[data->ibi] = NULL;
@@ -1777,6 +1776,14 @@
 		ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep);
 		return ret;
 	}
+	gi3c->i3c_gpio_disable =
+		pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "disable");
+	if (IS_ERR(gi3c->i3c_gpio_disable)) {
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"Error no pinctrl disable config specified\n");
+		ret = PTR_ERR(gi3c->i3c_gpio_disable);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1976,23 +1983,38 @@
 	ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev,
 		&geni_i3c_master_ops, false);
 	if (ret)
-		return ret;
+		GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+			"i3c_master_register failed:%d\n", ret);
+
 	//enable hot-join IRQ also
 	geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
 
 	GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n");
 
-	return ret;
+	return 0;
 }
 
 static int geni_i3c_remove(struct platform_device *pdev)
 {
 	struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev);
-	int ret = 0;
+	int ret = 0, val = 0;
 
+	//Disable hot-join, until next probe happens
+	val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+	val &= ~HOT_JOIN_IRQ_EN;
+	geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+	if (gi3c->ibi.is_init)
+		qcom_geni_i3c_ibi_unconf(gi3c);
 	destroy_workqueue(gi3c->hj_wq);
 	wakeup_source_trash(&gi3c->hj_wl);
-	pm_runtime_disable(gi3c->se.dev);
+	/* force suspend to avoid the auto suspend caused by driver removal */
+	pm_runtime_force_suspend(gi3c->se.dev);
+	ret = pinctrl_select_state(gi3c->se.i3c_rsc.geni_pinctrl,
+			gi3c->i3c_gpio_disable);
+	if (ret)
+		GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+			" i3c: pinctrl_select_state failed\n");
 	ret = i3c_master_unregister(&gi3c->ctrlr);
 	if (gi3c->ipcl)
 		ipc_log_context_destroy(gi3c->ipcl);
@@ -2004,7 +2026,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#if IS_ENABLED(CONFIG_PM)
 static int geni_i3c_runtime_suspend(struct device *dev)
 {
 	struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
@@ -2080,7 +2102,18 @@
 	},
 };
 
-module_platform_driver(geni_i3c_master);
+static int __init i3c_dev_init(void)
+{
+	return platform_driver_register(&geni_i3c_master);
+}
+
+static void __exit i3c_dev_exit(void)
+{
+	platform_driver_unregister(&geni_i3c_master);
+}
+
+module_init(i3c_dev_init);
+module_exit(i3c_dev_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:geni_i3c_master");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7..053a18c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -934,3 +934,21 @@
 			       chan->channel, buf, len);
 }
 EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
+
+int iio_write_channel_processed(struct iio_channel *chan, int val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_PROCESSED);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(iio_write_channel_processed);
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e3a9948..3a01526 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -88,5 +88,5 @@
 obj-$(CONFIG_GOLDFISH_PIC) 		+= irq-goldfish-pic.o
 obj-$(CONFIG_NDS32)			+= irq-ativic32.o
 obj-$(CONFIG_QCOM_PDC)			+= qcom-pdc.o
-obj-$(CONFIG_QCOM_MPM)			+= qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o
+obj-$(CONFIG_QCOM_MPM)			+= qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o qcom-mpm-sdm660.o
 obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
diff --git a/drivers/irqchip/qcom-mpm-sdm660.c b/drivers/irqchip/qcom-mpm-sdm660.c
new file mode 100644
index 0000000..2e482e0
--- /dev/null
+++ b/drivers/irqchip/qcom-mpm-sdm660.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/mpm.h>
+
+const struct mpm_pin mpm_sdm660_gic_chip_data[] = {
+	{2, 216}, /* tsens1_tsens_upper_lower_int */
+	{52, 275}, /* qmp_usb3_lfps_rxterm_irq_cx */
+	{61, 209}, /* lpi_dir_conn_irq_apps[1] */
+	{79, 379}, /* qusb2phy_intr for Dm */
+	{80, 380}, /* qusb2phy_intr for Dm for secondary PHY */
+	{81, 379}, /* qusb2phy_intr for Dp */
+	{82, 380}, /* qusb2phy_intr for Dp for secondary PHY */
+	{87, 358}, /* ee0_apps_hlos_spmi_periph_irq */
+	{91, 519}, /* lpass_pmu_tmr_timeout_irq_cx */
+	{-1},
+};
diff --git a/drivers/irqchip/qcom-mpm.c b/drivers/irqchip/qcom-mpm.c
index f7f4864..ab8a3b3 100644
--- a/drivers/irqchip/qcom-mpm.c
+++ b/drivers/irqchip/qcom-mpm.c
@@ -592,6 +592,10 @@
 		.compatible = "qcom,mpm-gic-scuba",
 		.data = mpm_scuba_gic_chip_data,
 	},
+	{
+		.compatible = "qcom,mpm-gic-sdm660",
+		.data = mpm_sdm660_gic_chip_data,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table);
diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c
index 0e02e78..b0ca27b 100644
--- a/drivers/leds/leds-qti-flash.c
+++ b/drivers/leds/leds-qti-flash.c
@@ -277,7 +277,7 @@
 
 	for (i = 0; i < 60; i++) {
 		/* wait for the flash vreg_ok to be set */
-		usleep_range(5000, 5500);
+		mdelay(5);
 
 		rc = power_supply_get_property(led->main_psy,
 					POWER_SUPPLY_PROP_FLASH_TRIGGER, &pval);
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 0d2816f..5ea693b 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
 /* -------------------------------------------------------------------------
@@ -367,8 +367,6 @@
 	/* Update qhdr_write_idx */
 	queue.qhdr_write_idx = new_write_idx;
 
-	*is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
-
 	/* Update Write pointer -- queue.qhdr_write_idx */
 exit:
 	/* Update TX request -- queue.qhdr_tx_req */
@@ -379,6 +377,13 @@
 		(size_t)&(queue.qhdr_write_idx) - (size_t)&queue))),
 		&queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx));
 
+	/* check if irq is required after write_idx is updated */
+	MEMR(npu_dev, (void *)((size_t)(offset + (uint32_t)(
+		(size_t)&(queue.qhdr_rx_req) - (size_t)&queue))),
+		(uint8_t *)&queue.qhdr_rx_req,
+		sizeof(queue.qhdr_rx_req));
+	*is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
+
 	return status;
 }
 
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index 94e0a4d..87d2d8b 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -95,6 +95,23 @@
 	}
 }
 
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+				 char *prefix_name, char *name)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_add_fw_prefix_name(plat_priv->bus_priv,
+						   prefix_name, name);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv)
 {
 	if (!plat_priv)
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 1e7cc0f..686b12d 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -24,6 +24,8 @@
 struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
 int cnss_bus_init(struct cnss_plat_data *plat_priv);
 void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+				 char *prefix_name, char *name);
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 7bb0bc03..6757ea9 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1090,6 +1090,7 @@
 
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_err("Recovery is already in progress\n");
+		CNSS_ASSERT(0);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1142,7 +1143,8 @@
 	struct cnss_recovery_data *data;
 	int gfp = GFP_KERNEL;
 
-	cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+		cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
 
 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
@@ -2203,6 +2205,12 @@
 		    plat_priv->set_wlaon_pwr_ctrl);
 }
 
+static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
+{
+	return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+				     "qcom,converged-dt");
+}
+
 static const struct platform_device_id cnss_platform_id_table[] = {
 	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
 	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
@@ -2268,6 +2276,8 @@
 	plat_priv->device_id = device_id->driver_data;
 	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
 	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
+	plat_priv->use_fw_path_with_prefix =
+		cnss_use_fw_path_with_prefix(plat_priv);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 	INIT_LIST_HEAD(&plat_priv->vreg_list);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 9d0c51a..5dfd4a4 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -25,6 +25,7 @@
 #define TIME_CLOCK_FREQ_HZ		19200000
 #define CNSS_RAMDUMP_MAGIC		0x574C414E
 #define CNSS_RAMDUMP_VERSION		0
+#define MAX_FIRMWARE_NAME_LEN		20
 
 #define CNSS_EVENT_SYNC   BIT(0)
 #define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -369,7 +370,9 @@
 	u8 *diag_reg_read_buf;
 	u8 cal_done;
 	u8 powered_on;
-	char firmware_name[13];
+	u8 use_fw_path_with_prefix;
+	char firmware_name[MAX_FIRMWARE_NAME_LEN];
+	char fw_fallback_name[MAX_FIRMWARE_NAME_LEN];
 	struct completion rddm_complete;
 	struct completion recovery_complete;
 	struct cnss_control_params ctrl_params;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 020090b..6a91e49 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -38,7 +38,8 @@
 #define MHI_NODE_NAME			"qcom,mhi"
 #define MHI_MSI_NAME			"MHI"
 
-#define MAX_M3_FILE_NAME_LENGTH		13
+#define QCA6390_PATH_PREFIX		"qca6390/"
+#define QCA6490_PATH_PREFIX		"qca6490/"
 #define DEFAULT_M3_FILE_NAME		"m3.bin"
 #define DEFAULT_FW_FILE_NAME		"amss.bin"
 #define FW_V2_FILE_NAME			"amss20.bin"
@@ -3113,12 +3114,13 @@
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
-	char filename[MAX_M3_FILE_NAME_LENGTH];
+	char filename[MAX_FIRMWARE_NAME_LEN];
 	const struct firmware *fw_entry;
 	int ret = 0;
 
 	if (!m3_mem->va && !m3_mem->size) {
-		snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME);
+		cnss_pci_add_fw_prefix_name(pci_priv, filename,
+					    DEFAULT_M3_FILE_NAME);
 
 		ret = request_firmware(&fw_entry, filename,
 				       &pci_priv->pci_dev->dev);
@@ -4038,6 +4040,99 @@
 	cnss_pci_pm_runtime_put_noidle(pci_priv);
 }
 
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+				 char *prefix_name, char *name)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (!plat_priv->use_fw_path_with_prefix) {
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+		return;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+			  QCA6390_PATH_PREFIX "%s", name);
+		break;
+	case QCA6490_DEVICE_ID:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+			  QCA6490_PATH_PREFIX "%s", name);
+		break;
+	default:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+		break;
+	}
+
+	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
+}
+
+static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
+
+	plat_priv->device_version.family_number = mhi_ctrl->family_number;
+	plat_priv->device_version.device_number = mhi_ctrl->device_number;
+	plat_priv->device_version.major_version = mhi_ctrl->major_version;
+	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
+
+	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
+		    plat_priv->device_version.family_number,
+		    plat_priv->device_version.device_number,
+		    plat_priv->device_version.major_version,
+		    plat_priv->device_version.minor_version);
+
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
+			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
+				    pci_priv->device_id,
+				    plat_priv->device_version.major_version);
+			return -EINVAL;
+		}
+		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+					    FW_V2_FILE_NAME);
+		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+			 FW_V2_FILE_NAME);
+		break;
+	case QCA6490_DEVICE_ID:
+		switch (plat_priv->device_version.major_version) {
+		case FW_V2_NUMBER:
+			cnss_pci_add_fw_prefix_name(pci_priv,
+						    plat_priv->firmware_name,
+						    FW_V2_FILE_NAME);
+			snprintf(plat_priv->fw_fallback_name,
+				 MAX_FIRMWARE_NAME_LEN, FW_V2_FILE_NAME);
+			break;
+		default:
+			cnss_pci_add_fw_prefix_name(pci_priv,
+						    plat_priv->firmware_name,
+						    DEFAULT_FW_FILE_NAME);
+			snprintf(plat_priv->fw_fallback_name,
+				 MAX_FIRMWARE_NAME_LEN, DEFAULT_FW_FILE_NAME);
+			break;
+		}
+		break;
+	default:
+		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+					    DEFAULT_FW_FILE_NAME);
+		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+			 DEFAULT_FW_FILE_NAME);
+		break;
+	}
+
+	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
+		    mhi_ctrl->fw_image, mhi_ctrl->fw_image_fallback);
+
+	return 0;
+}
+
 static char *cnss_mhi_notify_status_to_str(enum MHI_CB status)
 {
 	switch (status) {
@@ -4051,6 +4146,8 @@
 		return "FATAL_ERROR";
 	case MHI_CB_EE_MISSION_MODE:
 		return "MISSION_MODE";
+	case MHI_CB_FW_FALLBACK_IMG:
+		return "FW_FALLBACK";
 	default:
 		return "UNKNOWN";
 	}
@@ -4123,6 +4220,10 @@
 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
 		cnss_reason = CNSS_REASON_RDDM;
 		break;
+	case MHI_CB_FW_FALLBACK_IMG:
+		plat_priv->use_fw_path_with_prefix = false;
+		cnss_pci_update_fw_name(pci_priv);
+		return;
 	default:
 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
 		return;
@@ -4160,55 +4261,6 @@
 	return 0;
 }
 
-static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
-{
-	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
-
-	plat_priv->device_version.family_number = mhi_ctrl->family_number;
-	plat_priv->device_version.device_number = mhi_ctrl->device_number;
-	plat_priv->device_version.major_version = mhi_ctrl->major_version;
-	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
-
-	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
-		    plat_priv->device_version.family_number,
-		    plat_priv->device_version.device_number,
-		    plat_priv->device_version.major_version,
-		    plat_priv->device_version.minor_version);
-
-	switch (pci_priv->device_id) {
-	case QCA6390_DEVICE_ID:
-		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
-			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
-				    pci_priv->device_id,
-				    plat_priv->device_version.major_version);
-			return -EINVAL;
-		}
-		scnprintf(plat_priv->firmware_name,
-			  sizeof(plat_priv->firmware_name), FW_V2_FILE_NAME);
-		mhi_ctrl->fw_image = plat_priv->firmware_name;
-		break;
-	case QCA6490_DEVICE_ID:
-		switch (plat_priv->device_version.major_version) {
-		case FW_V2_NUMBER:
-			scnprintf(plat_priv->firmware_name,
-				  sizeof(plat_priv->firmware_name),
-				  FW_V2_FILE_NAME);
-			break;
-		default:
-			break;
-		}
-
-		break;
-	default:
-		break;
-	}
-
-	cnss_pr_dbg("Firmware name is %s\n", mhi_ctrl->fw_image);
-
-	return 0;
-}
-
 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -4233,6 +4285,7 @@
 	mhi_ctrl->slot = PCI_SLOT(pci_dev->devfn);
 
 	mhi_ctrl->fw_image = plat_priv->firmware_name;
+	mhi_ctrl->fw_image_fallback = plat_priv->fw_fallback_name;
 
 	mhi_ctrl->regs = pci_priv->bar;
 	mhi_ctrl->len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
@@ -4365,8 +4418,6 @@
 	cnss_set_pci_priv(pci_dev, pci_priv);
 	plat_priv->device_id = pci_dev->device;
 	plat_priv->bus_priv = pci_priv;
-	snprintf(plat_priv->firmware_name, sizeof(plat_priv->firmware_name),
-		 DEFAULT_FW_FILE_NAME);
 	mutex_init(&pci_priv->bus_lock);
 
 	ret = of_reserved_mem_device_init(dev);
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 2984273..a05ad7c 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -167,6 +167,8 @@
 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+				 char *prefix_name, char *name);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index e1153ed..e7dc9ff 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -12,7 +12,6 @@
 
 #define WLFW_SERVICE_INS_ID_V01		1
 #define WLFW_CLIENT_ID			0x4b4e454c
-#define MAX_BDF_FILE_NAME		13
 #define BDF_FILE_NAME_PREFIX		"bdwlan"
 #define ELF_BDF_FILE_NAME		"bdwlan.elf"
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
@@ -459,42 +458,43 @@
 				  u32 bdf_type, char *filename,
 				  u32 filename_len)
 {
+	char filename_tmp[MAX_FIRMWARE_NAME_LEN];
 	int ret = 0;
 
 	switch (bdf_type) {
 	case CNSS_BDF_ELF:
 		if (plat_priv->board_info.board_id == 0xFF)
-			snprintf(filename, filename_len, ELF_BDF_FILE_NAME);
+			snprintf(filename_tmp, filename_len, ELF_BDF_FILE_NAME);
 		else if (plat_priv->board_info.board_id < 0xFF)
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 ELF_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
 		else
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BDF_FILE_NAME_PREFIX "%02x.e%02x",
 				 plat_priv->board_info.board_id >> 8 & 0xFF,
 				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_BIN:
 		if (plat_priv->board_info.board_id == 0xFF)
-			snprintf(filename, filename_len, BIN_BDF_FILE_NAME);
+			snprintf(filename_tmp, filename_len, BIN_BDF_FILE_NAME);
 		else if (plat_priv->board_info.board_id < 0xFF)
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BIN_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
 		else
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BDF_FILE_NAME_PREFIX "%02x.b%02x",
 				 plat_priv->board_info.board_id >> 8 & 0xFF,
 				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_REGDB:
-		snprintf(filename, filename_len, REGDB_FILE_NAME);
+		snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
 		break;
 	case CNSS_BDF_DUMMY:
 		cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
-		snprintf(filename, filename_len, DUMMY_BDF_FILE_NAME);
-		ret = MAX_BDF_FILE_NAME;
+		snprintf(filename_tmp, filename_len, DUMMY_BDF_FILE_NAME);
+		ret = MAX_FIRMWARE_NAME_LEN;
 		break;
 	default:
 		cnss_pr_err("Invalid BDF type: %d\n",
@@ -502,6 +502,10 @@
 		ret = -EINVAL;
 		break;
 	}
+
+	if (ret >= 0)
+		cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
+
 	return ret;
 }
 
@@ -511,7 +515,7 @@
 	struct wlfw_bdf_download_req_msg_v01 *req;
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
 	struct qmi_txn txn;
-	char filename[MAX_BDF_FILE_NAME];
+	char filename[MAX_FIRMWARE_NAME_LEN];
 	const struct firmware *fw_entry = NULL;
 	const u8 *temp;
 	unsigned int remaining;
@@ -534,7 +538,7 @@
 				     filename, sizeof(filename));
 	if (ret > 0) {
 		temp = DUMMY_BDF_FILE_NAME;
-		remaining = MAX_BDF_FILE_NAME;
+		remaining = MAX_FIRMWARE_NAME_LEN;
 		goto bypass_bdf;
 	} else if (ret < 0) {
 		goto err_req_fw;
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 1d49640..ab47b32 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #endif
 #include <linux/jiffies.h>
+#include <linux/regulator/consumer.h>
 
 struct nqx_platform_data {
 	unsigned int irq_gpio;
@@ -29,6 +30,8 @@
 	unsigned int clkreq_gpio;
 	unsigned int firm_gpio;
 	unsigned int ese_gpio;
+	int vdd_levels[2];
+	int max_current;
 	const char *clk_src_name;
 	/* NFC_CLK pin voting state */
 	bool clk_pin_voting;
@@ -67,6 +70,8 @@
 	/* NFC_IRQ wake-up state */
 	bool			irq_wake_up;
 	bool			cold_reset_rsp_pending;
+	bool			is_vreg_enabled;
+	bool			is_ese_session_active;
 	uint8_t			cold_reset_status;
 	spinlock_t		irq_enabled_lock;
 	unsigned int		count_irq;
@@ -81,6 +86,7 @@
 	size_t kbuflen;
 	u8 *kbuf;
 	struct nqx_platform_data *pdata;
+	struct regulator *reg;
 };
 
 static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
@@ -455,6 +461,7 @@
 		} else {
 			dev_dbg(&nqx_dev->client->dev, "en_gpio already HIGH\n");
 		}
+		nqx_dev->is_ese_session_active = true;
 		r = 0;
 	} else if (arg == ESE_POWER_OFF) {
 		if (!nqx_dev->nfc_ven_enabled) {
@@ -465,6 +472,7 @@
 		} else {
 			dev_dbg(&nqx_dev->client->dev, "keep en_gpio high as NFC is enabled\n");
 		}
+		nqx_dev->is_ese_session_active = false;
 		r = 0;
 	} else if (arg == ESE_COLD_RESET) {
 		// set default value for status as failure
@@ -618,6 +626,123 @@
 	return r;
 }
 
+/**
+ * nfc_ldo_vote()
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * LDO voting based on voltage and current entries in DT
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_vote(struct nqx_dev *nqx_dev)
+{
+	struct device *dev = &nqx_dev->client->dev;
+	int ret;
+
+	ret =  regulator_set_voltage(nqx_dev->reg,
+			nqx_dev->pdata->vdd_levels[0],
+			nqx_dev->pdata->vdd_levels[1]);
+	if (ret < 0) {
+		dev_err(dev, "%s:set voltage failed\n", __func__);
+		return ret;
+	}
+
+	/* pass expected current from NFC in uA */
+	ret = regulator_set_load(nqx_dev->reg, nqx_dev->pdata->max_current);
+	if (ret < 0) {
+		dev_err(dev, "%s:set load failed\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_enable(nqx_dev->reg);
+	if (ret < 0)
+		dev_err(dev, "%s:regulator_enable failed\n", __func__);
+	else
+		nqx_dev->is_vreg_enabled = true;
+	return ret;
+}
+
+/**
+ * nfc_ldo_config()
+ * @client: I2C client instance, containing node to read DT entry
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * Configure LDO if its entry is present in the DT file; otherwise
+ * return success as it's optional
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_config(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int r;
+
+	if (of_get_property(client->dev.of_node, NFC_LDO_SUPPLY_NAME, NULL)) {
+		// Get the regulator handle
+		nqx_dev->reg = regulator_get(&client->dev,
+					NFC_LDO_SUPPLY_DT_NAME);
+		if (IS_ERR(nqx_dev->reg)) {
+			r = PTR_ERR(nqx_dev->reg);
+			nqx_dev->reg = NULL;
+			dev_err(&client->dev,
+				"%s: regulator_get failed, ret = %d\n",
+				__func__, r);
+			return r;
+		}
+	} else {
+		nqx_dev->reg = NULL;
+		dev_err(&client->dev,
+			"%s: regulator entry not present\n", __func__);
+		// return success as it's optional to configure LDO
+		return 0;
+	}
+
+	// LDO config supported by platform DT
+	r = nfc_ldo_vote(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: LDO voting failed, ret = %d\n", __func__, r);
+		regulator_put(nqx_dev->reg);
+	}
+	return r;
+}
+
+/**
+ * nfc_ldo_unvote()
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * set voltage and load to zero and disable regulator
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_unvote(struct nqx_dev *nqx_dev)
+{
+	struct device *dev = &nqx_dev->client->dev;
+	int ret;
+
+	if (!nqx_dev->is_vreg_enabled) {
+		dev_err(dev, "%s: regulator already disabled\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = regulator_disable(nqx_dev->reg);
+	if (ret < 0) {
+		dev_err(dev, "%s:regulator_disable failed\n", __func__);
+		return ret;
+	}
+	nqx_dev->is_vreg_enabled = false;
+
+	ret =  regulator_set_voltage(nqx_dev->reg, 0, NFC_VDDIO_MAX);
+	if (ret < 0) {
+		dev_err(dev, "%s:set voltage failed\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_set_load(nqx_dev->reg, 0);
+	if (ret < 0)
+		dev_err(dev, "%s:set load failed\n", __func__);
+	return ret;
+}
+
 static int nfc_open(struct inode *inode, struct file *filp)
 {
 	struct nqx_dev *nqx_dev = container_of(inode->i_cdev,
@@ -1219,9 +1344,29 @@
 	else
 		pdata->clk_pin_voting = true;
 
+	// optional property
+	r = of_property_read_u32_array(np, NFC_LDO_VOL_DT_NAME,
+			(u32 *) pdata->vdd_levels,
+			ARRAY_SIZE(pdata->vdd_levels));
+	if (r) {
+		dev_err(dev, "error reading NFC VDDIO min and max value\n");
+		// set default as per datasheet
+		pdata->vdd_levels[0] = NFC_VDDIO_MIN;
+		pdata->vdd_levels[1] = NFC_VDDIO_MAX;
+	}
+
+	// optional property
+	r = of_property_read_u32(np, NFC_LDO_CUR_DT_NAME, &pdata->max_current);
+	if (r) {
+		dev_err(dev, "error reading NFC current value\n");
+		// set default as per datasheet
+		pdata->max_current = NFC_CURRENT_MAX;
+	}
+
 	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
 
-	return r;
+	// return success as above properties are optional
+	return 0;
 }
 
 static inline int gpio_input_init(const struct device * const dev,
@@ -1466,6 +1611,12 @@
 	}
 	nqx_disable_irq(nqx_dev);
 
+	r = nfc_ldo_config(client, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: LDO config failed\n", __func__);
+		goto err_ldo_config_failed;
+	}
+
 	/*
 	 * To be efficient we need to test whether nfcc hardware is physically
 	 * present before attempting further hardware initialisation.
@@ -1507,6 +1658,7 @@
 	nqx_dev->irq_wake_up = false;
 	nqx_dev->cold_reset_rsp_pending = false;
 	nqx_dev->nfc_enabled = false;
+	nqx_dev->is_ese_session_active = false;
 
 	dev_err(&client->dev,
 	"%s: probing NFCC NQxxx exited successfully\n",
@@ -1518,6 +1670,11 @@
 	unregister_reboot_notifier(&nfcc_notifier);
 #endif
 err_request_hw_check_failed:
+	if (nqx_dev->reg) {
+		nfc_ldo_unvote(nqx_dev);
+		regulator_put(nqx_dev->reg);
+	}
+err_ldo_config_failed:
 	free_irq(client->irq, nqx_dev);
 err_request_irq_failed:
 	device_destroy(nqx_dev->nqx_class, nqx_dev->devno);
@@ -1568,6 +1725,13 @@
 		goto err;
 	}
 
+	gpio_set_value(nqx_dev->en_gpio, 0);
+	// HW dependent delay before LDO goes into LPM mode
+	usleep_range(10000, 10100);
+	if (nqx_dev->reg) {
+		ret = nfc_ldo_unvote(nqx_dev);
+		regulator_put(nqx_dev->reg);
+	}
 	unregister_reboot_notifier(&nfcc_notifier);
 	free_irq(client->irq, nqx_dev);
 	cdev_del(&nqx_dev->c_dev);
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index 8d807ec..dee13be 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __NQ_NCI_H
@@ -50,6 +50,15 @@
 #define PAYLOAD_LENGTH_MAX		(256)
 #define BYTE				(0x8)
 #define NCI_IDENTIFIER			(0x10)
+#define NFC_LDO_SUPPLY_DT_NAME		"qcom,nq-vdd-1p8"
+#define NFC_LDO_SUPPLY_NAME		"qcom,nq-vdd-1p8-supply"
+#define NFC_LDO_VOL_DT_NAME		"qcom,nq-vdd-1p8-voltage"
+#define NFC_LDO_CUR_DT_NAME		"qcom,nq-vdd-1p8-current"
+
+// as per SN1x0 datasheet
+#define NFC_VDDIO_MIN			1650000 //in uV
+#define NFC_VDDIO_MAX			1950000 //in uV
+#define NFC_CURRENT_MAX			157000 //in uA
 
 enum ese_ioctl_request {
 	/* eSE POWER ON */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index 43fb446..1b900a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -2235,6 +2235,81 @@
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
+static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	unsigned int pipe_num = 0;
+	bool enable_pipe = true;
+	u32 pipe_bitmask = ipa3_ctx->hw_stats.drop.init.enabled_bitmask;
+	char seprator = ',';
+	int i, j;
+	bool is_pipe = false;
+	ssize_t ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+	dbg_buff[count] = '\0';
+	IPADBG("data is %s", dbg_buff);
+
+	i = 0;
+	while (dbg_buff[i] != ' ' && i < count)
+		i++;
+	j = i;
+	i++;
+	if (i < count) {
+		if (dbg_buff[i] == '0') {
+			enable_pipe = false;
+			IPADBG("Drop stats will be disabled for pipes:");
+		}
+	}
+
+	for (i = 0; i < j; i++) {
+		if (dbg_buff[i] >= '0' && dbg_buff[i] <= '9') {
+			pipe_num = (pipe_num * 10) + (dbg_buff[i] - '0');
+			is_pipe = true;
+		}
+		if (dbg_buff[i] == seprator) {
+			if (pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes
+				&& ipa3_get_client_by_pipe(pipe_num) <
+				IPA_CLIENT_MAX) {
+				IPADBG("pipe number %u\n", pipe_num);
+				if (enable_pipe)
+					pipe_bitmask = pipe_bitmask |
+							(1 << pipe_num);
+				else
+					pipe_bitmask = pipe_bitmask &
+							(~(1 << pipe_num));
+			}
+			pipe_num = 0;
+			is_pipe = false;
+		}
+	}
+	if (is_pipe && pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes &&
+		ipa3_get_client_by_pipe(pipe_num) < IPA_CLIENT_MAX) {
+		IPADBG("pipe number %u\n", pipe_num);
+		if (enable_pipe)
+			pipe_bitmask = pipe_bitmask | (1 << pipe_num);
+		else
+			pipe_bitmask = pipe_bitmask & (~(1 << pipe_num));
+	}
+
+	ipa_init_drop_stats(pipe_bitmask);
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
 static const struct file_operations ipa3_quota_ops = {
 	.read = ipa_debugfs_print_quota_stats,
 	.write = ipa_debugfs_reset_quota_stats,
@@ -2255,10 +2330,14 @@
 	.write = ipa_debugfs_reset_drop_stats,
 };
 
+static const struct file_operations ipa3_enable_drop_ops = {
+	.write = ipa_debugfs_enable_disable_drop_stats,
+};
 
 int ipa_debugfs_init_stats(struct dentry *parent)
 {
 	const mode_t read_write_mode = 0664;
+	const mode_t write_mode = 0220;
 	struct dentry *file;
 	struct dentry *dent;
 
@@ -2285,6 +2364,13 @@
 		goto fail;
 	}
 
+	file = debugfs_create_file("enable_drop_stats", write_mode, dent, NULL,
+		&ipa3_enable_drop_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "enable_drop_stats");
+		goto fail;
+	}
+
 	file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
 		&ipa3_tethering_ops);
 	if (IS_ERR_OR_NULL(file)) {
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 0acae67..4a29912 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -4427,6 +4427,7 @@
 		return rc;
 	}
 	sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK;
+	sleep_fifo_length++;
 
 	if (chip->dt.qg_sleep_config) {
 		qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Forcing S2_SLEEP\n");
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 651d212..9219018 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -2200,7 +2200,8 @@
 			 * If Vbatt is within 40mV above Vfloat, then don't
 			 * treat it as overvoltage.
 			 */
-			effective_fv_uv = get_effective_result(chg->fv_votable);
+			effective_fv_uv = get_effective_result_locked(
+							chg->fv_votable);
 			if (pval.intval >= effective_fv_uv + 40000) {
 				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
 				smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 5e96575..7e7dd02 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1057,6 +1057,14 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_MEM_ACC
+	tristate "QTI Memory accelerator regulator driver"
+	help
+	  Say y here to enable the memory accelerator driver for
+	  Qualcomm Technologies, Inc. (QTI) chips. The accelerator
+	  controls delays applied for memory accesses.  This driver
+	  configures the power mode (corner) for the memory accelerator.
+
 config REGULATOR_REFGEN
 	tristate "Qualcomm Technologies, Inc. REFGEN regulator driver"
 	depends on OF
@@ -1086,6 +1094,16 @@
 	  be used on systems which contain an RPM which communicates with the
 	  application processor over SMD.
 
+config REGULATOR_SPM
+	bool "SPM regulator driver"
+	depends on SPMI
+	help
+	  Enable support for the SPM regulator driver which is used for
+	  setting voltages of processor supply regulators via the SPM module
+	  found inside chips of Qualcomm Technologies Inc. The SPM regulator
+	  driver can be used on QTI SoCs where the APSS processor cores are
+	  supplied by their own PMIC regulator.
+
 config REGULATOR_STUB
 	tristate "Stub Regulator"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9d6e77d..2f8b0ba 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -135,8 +135,9 @@
 obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
-
+obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
 obj-$(CONFIG_REGULATOR_REFGEN) += refgen.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
 obj-$(CONFIG_REGULATOR_RPMH) += rpmh-regulator.o
 obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f2360fa..6968971 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2734,6 +2734,40 @@
 EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
 
 /**
+ * regulator_list_corner_voltage - return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at the
+ *	specified voltage corner
+ * @regulator: regulator source
+ * @corner: voltage corner value
+ * Context: can sleep
+ *
+ * This function can be used for regulators which allow scaling between
+ * different voltage corners as opposed to be different absolute voltages.  The
+ * absolute voltage for a given corner may vary part-to-part or for a given part
+ * at runtime based upon various factors.
+ *
+ * Returns a voltage corresponding to the specified voltage corner or a negative
+ * errno if the corner value can't be used on this system.
+ */
+int regulator_list_corner_voltage(struct regulator *regulator, int corner)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret;
+
+	if (corner < rdev->constraints->min_uV ||
+	    corner > rdev->constraints->max_uV ||
+	    !rdev->desc->ops->list_corner_voltage)
+		return -EINVAL;
+
+	mutex_lock(&rdev->mutex);
+	ret = rdev->desc->ops->list_corner_voltage(rdev, corner);
+	mutex_unlock(&rdev->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(regulator_list_corner_voltage);
+
+/**
  * regulator_get_linear_step - return the voltage step size between VSEL values
  * @regulator: regulator source
  *
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
new file mode 100644
index 0000000..07c867b
--- /dev/null
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"ACC: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/string.h>
+#include <soc/qcom/scm.h>
+
+#define MEM_ACC_DEFAULT_SEL_SIZE	2
+
+#define BYTES_PER_FUSE_ROW		8
+
+/* mem-acc config flags */
+
+enum {
+	MEM_ACC_USE_CORNER_ACC_MAP	= BIT(0),
+	MEM_ACC_USE_ADDR_VAL_MAP	= BIT(1),
+};
+
+#define FUSE_MAP_NO_MATCH		(-1)
+#define FUSE_PARAM_MATCH_ANY		(-1)
+#define PARAM_MATCH_ANY			(-1)
+
+enum {
+	MEMORY_L1,
+	MEMORY_L2,
+	MEMORY_MAX,
+};
+
+#define MEM_ACC_TYPE_MAX		6
+
+/**
+ * struct acc_reg_value - Acc register configuration structure
+ * @addr_index:	An index in to phys_reg_addr_list and remap_reg_addr_list
+ *		to get the ACC register physical address and remapped address.
+ * @reg_val:	Value to program in to the register mapped by addr_index.
+ */
+struct acc_reg_value {
+	u32		addr_index;
+	u32		reg_val;
+};
+
+struct corner_acc_reg_config {
+	struct acc_reg_value	*reg_config_list;
+	int			max_reg_config_len;
+};
+
+struct mem_acc_regulator {
+	struct device		*dev;
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+
+	int			corner;
+	bool			mem_acc_supported[MEMORY_MAX];
+	bool			mem_acc_custom_supported[MEMORY_MAX];
+
+	u32			*acc_sel_mask[MEMORY_MAX];
+	u32			*acc_sel_bit_pos[MEMORY_MAX];
+	u32			acc_sel_bit_size[MEMORY_MAX];
+	u32			num_acc_sel[MEMORY_MAX];
+	u32			*acc_en_bit_pos;
+	u32			num_acc_en;
+	u32			*corner_acc_map;
+	u32			num_corners;
+	u32			override_fuse_value;
+	int			override_map_match;
+	int			override_map_count;
+
+
+	void __iomem		*acc_sel_base[MEMORY_MAX];
+	void __iomem		*acc_en_base;
+	phys_addr_t		acc_sel_addr[MEMORY_MAX];
+	phys_addr_t		acc_en_addr;
+	u32			flags;
+
+	void __iomem		*acc_custom_addr[MEMORY_MAX];
+	u32			*acc_custom_data[MEMORY_MAX];
+
+	phys_addr_t		mem_acc_type_addr[MEM_ACC_TYPE_MAX];
+	u32			*mem_acc_type_data;
+
+	/* eFuse parameters */
+	phys_addr_t		efuse_addr;
+	void __iomem		*efuse_base;
+
+	u32			num_acc_reg;
+	u32			*phys_reg_addr_list;
+	void __iomem		**remap_reg_addr_list;
+	struct corner_acc_reg_config	*corner_acc_reg_config;
+	u32			*override_acc_range_fuse_list;
+	int			override_acc_range_fuse_num;
+};
+
+static DEFINE_MUTEX(mem_acc_memory_mutex);
+
+static u64 mem_acc_read_efuse_row(struct mem_acc_regulator *mem_acc_vreg,
+					u32 row_num, bool use_tz_api)
+{
+	int rc;
+	u64 efuse_bits;
+	struct scm_desc desc = {0};
+
+	if (!use_tz_api) {
+		efuse_bits = readq_relaxed(mem_acc_vreg->efuse_base
+			+ row_num * BYTES_PER_FUSE_ROW);
+		return efuse_bits;
+	}
+
+	desc.args[0] = mem_acc_vreg->efuse_addr + row_num * BYTES_PER_FUSE_ROW;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+	efuse_bits = 0;
+
+	rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ), &desc);
+	if (rc) {
+		pr_err("read row %d failed, err code = %d\n", row_num, rc);
+	} else {
+		efuse_bits = ((u64)(desc.ret[1]) << 32) + (u64)desc.ret[0];
+	}
+
+	return efuse_bits;
+}
+
+static inline u32 apc_to_acc_corner(struct mem_acc_regulator *mem_acc_vreg,
+								int corner)
+{
+	/*
+	 * corner_acc_map maps the corner from index 0 and the APC corner value
+	 * starts from the value 1
+	 */
+	return mem_acc_vreg->corner_acc_map[corner - 1];
+}
+
+static void __update_acc_sel(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	u32 acc_data, acc_data_old, i, bit, acc_corner;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_sel_base[mem_type]);
+	acc_data_old = acc_data;
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		acc_data &= ~mem_acc_vreg->acc_sel_mask[mem_type][i];
+		acc_corner = apc_to_acc_corner(mem_acc_vreg, corner);
+		acc_data |= (acc_corner << bit) &
+			mem_acc_vreg->acc_sel_mask[mem_type][i];
+	}
+	pr_debug("corner=%d old_acc_sel=0x%02x new_acc_sel=0x%02x mem_type=%d\n",
+			corner, acc_data_old, acc_data, mem_type);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_sel_base[mem_type]);
+}
+
+static void __update_acc_type(struct mem_acc_regulator *mem_acc_vreg,
+				int corner)
+{
+	int i, rc;
+
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_type_addr[i]) {
+			rc = scm_io_write(mem_acc_vreg->mem_acc_type_addr[i],
+				mem_acc_vreg->mem_acc_type_data[corner - 1 + i *
+				mem_acc_vreg->num_corners]);
+			if (rc)
+				pr_err("scm_io_write: %pa failure rc:%d\n",
+					&(mem_acc_vreg->mem_acc_type_addr[i]),
+					rc);
+		}
+	}
+}
+
+static void __update_acc_custom(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	writel_relaxed(
+		mem_acc_vreg->acc_custom_data[mem_type][corner-1],
+		mem_acc_vreg->acc_custom_addr[mem_type]);
+	pr_debug("corner=%d mem_type=%d custom_data=0x%2x\n", corner,
+		mem_type, mem_acc_vreg->acc_custom_data[mem_type][corner-1]);
+}
+
+static void update_acc_sel(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	int i;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i])
+			__update_acc_sel(mem_acc_vreg, corner, i);
+		if (mem_acc_vreg->mem_acc_custom_supported[i])
+			__update_acc_custom(mem_acc_vreg, corner, i);
+	}
+
+	if (mem_acc_vreg->mem_acc_type_data)
+		__update_acc_type(mem_acc_vreg, corner);
+}
+
+static void update_acc_reg(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *reg_config_list;
+	int i, index;
+	u32 addr_index, reg_val;
+
+	corner_acc_reg_config =
+		&mem_acc_vreg->corner_acc_reg_config[mem_acc_vreg->corner];
+	reg_config_list = corner_acc_reg_config->reg_config_list;
+	for (i = 0; i < corner_acc_reg_config->max_reg_config_len; i++) {
+		/*
+		 * Use (corner - 1) in the below equation as
+		 * the reg_config_list[] stores the values starting from
+		 * index '0' where as the minimum corner value allowed
+		 * in regulator framework is '1'.
+		 */
+		index = (corner - 1) * corner_acc_reg_config->max_reg_config_len
+			+ i;
+		addr_index = reg_config_list[index].addr_index;
+		reg_val = reg_config_list[index].reg_val;
+
+		if (addr_index == PARAM_MATCH_ANY)
+			break;
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("corner=%d register:0x%x value:0x%x\n", corner,
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+}
+
+static int mem_acc_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned int *selector)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+	int i;
+
+	if (corner > mem_acc_vreg->num_corners) {
+		pr_err("Invalid corner=%d requested\n", corner);
+		return -EINVAL;
+	}
+
+	pr_debug("old corner=%d, new corner=%d\n",
+			mem_acc_vreg->corner, corner);
+
+	if (corner == mem_acc_vreg->corner)
+		return 0;
+
+	/* go up or down one level at a time */
+	mutex_lock(&mem_acc_memory_mutex);
+
+	if (mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP) {
+		update_acc_reg(mem_acc_vreg, corner);
+	} else if (mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) {
+		if (corner > mem_acc_vreg->corner) {
+			for (i = mem_acc_vreg->corner + 1; i <= corner; i++) {
+				pr_debug("UP: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		} else {
+			for (i = mem_acc_vreg->corner - 1; i >= corner; i--) {
+				pr_debug("DOWN: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		}
+	}
+
+	mutex_unlock(&mem_acc_memory_mutex);
+
+	pr_debug("new voltage corner set %d\n", corner);
+
+	mem_acc_vreg->corner = corner;
+
+	return 0;
+}
+
+static int mem_acc_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+
+	return mem_acc_vreg->corner;
+}
+
+static struct regulator_ops mem_acc_corner_ops = {
+	.set_voltage		= mem_acc_regulator_set_voltage,
+	.get_voltage		= mem_acc_regulator_get_voltage,
+};
+
+static int __mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg,
+							int mem_type)
+{
+	int i;
+	u32 bit, mask;
+
+	mem_acc_vreg->acc_sel_mask[mem_type] = devm_kzalloc(mem_acc_vreg->dev,
+		mem_acc_vreg->num_acc_sel[mem_type] * sizeof(u32), GFP_KERNEL);
+	if (!mem_acc_vreg->acc_sel_mask[mem_type])
+		return -ENOMEM;
+
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		mask = BIT(mem_acc_vreg->acc_sel_bit_size[mem_type]) - 1;
+		mem_acc_vreg->acc_sel_mask[mem_type][i] = mask << bit;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, rc;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i]) {
+			rc = __mem_acc_sel_init(mem_acc_vreg, i);
+			if (rc) {
+				pr_err("Unable to initialize mem_type=%d rc=%d\n",
+					i, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void mem_acc_en_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, bit;
+	u32 acc_data;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_en_base);
+	pr_debug("init: acc_en_register=%x\n", acc_data);
+	for (i = 0; i < mem_acc_vreg->num_acc_en; i++) {
+		bit = mem_acc_vreg->acc_en_bit_pos[i];
+		acc_data |= BIT(bit);
+	}
+	pr_debug("final: acc_en_register=%x\n", acc_data);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_en_base);
+}
+
+static int populate_acc_data(struct mem_acc_regulator *mem_acc_vreg,
+			const char *prop_name, u32 **value, u32 *len)
+{
+	int rc;
+
+	if (!of_get_property(mem_acc_vreg->dev->of_node, prop_name, len)) {
+		pr_err("Unable to find %s property\n", prop_name);
+		return -EINVAL;
+	}
+	*len /= sizeof(u32);
+	if (!(*len)) {
+		pr_err("Incorrect entries in %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	*value = devm_kzalloc(mem_acc_vreg->dev, (*len) * sizeof(u32),
+							GFP_KERNEL);
+	if (!(*value)) {
+		pr_err("Unable to allocate memory for %s\n", prop_name);
+		return -ENOMEM;
+	}
+
+	pr_debug("Found %s, data-length = %d\n", prop_name, *len);
+
+	rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+					prop_name, *value, *len);
+	if (rc) {
+		pr_err("Unable to populate %s rc=%d\n", prop_name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_setup(struct mem_acc_regulator *mem_acc_vreg,
+			struct resource *res, int mem_type)
+{
+	int len, rc;
+	char *mem_select_str;
+	char *mem_select_size_str;
+
+	mem_acc_vreg->acc_sel_addr[mem_type] = res->start;
+	len = resource_size(res);
+	pr_debug("'acc_sel_addr' = %pa mem_type=%d (len=%d)\n",
+					&res->start, mem_type, len);
+
+	mem_acc_vreg->acc_sel_base[mem_type] = devm_ioremap(mem_acc_vreg->dev,
+			mem_acc_vreg->acc_sel_addr[mem_type], len);
+	if (!mem_acc_vreg->acc_sel_base[mem_type]) {
+		pr_err("Unable to map 'acc_sel_addr' %pa for mem_type=%d\n",
+			&mem_acc_vreg->acc_sel_addr[mem_type], mem_type);
+		return -EINVAL;
+	}
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		mem_select_str = "qcom,acc-sel-l1-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l1-bit-size";
+		break;
+	case MEMORY_L2:
+		mem_select_str = "qcom,acc-sel-l2-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l2-bit-size";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->acc_sel_bit_size[mem_type] = MEM_ACC_DEFAULT_SEL_SIZE;
+	of_property_read_u32(mem_acc_vreg->dev->of_node, mem_select_size_str,
+			&mem_acc_vreg->acc_sel_bit_size[mem_type]);
+
+	rc = populate_acc_data(mem_acc_vreg, mem_select_str,
+			&mem_acc_vreg->acc_sel_bit_pos[mem_type],
+			&mem_acc_vreg->num_acc_sel[mem_type]);
+	if (rc)
+		pr_err("Unable to populate '%s' rc=%d\n", mem_select_str, rc);
+
+	return rc;
+}
+
+static int mem_acc_efuse_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct resource *res;
+	int len;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+	if (!res || !res->start) {
+		mem_acc_vreg->efuse_base = NULL;
+		pr_debug("'efuse_addr' resource missing or not used.\n");
+		return 0;
+	}
+
+	mem_acc_vreg->efuse_addr = res->start;
+	len = resource_size(res);
+
+	pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+	mem_acc_vreg->efuse_base = devm_ioremap(&pdev->dev,
+						mem_acc_vreg->efuse_addr, len);
+	if (!mem_acc_vreg->efuse_base) {
+		pr_err("Unable to map efuse_addr %pa\n",
+				&mem_acc_vreg->efuse_addr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mem_acc_custom_data_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg,
+				 int mem_type)
+{
+	struct resource *res;
+	char *custom_apc_addr_str, *custom_apc_data_str;
+	int len, rc = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_addr_str = "acc-l1-custom";
+		custom_apc_data_str = "qcom,l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_addr_str = "acc-l2-custom";
+		custom_apc_data_str = "qcom,l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, NULL)) {
+		pr_debug("%s custom_data not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						custom_apc_addr_str);
+	if (!res || !res->start) {
+		pr_debug("%s resource missing\n", custom_apc_addr_str);
+		return -EINVAL;
+	}
+
+	len = resource_size(res);
+	mem_acc_vreg->acc_custom_addr[mem_type] =
+		devm_ioremap(mem_acc_vreg->dev, res->start, len);
+	if (!mem_acc_vreg->acc_custom_addr[mem_type]) {
+		pr_err("Unable to map %s %pa\n",
+			custom_apc_addr_str, &res->start);
+		return -EINVAL;
+	}
+
+	rc = populate_acc_data(mem_acc_vreg, custom_apc_data_str,
+				&mem_acc_vreg->acc_custom_data[mem_type], &len);
+	if (rc) {
+		pr_err("Unable to find %s rc=%d\n", custom_apc_data_str, rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->num_corners != len) {
+		pr_err("Custom data is not present for all the corners\n");
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->mem_acc_custom_supported[mem_type] = true;
+
+	return 0;
+}
+
+static int override_mem_acc_custom_data(struct mem_acc_regulator *mem_acc_vreg,
+		 int mem_type)
+{
+	char *custom_apc_data_str;
+	int len, rc = 0, i;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_data_str = "qcom,override-l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_data_str = "qcom,override-l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, &len)) {
+		pr_debug("%s not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", custom_apc_data_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+					custom_apc_data_str, index, &value);
+		if (rc) {
+			pr_err("Unable read %s index %u, rc=%d\n",
+					custom_apc_data_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->acc_custom_data[mem_type][i] = value;
+	}
+
+	return 0;
+}
+
+static int mem_acc_override_corner_map(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int len = 0, i, rc;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+	char *prop_str = "qcom,override-corner-acc-map";
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node, prop_str, &len))
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match ==	FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+						prop_str, index, &value);
+		if (rc) {
+			pr_err("Unable read %s index %u, rc=%d\n",
+						prop_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->corner_acc_map[i] = value;
+	}
+
+	return 0;
+
+}
+
+static void mem_acc_read_efuse_param(struct mem_acc_regulator *mem_acc_vreg,
+		u32 *fuse_sel, int *val)
+{
+	u64 fuse_bits;
+
+	fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+					   fuse_sel[3]);
+	/*
+	 * fuse_sel[1] = LSB position in row (shift)
+	 * fuse_sel[2] = num of bits (mask)
+	 */
+	*val = (fuse_bits >> fuse_sel[1]) & ((1 << fuse_sel[2]) - 1);
+}
+
+#define FUSE_TUPLE_SIZE 4
+/*
+ * Read the override fuse value and locate the matching row of the
+ * qcom,override-fuse-version-map property.
+ *
+ * The fuse described by qcom,override-acc-fuse-sel (a 4-cell tuple:
+ * row, LSB position, bit count, read type) is decoded first; each
+ * one-cell entry of the version map is then compared against that
+ * value (FUSE_PARAM_MATCH_ANY acts as a wildcard).  On success,
+ * override_fuse_value, override_map_count and override_map_match are
+ * updated on @mem_acc_vreg.
+ *
+ * Return: 0 on success (even when no row matched), negative errno on
+ * parse failure.
+ */
+static int mem_acc_parse_override_fuse_version_map(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, rc, tuple_size;
+	int len = 0;
+	u32 *tmp;
+	u32 fuse_sel[4];
+	char *prop_str;
+
+	prop_str = "qcom,override-acc-fuse-sel";
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					FUSE_TUPLE_SIZE);
+	if (rc < 0) {
+		pr_err("Read failed - %s rc=%d\n", prop_str, rc);
+		return rc;
+	}
+
+	mem_acc_read_efuse_param(mem_acc_vreg, fuse_sel,
+				 &mem_acc_vreg->override_fuse_value);
+
+	prop_str = "qcom,override-fuse-version-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		return -EINVAL;
+
+	/* Each map entry is a single u32 fuse version value. */
+	tuple_size = 1;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
+	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+			mem_acc_vreg->override_map_count * tuple_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	/* First row matching the fuse value (or the wildcard) wins. */
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		if (tmp[i * tuple_size] != mem_acc_vreg->override_fuse_value
+		    && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY) {
+			continue;
+		} else {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("override_fuse_val=%d, %s tuple match found: %d\n",
+			mem_acc_vreg->override_fuse_value, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s tuple match not found\n", prop_str);
+
+done:
+	kfree(tmp);
+	return rc;
+}
+
+/*
+ * Decode the fuses listed in qcom,override-acc-range-fuse-list and find
+ * the first row of qcom,override-fuse-range-map whose per-fuse
+ * [min, max] ranges all contain the corresponding decoded fuse value.
+ *
+ * On success the decoded values are cached in
+ * override_acc_range_fuse_list / override_acc_range_fuse_num and the
+ * matching row index (if any) is stored in override_map_match.
+ *
+ * Return: 0 on success (including "no map / no match"), negative errno
+ * on parse or allocation failure.
+ */
+static int mem_acc_parse_override_fuse_version_range(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, j, rc, size, row_size;
+	int num_fuse_sel, len = 0;
+	u32 *tmp = NULL;
+	char *prop_str;
+	u32 *fuse_val, *fuse_sel;
+	char *buf = NULL;
+	int pos = 0, buflen;
+
+	prop_str = "qcom,override-acc-range-fuse-list";
+	if (!of_find_property(of_node, prop_str, &len)) {
+		pr_err("%s property is missing\n", prop_str);
+		return -EINVAL;
+	}
+
+	/* The list must be a whole number of 4-cell selector tuples. */
+	size = len / sizeof(u32);
+	if (len == 0 || (size % FUSE_TUPLE_SIZE)) {
+		pr_err("%s property length (%d) is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	num_fuse_sel = size / FUSE_TUPLE_SIZE;
+	fuse_val = devm_kcalloc(mem_acc_vreg->dev, num_fuse_sel,
+				sizeof(*fuse_val), GFP_KERNEL);
+	if (!fuse_val)
+		return -ENOMEM;
+	mem_acc_vreg->override_acc_range_fuse_list = fuse_val;
+	mem_acc_vreg->override_acc_range_fuse_num = num_fuse_sel;
+
+	fuse_sel = kzalloc(len, GFP_KERNEL);
+	if (!fuse_sel) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					size);
+	if (rc) {
+		pr_err("%s read failed, rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	/* Decode every selector tuple into its fuse value. */
+	for (i = 0; i < num_fuse_sel; i++) {
+		mem_acc_read_efuse_param(mem_acc_vreg, &fuse_sel[i * 4],
+					 &fuse_val[i]);
+	}
+
+	prop_str = "qcom,override-fuse-range-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		goto done;	/* no map present: rc is 0 here */
+
+	/* Each map row holds one <min max> pair per fuse selector. */
+	row_size = num_fuse_sel * 2;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * row_size);
+
+	if (len == 0 || len % (sizeof(u32) * row_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+				mem_acc_vreg->override_map_count * row_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	/* A row matches only if every fuse value lies inside its range. */
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		for (j = 0; j < num_fuse_sel; j++) {
+			if (tmp[i * row_size + j * 2] > fuse_val[j]
+				|| tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+				break;
+		}
+
+		if (j == num_fuse_sel) {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	/*
+	 * Log register and value mapping since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = num_fuse_sel * sizeof("fuse_selxxxx = XXXX ");
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		goto done;	/* logging only: rc (0) is deliberately kept */
+
+	for (j = 0; j < num_fuse_sel; j++)
+		pos += scnprintf(buf + pos, buflen - pos, "fuse_sel%d = %d ",
+				 j, fuse_val[j]);
+	buf[pos] = '\0';
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("%s %s tuple match found: %d\n", buf, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s %s tuple match not found\n", buf, prop_str);
+
+done:
+	kfree(fuse_sel);
+	kfree(tmp);
+	kfree(buf);
+	return rc;
+}
+
+#define MAX_CHARS_PER_INT	20
+
+/*
+ * Emit, at debug log level, the physical register/value pairs that
+ * would be applied when moving from @corner to every other corner,
+ * using the parsed table in @corner_acc_reg_config.  A PARAM_MATCH_ANY
+ * address index terminates the pair list for a given target corner.
+ *
+ * Return: 0 on success, -ENOMEM if the scratch log buffer cannot be
+ * allocated.
+ */
+static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
+			struct corner_acc_reg_config *corner_acc_reg_config,
+			u32 corner)
+{
+	int i, k, index, pos = 0;
+	u32 addr_index;
+	size_t buflen;
+	char *buf;
+	struct acc_reg_value *reg_config_list =
+					corner_acc_reg_config->reg_config_list;
+	int max_reg_config_len = corner_acc_reg_config->max_reg_config_len;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	/*
+	 * Log register and value mapping since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = max_reg_config_len * (MAX_CHARS_PER_INT + 6) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("Could not allocate memory for acc register and value logging\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_corners; i++) {
+		/* Transitions to the same corner have no pairs to apply. */
+		if (corner == i + 1)
+			continue;
+
+		pr_debug("Corner: %d --> %d:\n", corner, i + 1);
+		pos = 0;
+		for (k = 0; k < max_reg_config_len; k++) {
+			index = i * max_reg_config_len + k;
+			addr_index = reg_config_list[index].addr_index;
+			if (addr_index == PARAM_MATCH_ANY)
+				break;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+				"<0x%x 0x%x> ",
+				mem_acc_vreg->phys_reg_addr_list[addr_index],
+				reg_config_list[index].reg_val);
+		}
+		buf[pos] = '\0';
+		pr_debug("%s\n", buf);
+	}
+
+	kfree(buf);
+	return 0;
+}
+
+/*
+ * Parse <addr-index, value> pairs from @prop_str into @reg_config_list.
+ *
+ * @list_offset selects which @list_size-cell tuple-set to read when the
+ * property carries several fuse-version-selected sets.  Address indices
+ * must either be the PARAM_MATCH_ANY wildcard or fall within
+ * [1, @max_reg_index] (index 0 is reserved).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mem_acc_get_reg_addr_val(struct device_node *of_node,
+		const char *prop_str, struct acc_reg_value *reg_config_list,
+		int list_offset, int list_size, u32 max_reg_index)
+{
+
+	int i, index, rc  = 0;
+
+	for (i = 0; i < list_size / 2; i++) {
+		index = (list_offset * list_size) + i * 2;
+		/*
+		 * NOTE(review): OR-ing the two error codes can mangle the
+		 * reported rc value, but any failure still leaves rc
+		 * non-zero, so the error path is taken correctly.
+		 */
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+					&reg_config_list[i].addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+					&reg_config_list[i].reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		/* Wildcard entries are accepted without range-checking. */
+		if (reg_config_list[i].addr_index == PARAM_MATCH_ANY)
+			continue;
+
+		if ((!reg_config_list[i].addr_index) ||
+			reg_config_list[i].addr_index > max_reg_index) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				reg_config_list[i].addr_index, prop_str, index);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Apply the fuse-selected "qcom,override-corner%d-addr-val-map"
+ * properties on top of the base per-corner register tables parsed by
+ * mem_acc_reg_config_init().  The per-corner reg_config_list is
+ * reallocated when the override tuple-set is a different size.
+ *
+ * Return: 0 on success (or when there is nothing to override), negative
+ * errno on failure.
+ */
+static int mem_acc_override_reg_addr_val_init(
+			struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *override_reg_config_list;
+	int i, tuple_count, tuple_match, len = 0, rc = 0;
+	u32 list_size, override_max_reg_config_len;
+	char prop_str[40];
+	struct property *prop;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	if (!mem_acc_vreg->corner_acc_reg_config)
+		return 0;
+
+	/* Without a fuse map, only a single (first) tuple-set exists. */
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+			 "qcom,override-corner%d-addr-val-map", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		/*
+		 * NOTE(review): list_size is computed before the !prop
+		 * check; when the property is absent it may be derived
+		 * from a stale len, but the loop continues before the
+		 * value is used.
+		 */
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		if ((!list_size) || list_size < (num_corners * 2)) {
+			pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
+			i, len);
+			return -EINVAL;
+		}
+
+		override_max_reg_config_len = list_size / (num_corners * 2);
+		override_reg_config_list =
+				corner_acc_reg_config[i].reg_config_list;
+
+		if (corner_acc_reg_config[i].max_reg_config_len
+					!= override_max_reg_config_len) {
+			/* Free the previously allocated table */
+			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new tuple-set size */
+			override_reg_config_list =
+				devm_kcalloc(mem_acc_vreg->dev,
+				override_max_reg_config_len * num_corners,
+				sizeof(*override_reg_config_list), GFP_KERNEL);
+			if (!override_reg_config_list)
+				return -ENOMEM;
+
+			corner_acc_reg_config[i].max_reg_config_len =
+						override_max_reg_config_len;
+			corner_acc_reg_config[i].reg_config_list =
+						override_reg_config_list;
+		}
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+					override_reg_config_list, tuple_match,
+					list_size, mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Parse the optional fuse-based override configuration.  The range-map
+ * form takes precedence over the version-map form; when neither
+ * property exists, or no fuse row matches, the base configuration is
+ * left untouched and 0 is returned.
+ */
+static int mem_acc_parse_override_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	bool has_range_map, has_version_map;
+	int mem_type, ret = 0;
+
+	/* Specify default no match case. */
+	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+	mem_acc_vreg->override_map_count = 0;
+
+	has_range_map = of_find_property(of_node,
+				"qcom,override-fuse-range-map", NULL);
+	has_version_map = of_find_property(of_node,
+				"qcom,override-fuse-version-map", NULL);
+
+	if (has_range_map) {
+		ret = mem_acc_parse_override_fuse_version_range(mem_acc_vreg);
+		if (ret) {
+			pr_err("parsing qcom,override-fuse-range-map property failed, rc=%d\n",
+				ret);
+			return ret;
+		}
+	} else if (has_version_map) {
+		ret = mem_acc_parse_override_fuse_version_map(mem_acc_vreg);
+		if (ret) {
+			pr_err("parsing qcom,override-fuse-version-map property failed, rc=%d\n",
+				ret);
+			return ret;
+		}
+	} else {
+		/* No override fuse configuration defined in device node */
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+		return 0;
+
+	ret = mem_acc_override_corner_map(mem_acc_vreg);
+	if (ret) {
+		pr_err("Unable to override corner map rc=%d\n", ret);
+		return ret;
+	}
+
+	ret = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+	if (ret) {
+		pr_err("Unable to override reg_config_list init rc=%d\n",
+			ret);
+		return ret;
+	}
+
+	for (mem_type = 0; mem_type < MEMORY_MAX; mem_type++) {
+		ret = override_mem_acc_custom_data(mem_acc_vreg, mem_type);
+		if (ret) {
+			pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+				mem_type, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Apply the optional one-time register writes described by the
+ * qcom,acc-init-reg-config property (a list of <addr-index value>
+ * pairs).  Address indices are validated against [1, num_acc_reg]
+ * before each write.
+ *
+ * Return: 0 on success (including when the property is absent),
+ * negative errno on failure.
+ */
+static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, size, len = 0, rc = 0;
+	u32 addr_index, reg_val, index;
+	char *prop_str = "qcom,acc-init-reg-config";
+
+	if (!of_find_property(of_node, prop_str, &len)) {
+		/* Initial acc register configuration not specified */
+		return rc;
+	}
+
+	size = len / sizeof(u32);
+	if ((!size) || (size % 2)) {
+		pr_err("%s specified with invalid length: %d\n",
+			prop_str, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size / 2; i++) {
+		index = i * 2;
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+						&addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+						&reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		if ((!addr_index) || addr_index > mem_acc_vreg->num_acc_reg) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				addr_index, prop_str, index);
+			return -EINVAL;
+		}
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("acc initial config: register:0x%x value:0x%x\n",
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+
+	return rc;
+}
+
+/*
+ * Read qcom,acc-reg-addr-list, ioremap every listed physical address,
+ * and store both lists on @mem_acc_vreg.
+ *
+ * Both arrays are allocated with num_acc_reg + 1 entries and filled
+ * starting at index 1: entry 0 is intentionally left unused so that
+ * device-tree address indices can be 1-based (0 would otherwise clash
+ * with the PARAM_MATCH_ANY convention used elsewhere).
+ *
+ * Return: 0 on success (including when the property is absent),
+ * negative errno on failure.
+ */
+static int mem_acc_get_reg_addr(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	void __iomem **remap_reg_addr_list;
+	u32 *phys_reg_addr_list;
+	int i, num_acc_reg, len = 0, rc = 0;
+
+	if (!of_find_property(of_node, "qcom,acc-reg-addr-list", &len)) {
+		/* acc register address list not specified */
+		return rc;
+	}
+
+	num_acc_reg = len / sizeof(u32);
+	if (!num_acc_reg) {
+		pr_err("qcom,acc-reg-addr-list has invalid len = %d\n", len);
+		return -EINVAL;
+	}
+
+	phys_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*phys_reg_addr_list), GFP_KERNEL);
+	if (!phys_reg_addr_list)
+		return -ENOMEM;
+
+	remap_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*remap_reg_addr_list), GFP_KERNEL);
+	if (!remap_reg_addr_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,acc-reg-addr-list",
+					&phys_reg_addr_list[1], num_acc_reg);
+	if (rc) {
+		pr_err("Read- qcom,acc-reg-addr-list failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 1; i <= num_acc_reg; i++) {
+		/* Map each 4-byte register so it can be written later. */
+		remap_reg_addr_list[i] = devm_ioremap(mem_acc_vreg->dev,
+						phys_reg_addr_list[i], 0x4);
+		if (!remap_reg_addr_list[i]) {
+			pr_err("Unable to map register address 0x%x\n",
+					phys_reg_addr_list[i]);
+			return -EINVAL;
+		}
+	}
+
+	mem_acc_vreg->num_acc_reg = num_acc_reg;
+	mem_acc_vreg->phys_reg_addr_list = phys_reg_addr_list;
+	mem_acc_vreg->remap_reg_addr_list = remap_reg_addr_list;
+
+	return rc;
+}
+
+/*
+ * Parse the per-corner ACC register address/value tables.
+ *
+ * Reads qcom,num-acc-corners and qcom,boot-acc-corner, then one
+ * "qcom,corner%d-reg-config" property per corner; each must hold at
+ * least num_corners * 2 u32 cells (<addr-index value> pairs for every
+ * target corner).  On success the tables are stored (1-based by
+ * corner) in corner_acc_reg_config and MEM_ACC_USE_ADDR_VAL_MAP is set.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mem_acc_reg_config_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct acc_reg_value *reg_config_list;
+	int len, size, rc, i, num_corners;
+	struct property *prop;
+	char prop_str[30];
+	struct corner_acc_reg_config *corner_acc_reg_config;
+
+	rc = of_property_read_u32(of_node, "qcom,num-acc-corners",
+				&num_corners);
+	if (rc) {
+		pr_err("could not read qcom,num-acc-corners: rc=%d\n", rc);
+		return rc;
+	}
+
+	mem_acc_vreg->num_corners = num_corners;
+
+	rc = of_property_read_u32(of_node, "qcom,boot-acc-corner",
+				&mem_acc_vreg->corner);
+	if (rc) {
+		pr_err("could not read qcom,boot-acc-corner: rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("boot acc corner = %d\n", mem_acc_vreg->corner);
+
+	/* Entry 0 is unused so the array can be indexed by corner number. */
+	corner_acc_reg_config = devm_kcalloc(mem_acc_vreg->dev, num_corners + 1,
+						sizeof(*corner_acc_reg_config),
+						GFP_KERNEL);
+	if (!corner_acc_reg_config)
+		return -ENOMEM;
+
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+				"qcom,corner%d-reg-config", i);
+		/*
+		 * Reset len before each lookup: of_find_property() does
+		 * not write it when the property is absent, so "size"
+		 * below would otherwise be computed from an
+		 * uninitialized or stale value.
+		 */
+		len = 0;
+		prop = of_find_property(of_node, prop_str, &len);
+		size = len / sizeof(u32);
+		if ((!prop) || (!size) || size < (num_corners * 2)) {
+			pr_err("%s property is missed or invalid length: len=%d\n",
+				prop_str, len);
+			return -EINVAL;
+		}
+
+		reg_config_list = devm_kcalloc(mem_acc_vreg->dev, size / 2,
+					sizeof(*reg_config_list), GFP_KERNEL);
+		if (!reg_config_list)
+			return -ENOMEM;
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+						reg_config_list, 0, size,
+						mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		corner_acc_reg_config[i].max_reg_config_len =
+						size / (num_corners * 2);
+		corner_acc_reg_config[i].reg_config_list = reg_config_list;
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	mem_acc_vreg->corner_acc_reg_config = corner_acc_reg_config;
+	mem_acc_vreg->flags |= MEM_ACC_USE_ADDR_VAL_MAP;
+	return rc;
+}
+
+#define MEM_TYPE_STRING_LEN	20
+/*
+ * Top-level initialization: map all optional hardware resources
+ * (acc-en, acc-sel-l1/l2, mem-acc-type%d), parse the eFuse, register
+ * list, per-corner tables, corner map, custom data and fuse overrides.
+ *
+ * Return: 0 on success, negative errno on any failure.
+ */
+static int mem_acc_init(struct platform_device *pdev,
+		struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct resource *res;
+	int len, rc, i, j;
+	bool acc_type_present = false;
+	char tmps[MEM_TYPE_STRING_LEN];
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-en");
+	if (!res || !res->start) {
+		pr_debug("'acc-en' resource missing or not used.\n");
+	} else {
+		mem_acc_vreg->acc_en_addr = res->start;
+		len = resource_size(res);
+		pr_debug("'acc_en_addr' = %pa (len=0x%x)\n", &res->start, len);
+
+		mem_acc_vreg->acc_en_base = devm_ioremap(mem_acc_vreg->dev,
+				mem_acc_vreg->acc_en_addr, len);
+		if (!mem_acc_vreg->acc_en_base) {
+			pr_err("Unable to map 'acc_en_addr' %pa\n",
+					&mem_acc_vreg->acc_en_addr);
+			return -EINVAL;
+		}
+
+		rc = populate_acc_data(mem_acc_vreg, "qcom,acc-en-bit-pos",
+				&mem_acc_vreg->acc_en_bit_pos,
+				&mem_acc_vreg->num_acc_en);
+		if (rc) {
+			pr_err("Unable to populate 'qcom,acc-en-bit-pos' rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_efuse_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+		return rc;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l1");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l1' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L1);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L1, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L1] = true;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l2");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l2' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L2);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L2, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L2] = true;
+	}
+
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		snprintf(tmps, MEM_TYPE_STRING_LEN, "mem-acc-type%d", i + 1);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, tmps);
+
+		if (!res || !res->start) {
+			pr_debug("'%s' resource missing or not used.\n", tmps);
+		} else {
+			mem_acc_vreg->mem_acc_type_addr[i] = res->start;
+			acc_type_present = true;
+		}
+	}
+
+	rc = mem_acc_get_reg_addr(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to get acc register addresses: rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_reg_config_init(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc register address-value map failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(of_node, "qcom,corner-acc-map", NULL)) {
+		rc = populate_acc_data(mem_acc_vreg, "qcom,corner-acc-map",
+			&mem_acc_vreg->corner_acc_map,
+			&mem_acc_vreg->num_corners);
+		/*
+		 * Fail hard on parse errors: setting the corner-map flag
+		 * below with a bad/empty map would break voltage updates.
+		 */
+		if (rc) {
+			pr_err("Unable to populate 'qcom,corner-acc-map' rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		/* Check if at least one valid mem-acc config. is specified */
+		for (i = 0; i < MEMORY_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_supported[i])
+				break;
+		}
+		if (i == MEMORY_MAX && !acc_type_present) {
+			pr_err("No mem-acc configuration specified\n");
+			return -EINVAL;
+		}
+
+		mem_acc_vreg->flags |= MEM_ACC_USE_CORNER_ACC_MAP;
+	}
+
+	/* The two mapping schemes are mutually exclusive. */
+	if ((mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) &&
+		(mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP)) {
+		pr_err("Invalid configuration, both qcom,corner-acc-map and qcom,cornerX-addr-val-map specified\n");
+		return -EINVAL;
+	}
+
+	pr_debug("num_corners = %d\n", mem_acc_vreg->num_corners);
+
+	if (mem_acc_vreg->num_acc_en)
+		mem_acc_en_init(mem_acc_vreg);
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_init_reg_config(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc initial register configuration failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_sel_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc_sel reg rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = mem_acc_custom_data_init(pdev, mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to initialize custom data for mem_type=%d rc=%d\n",
+					i, rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_parse_override_config(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to parse mem acc override configuration, rc=%d\n",
+			rc);
+		return rc;
+	}
+	if (acc_type_present) {
+		mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
+			mem_acc_vreg->dev, mem_acc_vreg->num_corners *
+			MEM_ACC_TYPE_MAX * sizeof(u32), GFP_KERNEL);
+
+		if (!mem_acc_vreg->mem_acc_type_data) {
+			pr_err("Unable to allocate memory for mem_acc_type\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_type_addr[i]) {
+				snprintf(tmps, MEM_TYPE_STRING_LEN,
+					"qcom,mem-acc-type%d", i + 1);
+
+				j = i * mem_acc_vreg->num_corners;
+				rc = of_property_read_u32_array(
+					mem_acc_vreg->dev->of_node,
+					tmps,
+					&mem_acc_vreg->mem_acc_type_data[j],
+					mem_acc_vreg->num_corners);
+				if (rc) {
+					pr_err("Unable to get property %s rc=%d\n",
+						tmps, rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Platform driver probe: parse all device-tree configuration via
+ * mem_acc_init() and register a voltage regulator whose set_voltage
+ * path drives the MEM ACC corner selection.
+ */
+static int mem_acc_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct mem_acc_regulator *mem_acc_vreg;
+	struct regulator_desc *rdesc;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	if (!pdev->dev.of_node) {
+		pr_err("Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+					NULL);
+	if (!init_data) {
+		pr_err("regulator init data is missing\n");
+		return -EINVAL;
+	}
+
+	/* Corner values are passed through the voltage interface. */
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+
+	mem_acc_vreg = devm_kzalloc(&pdev->dev, sizeof(*mem_acc_vreg),
+			GFP_KERNEL);
+	if (!mem_acc_vreg)
+		return -ENOMEM;
+
+	mem_acc_vreg->dev = &pdev->dev;
+
+	rc = mem_acc_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc configuration rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rdesc			= &mem_acc_vreg->rdesc;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+	rdesc->ops		= &mem_acc_corner_ops;
+	rdesc->name		= init_data->constraints.name;
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = mem_acc_vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	/* Non-devm registration: paired with regulator_unregister() in remove */
+	mem_acc_vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(mem_acc_vreg->rdev)) {
+		rc = PTR_ERR(mem_acc_vreg->rdev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("regulator_register failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, mem_acc_vreg);
+
+	return 0;
+}
+
+/* Tear down the regulator registered by probe. */
+static int mem_acc_regulator_remove(struct platform_device *pdev)
+{
+	struct mem_acc_regulator *vreg = platform_get_drvdata(pdev);
+
+	regulator_unregister(vreg->rdev);
+	return 0;
+}
+
+static const struct of_device_id mem_acc_regulator_match_table[] = {
+	{ .compatible = "qcom,mem-acc-regulator", },
+	{}
+};
+
+static struct platform_driver mem_acc_regulator_driver = {
+	.probe		= mem_acc_regulator_probe,
+	.remove		= mem_acc_regulator_remove,
+	.driver		= {
+		.name		= "qcom,mem-acc-regulator",
+		.of_match_table = mem_acc_regulator_match_table,
+
+	},
+};
+
+/* Registered at postcore so the regulator exists before its consumers. */
+int __init mem_acc_regulator_init(void)
+{
+	return platform_driver_register(&mem_acc_regulator_driver);
+}
+postcore_initcall(mem_acc_regulator_init);
+
+static void __exit mem_acc_regulator_exit(void)
+{
+	platform_driver_unregister(&mem_acc_regulator_driver);
+}
+module_exit(mem_acc_regulator_exit);
+
+MODULE_DESCRIPTION("MEM-ACC-SEL regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
new file mode 100644
index 0000000..313dcee
--- /dev/null
+++ b/drivers/regulator/spm-regulator.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+#include <linux/arm-smccc.h>
+
+#if defined(CONFIG_ARM64) || (defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI))
+#else
+	#define __invoke_psci_fn_smc(a, b, c, d) 0
+#endif
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+/* One programmable voltage range of a buck: window, floor and step. */
+struct voltage_range {
+	int min_uV;		/* voltage corresponding to vlevel 0 */
+	int set_point_min_uV;	/* lowest usable set point */
+	int max_uV;		/* upper bound of the range */
+	int step_uV;		/* microvolts per vlevel step */
+};
+
+/* Logical regulator flavours used to select register layouts/code paths. */
+enum qpnp_regulator_uniq_type {
+	QPNP_TYPE_HF,
+	QPNP_TYPE_FTS2,
+	QPNP_TYPE_FTS2p5,
+	QPNP_TYPE_FTS426,
+	QPNP_TYPE_ULT_HF,
+	QPNP_TYPE_HFS430,
+};
+
+/*
+ * Apparent hardware TYPE register values (see QPNP_SMPS_REG_TYPE).
+ * FTS2, FTS2p5 and FTS426 share 0x1C and are told apart by the
+ * SUBTYPE values below — TODO confirm against the probe code.
+ */
+enum qpnp_regulator_type {
+	QPNP_HF_TYPE		= 0x03,
+	QPNP_FTS2_TYPE		= 0x1C,
+	QPNP_FTS2p5_TYPE	= 0x1C,
+	QPNP_FTS426_TYPE	= 0x1C,
+	QPNP_ULT_HF_TYPE	= 0x22,
+};
+
+/* Apparent hardware SUBTYPE register values (see QPNP_SMPS_REG_SUBTYPE). */
+enum qpnp_regulator_subtype {
+	QPNP_FTS2_SUBTYPE	= 0x08,
+	QPNP_HF_SUBTYPE		= 0x08,
+	QPNP_FTS2p5_SUBTYPE	= 0x09,
+	QPNP_FTS426_SUBTYPE	= 0x0A,
+	QPNP_ULT_HF_SUBTYPE	= 0x0D,
+	QPNP_HFS430_SUBTYPE	= 0x0A,
+};
+
+/* Driver-internal mode abstraction mapped to/from hardware mode bits. */
+enum qpnp_logical_mode {
+	QPNP_LOGICAL_MODE_AUTO,
+	QPNP_LOGICAL_MODE_PWM,
+};
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000,  5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range fts2p5_range0
+					 = { 80000, 350000, 1355000,  5000};
+static const struct voltage_range fts2p5_range1
+					 = {160000, 700000, 2200000, 10000};
+static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000};
+static const struct voltage_range hfs430_range = {0, 320000, 2040000, 8000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+								12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+								25000};
+static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
+static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
+								25000};
+
+#define QPNP_SMPS_REG_TYPE		0x04
+#define QPNP_SMPS_REG_SUBTYPE		0x05
+#define QPNP_SMPS_REG_VOLTAGE_RANGE	0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT	0x41
+#define QPNP_SMPS_REG_MODE		0x45
+#define QPNP_SMPS_REG_STEP_CTRL		0x61
+#define QPNP_SMPS_REG_UL_LL_CTRL	0x68
+
+/* FTS426/HFS430 voltage control registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_LB	0x40
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_UB	0x41
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB	0x42
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_UB	0x43
+
+/* HF voltage limit registers */
+#define QPNP_HF_REG_VOLTAGE_ULS		0x69
+#define QPNP_HF_REG_VOLTAGE_LLS		0x6B
+
+/* FTS voltage limit registers */
+#define QPNP_FTS_REG_VOLTAGE_ULS_VALID	0x6A
+#define QPNP_FTS_REG_VOLTAGE_LLS_VALID	0x6C
+
+/* FTS426/HFS430 voltage limit registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB	0x68
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_UB	0x69
+
+/* Common regulator UL & LL limits control register layout */
+#define QPNP_COMMON_UL_EN_MASK		0x80
+#define QPNP_COMMON_LL_EN_MASK		0x40
+
+#define QPNP_SMPS_MODE_PWM		0x80
+#define QPNP_SMPS_MODE_AUTO		0x40
+#define QPNP_FTS426_HFS430_MODE_PWM	0x07
+#define QPNP_FTS426_HFS430_MODE_AUTO	0x06
+
+#define QPNP_SMPS_STEP_CTRL_STEP_MASK	0x18
+#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT	3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK	0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT	0
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK		0x03
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT	0
+
+/* Clock rate in kHz of the FTS2 regulator reference clock. */
+#define QPNP_SMPS_CLOCK_RATE		19200
+#define QPNP_FTS426_CLOCK_RATE		4800
+#define QPNP_HFS430_CLOCK_RATE		1600
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY	50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY	8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY		8
+#define QPNP_HF_STEP_DELAY		20
+#define QPNP_FTS426_HFS430_STEP_DELAY	2
+
+/* Arbitrarily large max step size used to avoid possible numerical overflow */
+#define SPM_REGULATOR_MAX_STEP_UV	10000000
+
+/*
+ * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is use to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM	4
+#define QPNP_FTS2_STEP_MARGIN_DEN	5
+#define QPNP_FTS426_HFS430_STEP_MARGIN_NUM	10
+#define QPNP_FTS426_HFS430_STEP_MARGIN_DEN	11
+
+/*
+ * Settling delay for FTS2.5
+ * Warm-up=20uS, 0-10% & 90-100% non-linear V-ramp delay = 50uS
+ */
+#define FTS2P5_SETTLING_DELAY_US	70
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+/* Per-regulator state for one SPM-controlled SMPS. */
+struct spm_vreg {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	const struct voltage_range	*range;	/* active voltage range */
+	int				uV;	/* requested voltage */
+	int				last_set_uV;	/* last programmed voltage */
+	unsigned int			vlevel;
+	unsigned int			last_set_vlevel;	/* raw value read back from HW */
+	u32				max_step_uV;
+	bool				online;
+	u16				spmi_base_addr;	/* SPMI peripheral base */
+	enum qpnp_logical_mode		init_mode;
+	enum qpnp_logical_mode		mode;
+	/* presumably steps/us used for settle-time math — TODO confirm */
+	int				step_rate;
+	enum qpnp_regulator_uniq_type	regulator_type;
+	u32				cpu_num;
+	bool				bypass_spm;	/* true: program via SPMI, not SPM */
+	struct regulator_desc		avs_rdesc;
+	struct regulator_dev		*avs_rdev;
+	int				avs_min_uV;
+	int				avs_max_uV;
+	bool				avs_enabled;
+	u32				recal_cluster_mask;
+};
+
+/* True when an AVS regulator is registered and the SPM is not bypassed. */
+static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
+{
+	if (vreg->bypass_spm)
+		return false;
+
+	return vreg->avs_rdev != NULL;
+}
+
+/*
+ * Convert a voltage in microvolts to the raw vlevel/VSET value for this
+ * regulator.  FTS426/HFS430 encode the voltage directly in millivolts
+ * (rounded up to the range step); other types encode the number of
+ * steps above the range minimum, rounded up.
+ */
+static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
+{
+	int vlevel;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return roundup(uV, vreg->range->step_uV) / 1000;
+
+	vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
+
+	/* Fix VSET for ULT HF Buck: range 1 uses a 5-bit VSET with the
+	 * range-split marker bits set on top.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1) {
+		vlevel &= 0x1F;
+		vlevel |= ULT_SMPS_RANGE_SPLIT;
+	}
+
+	return vlevel;
+}
+
+/*
+ * Convert a raw vlevel/VSET value back to microvolts.  FTS426/HFS430
+ * store the voltage directly in millivolts; other types are
+ * step-encoded relative to the selected range.
+ */
+static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
+{
+	enum qpnp_regulator_uniq_type type = vreg->regulator_type;
+
+	if (type == QPNP_TYPE_FTS426 || type == QPNP_TYPE_HFS430)
+		return vlevel * 1000;
+
+	/*
+	 * ULT HF range 1 VSET is only 5 bits wide: strip the range-split
+	 * marker bits before applying the step formula (range 0 uses the
+	 * full 7-bit VSET).
+	 */
+	if (type == QPNP_TYPE_ULT_HF && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return vreg->range->min_uV + vlevel * vreg->range->step_uV;
+}
+
+/*
+ * Map a raw vlevel to a zero-based regulator-framework selector by
+ * subtracting the offset of the first usable set point
+ * (set_point_min_uV) from the range base.  For HFS430 the vlevel is in
+ * millivolt units, so it is converted to step units first.
+ */
+static unsigned int spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
+						 unsigned int vlevel)
+{
+	/* Fix VSET for ULT HF Buck: drop the range-split marker bits */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	if (vreg->regulator_type == QPNP_TYPE_HFS430)
+		vlevel = spm_regulator_vlevel_to_uv(vreg, vlevel)
+				/ vreg->range->step_uV;
+
+	return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
+				/ vreg->range->step_uV;
+}
+
+/*
+ * Read back the currently programmed voltage set point over SPMI and
+ * cache it in last_set_vlevel / last_set_uV.  FTS426/HFS430 hold a
+ * 16-bit little-endian value in the VALID_LB/UB register pair; other
+ * types use a single setpoint byte.
+ *
+ * Return: 0 on success, negative errno on regmap failure.
+ */
+static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 val[2] = {0};
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		rc = regmap_bulk_read(vreg->regmap,
+				vreg->spmi_base_addr
+				+ QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB,
+				val, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		/* Combine LB/UB into a 16-bit little-endian vlevel. */
+		vreg->last_set_vlevel = ((unsigned int)val[1] << 8) | val[0];
+	} else {
+		rc = regmap_bulk_read(vreg->regmap,
+			vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+				val, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+		vreg->last_set_vlevel = val[0];
+	}
+
+	vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg,
+						vreg->last_set_vlevel);
+	return rc;
+}
+
+/*
+ * Write a voltage setpoint directly to the PMIC over SPMI.  This is the
+ * fallback path used when the SPM/SAW hardware path is bypassed or fails.
+ * FTS426/HFS430 take a 16-bit value; other types take a single byte.
+ */
+static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned int vlevel)
+{
+	int rc = 0;
+	u8 reg[2];
+
+	/* Set voltage control registers via SPMI. */
+	reg[0] = vlevel & 0xFF;
+	reg[1] = (vlevel >> 8) & 0xFF;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		rc = regmap_bulk_write(vreg->regmap,
+			  vreg->spmi_base_addr
+			  + QPNP_FTS426_HFS430_REG_VOLTAGE_LB,
+			  reg, 2);
+	} else {
+		rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			  reg[0]);
+	}
+
+	if (rc)
+		pr_err("%s: regmap_write failed, rc=%d\n",
+			vreg->rdesc.name, rc);
+
+	return rc;
+}
+
+/* Translate a hardware mode register value to the logical PWM/AUTO mode. */
+static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg,
+							u8 regval)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return (regval == QPNP_FTS426_HFS430_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+	else
+		return (regval & QPNP_SMPS_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+}
+
+/* Translate a logical PWM/AUTO mode to the type-specific register value. */
+static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg,
+					enum qpnp_logical_mode mode)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_FTS426_HFS430_MODE_PWM
+			: QPNP_FTS426_HFS430_MODE_AUTO;
+	else
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO;
+}
+
+/*
+ * Program the PMIC mode register with the hardware encoding of the given
+ * logical mode.  Returns 0 on success or a regmap error code.
+ */
+static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+	int rc;
+
+	rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+			  qpnp_mode_to_regval(vreg, mode));
+	if (rc)
+		dev_err(&vreg->pdev->dev,
+			"%s: could not write to mode register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+/*
+ * regulator_ops get_voltage callback.
+ *
+ * When AVS is active, the last programmed vlevel is read back from the SPM
+ * hardware; if that fails, fall back to reading the PMIC setpoint over
+ * SPMI.  Returns the voltage in uV or a negative errno.
+ */
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int vlevel, rc;
+
+	if (spm_regulator_using_avs(vreg)) {
+		vlevel = msm_spm_get_vdd(vreg->cpu_num);
+
+		if (vlevel < 0) {
+			pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
+				vreg->rdesc.name, vlevel);
+
+			rc = qpnp_smps_read_voltage(vreg);
+			if (rc) {
+				pr_err("%s: voltage read failed, rc=%d\n",
+				       vreg->rdesc.name, rc);
+				return rc;
+			}
+
+			return vreg->last_set_uV;
+		}
+
+		vreg->last_set_vlevel = vlevel;
+		vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+		return vreg->last_set_uV;
+	} else {
+		return vreg->uV;
+	}
+}
+
+/*
+ * Set the output voltage, preferring the SPM/SAW hardware path and falling
+ * back to a direct SPMI write if that fails or is bypassed.  Busy-waits for
+ * the voltage ramp (plus the FTS2.5 settling time) to complete before
+ * updating the cached last_set_uV/last_set_vlevel values.
+ */
+static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
+{
+	unsigned int vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	bool spm_failed = false;
+	int rc = 0;
+	u32 slew_delay;
+
+	if (likely(!vreg->bypass_spm)) {
+		/* Set voltage control register via SPM. */
+		rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
+		if (rc) {
+			pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
+				vreg->rdesc.name, rc);
+			spm_failed = true;
+		}
+	}
+
+	if (unlikely(vreg->bypass_spm || spm_failed)) {
+		rc = qpnp_smps_write_voltage(vreg, vlevel);
+		if (rc) {
+			pr_err("%s: voltage write failed, rc=%d\n",
+				vreg->rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	if (uV > vreg->last_set_uV) {
+		/* Wait for voltage stepping to complete. */
+		slew_delay = DIV_ROUND_UP(uV - vreg->last_set_uV,
+					vreg->step_rate);
+		if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+			slew_delay += FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	} else if (vreg->regulator_type == QPNP_TYPE_FTS2p5) {
+		/* add the ramp-down delay */
+		slew_delay = DIV_ROUND_UP(vreg->last_set_uV - uV,
+				vreg->step_rate) + FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	}
+
+	vreg->last_set_uV = uV;
+	vreg->last_set_vlevel = vlevel;
+
+	return rc;
+}
+
+/*
+ * Request APSS recalibration via an SMC call for the configured cluster
+ * mask.  No-op (returns 0) when qcom,recal-mask was not specified in DT.
+ */
+static int spm_regulator_recalibrate(struct spm_vreg *vreg)
+{
+	struct arm_smccc_res res;
+
+	if (!vreg->recal_cluster_mask)
+		return 0;
+
+	/* SMC ID 0xC4000020: firmware recalibration for the given clusters. */
+	arm_smccc_smc(0xC4000020, vreg->recal_cluster_mask,
+		2, 0, 0, 0, 0, 0, &res);
+	if (res.a0)
+		pr_err("%s: recalibration failed, rc=%ld\n", vreg->rdesc.name,
+			res.a0);
+
+	return res.a0;
+}
+
+/*
+ * Step the output from the last set voltage to the cached target vreg->uV in
+ * increments of at most max_step_uV.  FTS2 regulators not already configured
+ * for PWM are temporarily forced into PWM mode while ramping up so that the
+ * transition is fast, then returned to AUTO mode.
+ */
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	bool pwm_required;
+	int rc = 0;
+	int uV;
+
+	/* Refresh last_set_uV/last_set_vlevel from hardware first. */
+	rc = spm_regulator_get_voltage(rdev);
+	if (rc < 0)
+		return rc;
+
+	if (vreg->vlevel == vreg->last_set_vlevel)
+		return 0;
+
+	pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
+			&& (vreg->init_mode != QPNP_LOGICAL_MODE_PWM)
+			&& vreg->uV > vreg->last_set_uV;
+
+	if (pwm_required) {
+		/* Switch to PWM mode so that voltage ramping is fast. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_PWM);
+		if (rc)
+			return rc;
+	}
+
+	do {
+		uV = vreg->uV > vreg->last_set_uV
+		    ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
+		    : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
+
+		rc = spm_regulator_write_voltage(vreg, uV);
+		if (rc)
+			return rc;
+	} while (vreg->last_set_uV != vreg->uV);
+
+	if (pwm_required) {
+		/* Wait for mode transition to complete. */
+		udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+		/* Switch to AUTO mode so that power consumption is lowered. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_AUTO);
+		if (rc)
+			return rc;
+	}
+
+	rc = spm_regulator_recalibrate(vreg);
+
+	return rc;
+}
+
+/*
+ * regulator_ops set_voltage callback: round the request up to a real set
+ * point within [min_uV, max_uV], cache the target vlevel/uV and selector,
+ * and only program hardware if the regulator is currently enabled (online).
+ */
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	int uV = min_uV;
+	unsigned int vlevel;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	/* Round to an exact set point and verify it still fits max_uV. */
+	vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+	if (uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
+	vreg->vlevel = vlevel;
+	vreg->uV = uV;
+
+	if (!vreg->online)
+		return 0;
+
+	return _spm_regulator_set_voltage(rdev);
+}
+
+/* regulator_ops list_voltage: selector 0 maps to set_point_min_uV. */
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+					unsigned int selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (selector >= vreg->rdesc.n_voltages)
+		return 0;
+
+	return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
+}
+
+/*
+ * regulator_ops enable: apply the cached target voltage and, on success,
+ * mark the regulator online so future set_voltage calls touch hardware.
+ */
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _spm_regulator_set_voltage(rdev);
+
+	if (!rc)
+		vreg->online = true;
+
+	return rc;
+}
+
+/*
+ * regulator_ops disable: only clears the online flag; the supply itself
+ * stays powered (it feeds the CPU and cannot actually be switched off here).
+ */
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+
+	return 0;
+}
+
+/* regulator_ops is_enabled: reports the software online flag. */
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
+/* regulator_ops get_mode: PWM is reported as NORMAL, anything else IDLE. */
+static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->mode == QPNP_LOGICAL_MODE_PWM
+			? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+/* regulator_ops set_mode: translate framework modes to PMIC modes. */
+static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	/*
+	 * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
+	 * init_mode.  This ensures that the regulator always stays in PWM mode
+	 * in the case that qcom,mode has been specified as "pwm" in device
+	 * tree.
+	 */
+	vreg->mode = (mode == REGULATOR_MODE_NORMAL) ? QPNP_LOGICAL_MODE_PWM
+						     : vreg->init_mode;
+
+	return qpnp_smps_set_mode(vreg, vreg->mode);
+}
+
+/* regulator_ops for the main SPM-controlled buck regulator. */
+static struct regulator_ops spm_regulator_ops = {
+	.get_voltage	= spm_regulator_get_voltage,
+	.set_voltage	= spm_regulator_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.get_mode	= spm_regulator_get_mode,
+	.set_mode	= spm_regulator_set_mode,
+	.enable		= spm_regulator_enable,
+	.disable	= spm_regulator_disable,
+	.is_enabled	= spm_regulator_is_enabled,
+};
+
+/*
+ * Set the AVS voltage floor (from min_uV) and ceiling (from max_uV).  Both
+ * bounds are validated against the regulator range, rounded to real set
+ * points, and then programmed into the SPM AVS hardware (unless bypassed).
+ */
+static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	unsigned int vlevel_min, vlevel_max;
+	int uV, avs_min_uV, avs_max_uV, rc;
+
+	/* Validate and round the floor voltage. */
+	uV = min_uV;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
+
+	if (avs_min_uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* Validate and round the ceiling voltage. */
+	uV = max_uV;
+
+	if (uV > range->max_uV && min_uV <= range->max_uV)
+		uV = range->max_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_max = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
+
+	if (avs_max_uV < min_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
+						vlevel_max);
+		if (rc) {
+			pr_err("%s: AVS limit setting failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
+	vreg->avs_min_uV = avs_min_uV;
+	vreg->avs_max_uV = avs_max_uV;
+
+	return 0;
+}
+
+/* Report the cached AVS minimum (floor) voltage. */
+static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_min_uV;
+}
+
+/* Enable AVS in the SPM hardware (no-op when the SPM is bypassed). */
+static int spm_regulator_avs_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_enable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS enable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = true;
+
+	return 0;
+}
+
+/* Disable AVS in the SPM hardware (no-op when the SPM is bypassed). */
+static int spm_regulator_avs_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_disable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS disable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = false;
+
+	return 0;
+}
+
+/* Report the software AVS enable state. */
+static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_enabled;
+}
+
+/* regulator_ops for the child AVS limits regulator. */
+static struct regulator_ops spm_regulator_avs_ops = {
+	.get_voltage	= spm_regulator_avs_get_voltage,
+	.set_voltage	= spm_regulator_avs_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.enable		= spm_regulator_avs_enable,
+	.disable	= spm_regulator_avs_disable,
+	.is_enabled	= spm_regulator_avs_is_enabled,
+};
+
+/*
+ * Identify the SMPS peripheral from its TYPE/SUBTYPE register pair and
+ * record the matching regulator_type.  Returns -ENODEV for unrecognized
+ * hardware.
+ */
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 type[2];
+
+	rc = regmap_bulk_read(vreg->regmap,
+			      vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE,
+			      type,
+			      2);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2;
+	} else if (type[0] == QPNP_FTS2p5_TYPE
+					&& type[1] == QPNP_FTS2p5_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2p5;
+	} else if (type[0] == QPNP_FTS426_TYPE
+					&& type[1] == QPNP_FTS426_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS426;
+	} else if (type[0] == QPNP_HF_TYPE
+					&& type[1] == QPNP_HFS430_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HFS430;
+	} else if (type[0] == QPNP_ULT_HF_TYPE
+					&& type[1] == QPNP_ULT_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_ULT_HF;
+	} else if (type[0] == QPNP_HF_TYPE
+					&& type[1] == QPNP_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HF;
+	} else {
+		dev_err(&vreg->pdev->dev,
+			"%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
+			 __func__, type[0], type[1]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+/*
+ * Select range0 or range1 based on the VOLTAGE_RANGE register.  The range
+ * is fixed during the PMIC power-on sequence and cannot change at runtime.
+ */
+static int qpnp_smps_init_range(struct spm_vreg *vreg,
+	const struct voltage_range *range0, const struct voltage_range *range1)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	if (reg == 0x00) {
+		vreg->range = range0;
+	} else if (reg == 0x01) {
+		vreg->range = range1;
+	} else {
+		dev_err(&vreg->pdev->dev, "%s: voltage range=%d is invalid\n",
+			__func__, reg);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * ULT HF bucks have no range register; infer the range from the current
+ * VSET setpoint, whose ULT_SMPS_RANGE_SPLIT bit selects range 1.
+ */
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+							&ult_hf_range1;
+	return rc;
+}
+
+/*
+ * Seed the cached voltage state from hardware and mirror it into the SAW
+ * voltage control register.  An SPM write failure is logged but not treated
+ * as fatal (the function still returns 0 in that case).
+ */
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+
+	rc = qpnp_smps_read_voltage(vreg);
+	if (rc) {
+		pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
+			rc);
+		return rc;
+	}
+
+	vreg->vlevel = vreg->last_set_vlevel;
+	vreg->uV = vreg->last_set_uV;
+
+	/* Initialize SAW voltage control register */
+	if (!vreg->bypass_spm) {
+		rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+		if (rc)
+			pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
+			       vreg->rdesc.name, rc);
+	}
+
+	return 0;
+}
+
+/*
+ * Determine the initial regulator mode.  If the "qcom,mode" DT property is
+ * present it is parsed ("pwm", or "auto" for non-ULT-HF types) and written
+ * to hardware; otherwise the current mode is read back from the PMIC.
+ */
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+	const char *mode_name;
+	int rc;
+	uint val;
+
+	rc = of_property_read_string(vreg->pdev->dev.of_node, "qcom,mode",
+					&mode_name);
+	if (!rc) {
+		if (strcmp("pwm", mode_name) == 0) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_PWM;
+		} else if ((strcmp("auto", mode_name) == 0) &&
+				(vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_AUTO;
+		} else {
+			dev_err(&vreg->pdev->dev,
+				"%s: unknown regulator mode: %s\n",
+				__func__, mode_name);
+			return -EINVAL;
+		}
+
+		rc = qpnp_smps_set_mode(vreg, vreg->init_mode);
+		if (rc)
+			return rc;
+	} else {
+		rc = regmap_read(vreg->regmap,
+				 vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+				 &val);
+		if (rc) {
+			dev_err(&vreg->pdev->dev,
+				"%s: could not read mode register, rc=%d\n",
+				__func__, rc);
+			/* Bail out instead of decoding an uninitialized val. */
+			return rc;
+		}
+		vreg->init_mode = qpnp_regval_to_mode(vreg, val);
+	}
+
+	vreg->mode = vreg->init_mode;
+
+	return rc;
+}
+
+/*
+ * Compute the voltage slew rate (uV/us) from the stepping control register
+ * using type-specific clock rates, step sizes and delay factors, then
+ * derate the result by a safety margin.
+ */
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	int step = 0, delay;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read stepping control register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	/* ULT and FTS426 bucks do not support steps */
+	if (vreg->regulator_type != QPNP_TYPE_ULT_HF && vreg->regulator_type !=
+		QPNP_TYPE_FTS426  && vreg->regulator_type != QPNP_TYPE_HFS430)
+		step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
+			>> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		delay = (reg & QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK)
+			>> QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = ((vreg->regulator_type == QPNP_TYPE_FTS426)
+					? QPNP_FTS426_CLOCK_RATE
+					: QPNP_HFS430_CLOCK_RATE)
+					* vreg->range->step_uV;
+	} else {
+		delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+			>> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
+					* (1 << step);
+	}
+
+	/* Divide by the type-specific per-step delay (scaled from kHz). */
+	if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
+			|| (vreg->regulator_type == QPNP_TYPE_HF))
+		vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426
+			|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate /= 1000 * (QPNP_FTS426_HFS430_STEP_DELAY
+						<< delay);
+	else
+		vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+	/* Apply a conservative margin so waits never undershoot. */
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+			|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate = vreg->step_rate
+					* QPNP_FTS426_HFS430_STEP_MARGIN_NUM
+					/ QPNP_FTS426_HFS430_STEP_MARGIN_DEN;
+	else
+		vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+					/ QPNP_FTS2_STEP_MARGIN_DEN;
+
+	/* Ensure that the stepping rate is greater than 0. */
+	vreg->step_rate = max(vreg->step_rate, 1);
+
+	return rc;
+}
+
+/*
+ * Verify that the regulator's framework min/max constraints fit within any
+ * hardware upper/lower voltage limits programmed into the PMIC.  Types
+ * without HW limit registers (ULT HF) skip the check.
+ */
+static int qpnp_smps_check_constraints(struct spm_vreg *vreg,
+					struct regulator_init_data *init_data)
+{
+	int rc = 0, limit_min_uV, limit_max_uV;
+	u16 ul_reg, ll_reg;
+	u8 reg[2];
+
+	limit_min_uV = 0;
+	limit_max_uV = INT_MAX;
+
+	ul_reg = QPNP_FTS_REG_VOLTAGE_ULS_VALID;
+	ll_reg = QPNP_FTS_REG_VOLTAGE_LLS_VALID;
+
+	switch (vreg->regulator_type) {
+	case QPNP_TYPE_HF:
+		ul_reg = QPNP_HF_REG_VOLTAGE_ULS;
+		ll_reg = QPNP_HF_REG_VOLTAGE_LLS;
+		/* fall through - HF shares the UL/LL handling below */
+	case QPNP_TYPE_FTS2:
+	case QPNP_TYPE_FTS2p5:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_SMPS_REG_UL_LL_CTRL, reg, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: UL_LL register read failed, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ul_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: ULS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_max_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ll_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: LLS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_min_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		break;
+	case QPNP_TYPE_FTS426:
+	case QPNP_TYPE_HFS430:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB,
+					reg, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		limit_max_uV = spm_regulator_vlevel_to_uv(vreg,
+					((unsigned int)reg[1] << 8) | reg[0]);
+		break;
+	case QPNP_TYPE_ULT_HF:
+		/* no HW voltage limit configuration */
+		break;
+	}
+
+	if (init_data->constraints.min_uV < limit_min_uV
+	    || init_data->constraints.max_uV >  limit_max_uV) {
+		dev_err(&vreg->pdev->dev, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
+			init_data->constraints.min_uV,
+			init_data->constraints.max_uV, limit_min_uV,
+			limit_max_uV);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/* True if the selected range is a low-voltage (range 0) table. */
+static bool spm_regulator_using_range0(struct spm_vreg *vreg)
+{
+	return vreg->range == &fts2_range0 || vreg->range == &fts2p5_range0
+		|| vreg->range == &ult_hf_range0 || vreg->range == &hf_range0
+		|| vreg->range == &fts426_range;
+}
+
+/*
+ * Register a regulator to enable/disable AVS and set AVS min/max limits.
+ * The AVS limits regulator is described by the first available DT child
+ * node; if no child node exists this function is a successful no-op.
+ */
+static int spm_regulator_avs_register(struct spm_vreg *vreg,
+				struct device *dev, struct device_node *node)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *avs_node = NULL;
+	struct device_node *child_node;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	/*
+	 * Find the first available child node (if any).  It corresponds to an
+	 * AVS limits regulator.
+	 */
+	for_each_available_child_of_node(node, child_node) {
+		avs_node = child_node;
+		break;
+	}
+
+	if (!avs_node)
+		return 0;
+
+	init_data = of_get_regulator_init_data(dev, avs_node, &vreg->avs_rdesc);
+	if (!init_data) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+						| REGULATOR_CHANGE_VOLTAGE;
+
+	if (!init_data->constraints.name) {
+		dev_err(dev, "%s: AVS node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	vreg->avs_rdesc.name	= init_data->constraints.name;
+	vreg->avs_rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->avs_rdesc.owner	= THIS_MODULE;
+	vreg->avs_rdesc.ops	= &spm_regulator_avs_ops;
+	vreg->avs_rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = avs_node;
+
+	vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
+	if (IS_ERR(vreg->avs_rdev)) {
+		rc = PTR_ERR(vreg->avs_rdev);
+		dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (vreg->bypass_spm)
+		pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
+			vreg->avs_rdesc.name);
+
+	return 0;
+}
+
+/*
+ * Probe: locate the SPMI peripheral, identify the SMPS type, initialize the
+ * voltage range, setpoint, mode and step rate from hardware, then register
+ * the main regulator and the optional child AVS limits regulator.
+ */
+static int spm_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *node = pdev->dev.of_node;
+	struct regulator_init_data *init_data;
+	struct spm_vreg *vreg;
+	unsigned int base;
+	bool bypass_spm;
+	int rc;
+
+	if (!node) {
+		dev_err(&pdev->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Defer probing until the SPM driver is up, unless SPM is bypassed. */
+	bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
+	if (!bypass_spm) {
+		rc = msm_spm_probe_done();
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"%s: spm unavailable, rc=%d\n",
+					__func__, rc);
+			return rc;
+		}
+	}
+
+	vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!vreg->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	vreg->pdev = pdev;
+	vreg->bypass_spm = bypass_spm;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	vreg->spmi_base_addr = base;
+
+	rc = qpnp_smps_check_type(vreg);
+	if (rc)
+		return rc;
+
+	/* Specify CPU 0 as default in order to handle shared regulator case. */
+	vreg->cpu_num = 0;
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,cpu-num",
+						&vreg->cpu_num);
+
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,recal-mask",
+						&vreg->recal_cluster_mask);
+
+	/*
+	 * The regulator must be initialized to range 0 or range 1 during
+	 * PMIC power on sequence.  Once it is set, it cannot be changed
+	 * dynamically.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_FTS2)
+		rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+		rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->range = &fts426_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->range = &hfs430_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HF)
+		rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
+		rc = qpnp_ult_hf_init_range(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_voltage(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_mode(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_step_rate(vreg);
+	if (rc)
+		return rc;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, node, &vreg->rdesc);
+	if (!init_data) {
+		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
+	init_data->constraints.valid_modes_mask
+				= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+
+	if (!init_data->constraints.name) {
+		dev_err(&pdev->dev, "%s: node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = qpnp_smps_check_constraints(vreg, init_data);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: regulator constraints check failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	vreg->rdesc.name	= init_data->constraints.name;
+	vreg->rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->rdesc.owner	= THIS_MODULE;
+	vreg->rdesc.ops		= &spm_regulator_ops;
+	vreg->rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	/* Clamp the DT max step to the driver limit and align it to steps. */
+	vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+	of_property_read_u32(vreg->pdev->dev.of_node,
+				"qcom,max-voltage-step", &vreg->max_step_uV);
+
+	if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
+		vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+
+	vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
+	pr_debug("%s: max single voltage step size=%u uV\n",
+		vreg->rdesc.name, vreg->max_step_uV);
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = node;
+	vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = spm_regulator_avs_register(vreg, &pdev->dev, node);
+	if (rc) {
+		regulator_unregister(vreg->rdev);
+		return rc;
+	}
+
+	dev_set_drvdata(&pdev->dev, vreg);
+
+	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+		vreg->rdesc.name,
+		spm_regulator_using_range0(vreg) ? "LV" : "MV",
+		vreg->uV,
+		vreg->init_mode == QPNP_LOGICAL_MODE_PWM ? "PWM" :
+		   (vreg->init_mode == QPNP_LOGICAL_MODE_AUTO ? "AUTO" : "PFM"),
+		vreg->step_rate);
+
+	return rc;
+}
+
+/* Remove: unregister the AVS child (if registered) and main regulators. */
+static int spm_regulator_remove(struct platform_device *pdev)
+{
+	struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
+
+	if (vreg->avs_rdev)
+		regulator_unregister(vreg->avs_rdev);
+	regulator_unregister(vreg->rdev);
+
+	return 0;
+}
+
+/* DT and platform device matching tables plus the driver definition. */
+static const struct of_device_id spm_regulator_match_table[] = {
+	{ .compatible = SPM_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id spm_regulator_id[] = {
+	{ SPM_REGULATOR_DRIVER_NAME, 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
+
+static struct platform_driver spm_regulator_driver = {
+	.driver = {
+		.name		= SPM_REGULATOR_DRIVER_NAME,
+		.of_match_table = spm_regulator_match_table,
+	},
+	.probe		= spm_regulator_probe,
+	.remove		= spm_regulator_remove,
+	.id_table	= spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register spmi driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.  It may be called
+ * more than once; only the first call registers the platform driver.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+	/* Guard against double registration (arch_initcall + explicit call). */
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+
+	has_registered = true;
+
+	return platform_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+/* Module exit: unregister the platform driver. */
+static void __exit spm_regulator_exit(void)
+{
+	platform_driver_unregister(&spm_regulator_driver);
+}
+
+arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0d74602e..9efd8af 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -343,6 +343,15 @@
 	  Client driver for the WCNSS_CTRL SMD channel, used to download nv
 	  firmware to a newly booted WCNSS chip.
 
+config MSM_PIL_MSS_QDSP6V5
+	tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+	help
+	 Support for booting and shutting down QDSP6v5 (Hexagon) processors
+	 in modem subsystems. If you would like to make or receive phone
+	 calls then say Y here.
+	 If unsure, say N.
+
 config SETUP_SSR_NOTIF_TIMEOUTS
 	bool "Set timeouts on SSR sysmon notifications and notifier callbacks"
 	help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f5b2b90..62a34a5 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -46,6 +46,7 @@
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o
 obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 864bd65..0ee43a8 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -92,14 +92,6 @@
 static bool eud_ready;
 static struct platform_device *eud_private;
 
-static int check_eud_mode_mgr2(struct eud_chip *chip)
-{
-	u32 val;
-
-	val = scm_io_read(chip->eud_mode_mgr2_phys_base);
-	return val & BIT(0);
-}
-
 static void enable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
@@ -113,7 +105,7 @@
 			priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
 
 	/* Enable secure eud if supported */
-	if (priv->secure_eud_en && !check_eud_mode_mgr2(priv)) {
+	if (priv->secure_eud_en) {
 		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
 				   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
 		if (ret)
@@ -572,9 +564,6 @@
 		}
 
 		chip->eud_mode_mgr2_phys_base = res->start;
-
-		if (check_eud_mode_mgr2(chip))
-			enable = 1;
 	}
 
 	chip->need_phy_clk_vote = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 3e3a41b..addf18e 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -846,6 +846,11 @@
 	clear_bit(ICNSS_FW_DOWN, &penv->state);
 	icnss_ignore_fw_timeout(false);
 
+	if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
+		icnss_pr_err("QMI Server already in Connected State\n");
+		ICNSS_ASSERT(0);
+	}
+
 	ret = icnss_connect_to_fw_server(penv, data);
 	if (ret)
 		goto fail;
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index b869497..2cea0df 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -177,6 +177,20 @@
 	return "UNKNOWN";
 };
 
+/* Map an icnss_soc_wake_event_type to a printable name for logging. */
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
+{
+	switch (type) {
+	case ICNSS_SOC_WAKE_REQUEST_EVENT:
+		return "SOC_WAKE_REQUEST";
+	case ICNSS_SOC_WAKE_RELEASE_EVENT:
+		return "SOC_WAKE_RELEASE";
+	case ICNSS_SOC_WAKE_EVENT_MAX:
+		return "SOC_EVENT_MAX";
+	}
+
+	/* Unreachable for valid enum values; keeps callers printable. */
+	return "UNKNOWN";
+}
+
 int icnss_driver_event_post(struct icnss_priv *priv,
 			    enum icnss_driver_event_type type,
 			    u32 flags, void *data)
@@ -249,6 +263,78 @@
 	return ret;
 }
 
+/*
+ * Queue a SOC wake request/release event on the dedicated soc_wake
+ * workqueue.  For ICNSS_EVENT_SYNC callers, block until the worker
+ * has processed the event and return the worker's result; async
+ * callers get 0 once the event is queued.  Returns -EINTR if an
+ * interruptible sync wait is broken while the event is still pending.
+ */
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+			      enum icnss_soc_wake_event_type type,
+			      u32 flags, void *data)
+{
+	struct icnss_soc_wake_event *event;
+	unsigned long irq_flags;
+	int gfp = GFP_KERNEL;
+	int ret = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+		     icnss_soc_wake_event_to_str(type), type, current->comm,
+		     flags, priv->state);
+
+	if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
+		icnss_pr_err("Invalid Event type: %d, can't post", type);
+		return -EINVAL;
+	}
+
+	/* Callers may be in atomic context; avoid a sleeping allocation. */
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event), gfp);
+	if (!event)
+		return -ENOMEM;
+
+	/* Hold a wakeup source so the system cannot suspend while the
+	 * event is queued (and, for sync callers, being waited on).
+	 */
+	icnss_pm_stay_awake(priv);
+
+	event->type = type;
+	event->data = data;
+	init_completion(&event->complete);
+	event->ret = ICNSS_EVENT_PENDING;
+	event->sync = !!(flags & ICNSS_EVENT_SYNC);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	list_add_tail(&event->list, &priv->soc_wake_msg_list);
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	priv->stats.soc_wake_events[type].posted++;
+	queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
+
+	if (!(flags & ICNSS_EVENT_SYNC))
+		goto out;
+
+	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&event->complete);
+	else
+		ret = wait_for_completion_interruptible(&event->complete);
+
+	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+		     icnss_soc_wake_event_to_str(type), type, priv->state, ret,
+		     event->ret);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+		/* Wait interrupted before the worker ran: downgrade the
+		 * event to async under the lock so the worker frees it;
+		 * deliberately do NOT free it here.
+		 */
+		event->sync = false;
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	ret = event->ret;
+	kfree(event);
+
+out:
+	icnss_pm_relax(priv);
+	return ret;
+}
+
 bool icnss_is_fw_ready(void)
 {
 	if (!penv)
@@ -888,6 +974,41 @@
 	return ret;
 }
 
+/* Worker-context handler: ask firmware to keep the SoC awake and,
+ * on success, record this holder in the wake reference count.
+ */
+static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
+{
+	int rc;
+
+	if (!priv)
+		return -ENODEV;
+
+	rc = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_REQUEST_V01);
+	if (rc)
+		return rc;
+
+	/* Count the holder only once the firmware accepted the request. */
+	atomic_inc(&priv->soc_wake_ref_count);
+	return 0;
+}
+
+/* Worker-context handler: drop one SOC wake reference and send the
+ * firmware release message only when the last reference is gone.
+ */
+static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
+{
+	int ret = 0;
+	int count = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	count = atomic_dec_return(&priv->soc_wake_ref_count);
+
+	/* Non-zero means other holders remain.  NOTE(review): there is no
+	 * underflow guard - an unbalanced release drives the count negative
+	 * and is treated the same way; confirm callers always pair
+	 * request/release.
+	 */
+	if (count) {
+		icnss_pr_dbg("Wake release not called. Ref count: %d",
+			     count);
+		return 0;
+	}
+
+	ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_RELEASE_V01);
+
+	return ret;
+}
+
 static int icnss_driver_event_register_driver(struct icnss_priv *priv,
 							 void *data)
 {
@@ -1225,6 +1346,68 @@
 	icnss_pm_relax(priv);
 }
 
+/*
+ * Workqueue handler draining priv->soc_wake_msg_list.  Invariant: the
+ * soc_wake_msg_lock is held at every evaluation of the while condition;
+ * it is dropped while an event is processed and re-taken before the
+ * next iteration.  Sync events are completed (and freed by the poster);
+ * async events are freed here.
+ */
+static void icnss_soc_wake_msg_work(struct work_struct *work)
+{
+	struct icnss_priv *priv =
+		container_of(work, struct icnss_priv, soc_wake_msg_work);
+	struct icnss_soc_wake_event *event;
+	unsigned long flags;
+	int ret;
+
+	icnss_pm_stay_awake(priv);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+
+	while (!list_empty(&priv->soc_wake_msg_list)) {
+		event = list_first_entry(&priv->soc_wake_msg_list,
+					 struct icnss_soc_wake_event, list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type,
+			     priv->state);
+
+		switch (event->type) {
+		case ICNSS_SOC_WAKE_REQUEST_EVENT:
+			ret = icnss_event_soc_wake_request(priv,
+							   event->data);
+			break;
+		case ICNSS_SOC_WAKE_RELEASE_EVENT:
+			ret = icnss_event_soc_wake_release(priv,
+							   event->data);
+			break;
+		default:
+			icnss_pr_err("Invalid Event type: %d", event->type);
+			kfree(event);
+			/* Re-take the lock before continue: the loop
+			 * condition and loop top expect it held.
+			 */
+			spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+			continue;
+		}
+
+		priv->stats.soc_wake_events[event->type].processed++;
+
+		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type, ret,
+			     priv->state);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+		if (event->sync) {
+			/* Poster owns and frees sync events. */
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		kfree(event);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+	icnss_pm_relax(priv);
+}
+
 static int icnss_msa0_ramdump(struct icnss_priv *priv)
 {
 	struct ramdump_segment segment;
@@ -1963,6 +2146,71 @@
 }
 EXPORT_SYMBOL(icnss_set_fw_log_mode);
 
+/*
+ * Take a SOC wake reference so the WLAN SoC stays accessible.
+ * Returns 0 on success (including when the SoC is already awake),
+ * or a negative errno for invalid device/driver state.
+ */
+int icnss_force_wake_request(struct device *dev)
+{
+	struct icnss_priv *priv;
+	int count = 0;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* Fetch drvdata only after the NULL check: dev_get_drvdata()
+	 * dereferences dev and would crash on a NULL device.
+	 */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake request");
+
+	if (atomic_read(&priv->soc_wake_ref_count)) {
+		count = atomic_inc_return(&priv->soc_wake_ref_count);
+		icnss_pr_dbg("SOC already awake, Ref count: %d", count);
+		return 0;
+	}
+
+	/* First holder: post the wake request to the SOC wake workqueue;
+	 * the handler increments the ref count on firmware acceptance.
+	 */
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_request);
+
+/*
+ * Drop a SOC wake reference.  Posts the release event; its handler
+ * sends the firmware release message once the last reference is gone.
+ */
+int icnss_force_wake_release(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* Fetch drvdata only after the NULL check: dev_get_drvdata()
+	 * dereferences dev and would crash on a NULL device.
+	 */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake release");
+
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_release);
+
+/*
+ * Return the current SOC wake reference count (non-zero means awake),
+ * or a negative errno for invalid device/driver state.
+ */
+int icnss_is_device_awake(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* Only read drvdata after the NULL check; dev_get_drvdata()
+	 * dereferences dev.
+	 */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	return atomic_read(&priv->soc_wake_ref_count);
+}
+EXPORT_SYMBOL(icnss_is_device_awake);
+
 int icnss_athdiag_read(struct device *dev, uint32_t offset,
 		       uint32_t mem_type, uint32_t data_len,
 		       uint8_t *output)
@@ -2656,6 +2904,7 @@
 
 	spin_lock_init(&priv->event_lock);
 	spin_lock_init(&priv->on_off_lock);
+	spin_lock_init(&priv->soc_wake_msg_lock);
 	mutex_init(&priv->dev_lock);
 
 	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
@@ -2668,10 +2917,21 @@
 	INIT_WORK(&priv->event_work, icnss_driver_event_work);
 	INIT_LIST_HEAD(&priv->event_list);
 
+	priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
+					    WQ_UNBOUND, 1);
+	if (!priv->soc_wake_wq) {
+		icnss_pr_err("Soc wake Workqueue creation failed\n");
+		ret = -EFAULT;
+		goto out_destroy_wq;
+	}
+
+	INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
+	INIT_LIST_HEAD(&priv->soc_wake_msg_list);
+
 	ret = icnss_register_fw_service(priv);
 	if (ret < 0) {
 		icnss_pr_err("fw service registration failed: %d\n", ret);
-		goto out_destroy_wq;
+		goto out_destroy_soc_wq;
 	}
 
 	icnss_enable_recovery(priv);
@@ -2697,6 +2957,8 @@
 
 	return 0;
 
+out_destroy_soc_wq:
+	destroy_workqueue(priv->soc_wake_wq);
 out_destroy_wq:
 	destroy_workqueue(priv->event_wq);
 smmu_cleanup:
@@ -2733,6 +2995,9 @@
 	if (priv->event_wq)
 		destroy_workqueue(priv->event_wq);
 
+	if (priv->soc_wake_wq)
+		destroy_workqueue(priv->soc_wake_wq);
+
 	priv->iommu_domain = NULL;
 
 	icnss_hw_power_off(priv);
diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h
index cd5d6dd..44efede 100644
--- a/drivers/soc/qcom/icnss2/main.h
+++ b/drivers/soc/qcom/icnss2/main.h
@@ -55,6 +55,12 @@
 	ICNSS_DRIVER_EVENT_MAX,
 };
 
+enum icnss_soc_wake_event_type {
+	ICNSS_SOC_WAKE_REQUEST_EVENT,
+	ICNSS_SOC_WAKE_RELEASE_EVENT,
+	ICNSS_SOC_WAKE_EVENT_MAX,
+};
+
 struct icnss_event_server_arrive_data {
 	unsigned int node;
 	unsigned int port;
@@ -74,6 +80,15 @@
 	void *data;
 };
 
+struct icnss_soc_wake_event {
+	struct list_head list;
+	enum icnss_soc_wake_event_type type;
+	bool sync;
+	struct completion complete;
+	int ret;
+	void *data;
+};
+
 enum icnss_driver_state {
 	ICNSS_WLFW_CONNECTED,
 	ICNSS_POWER_ON,
@@ -150,6 +165,11 @@
 	} events[ICNSS_DRIVER_EVENT_MAX];
 
 	struct {
+		u32 posted;
+		u32 processed;
+	} soc_wake_events[ICNSS_SOC_WAKE_EVENT_MAX];
+
+	struct {
 		uint32_t request;
 		uint32_t free;
 		uint32_t enable;
@@ -210,6 +230,9 @@
 	u32 exit_power_save_req;
 	u32 exit_power_save_resp;
 	u32 exit_power_save_err;
+	u32 soc_wake_req;
+	u32 soc_wake_resp;
+	u32 soc_wake_err;
 };
 
 #define WLFW_MAX_TIMESTAMP_LEN 32
@@ -282,10 +305,14 @@
 	size_t smmu_iova_ipa_len;
 	struct qmi_handle qmi;
 	struct list_head event_list;
+	struct list_head soc_wake_msg_list;
 	spinlock_t event_lock;
+	spinlock_t soc_wake_msg_lock;
 	struct work_struct event_work;
 	struct work_struct fw_recv_msg_work;
+	struct work_struct soc_wake_msg_work;
 	struct workqueue_struct *event_wq;
+	struct workqueue_struct *soc_wake_wq;
 	phys_addr_t msa_pa;
 	phys_addr_t msi_addr_pa;
 	dma_addr_t msi_addr_iova;
@@ -342,6 +369,7 @@
 	struct icnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
 	void *get_info_cb_ctx;
 	int (*get_info_cb)(void *ctx, void *event, int event_len);
+	atomic_t soc_wake_ref_count;
 };
 
 struct icnss_reg_info {
@@ -358,5 +386,9 @@
 			    u32 flags, void *data);
 void icnss_allow_recursive_recovery(struct device *dev);
 void icnss_disallow_recursive_recovery(struct device *dev);
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type);
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+			      enum icnss_soc_wake_event_type type,
+			      u32 flags, void *data);
 #endif
 
diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c
index 3a96131..225afb1 100644
--- a/drivers/soc/qcom/icnss2/qmi.c
+++ b/drivers/soc/qcom/icnss2/qmi.c
@@ -413,6 +413,82 @@
 	return ret;
 }
 
+/*
+ * Send a QMI SOC wake request/release message to the WLAN firmware
+ * and synchronously wait (qmi_timeout) for the response.  Rejected
+ * requests and timeouts bump stats.soc_wake_err and return a
+ * negative value.
+ */
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+			   enum wlfw_soc_wake_enum_v01 type)
+{
+	int ret;
+	struct wlfw_soc_wake_req_msg_v01 *req;
+	struct wlfw_soc_wake_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+
+	if (!priv)
+		return -ENODEV;
+
+	/* No point messaging firmware that is already down. */
+	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+		return -EINVAL;
+
+	icnss_pr_dbg("Sending soc wake msg, type: 0x%x\n",
+		     type);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+	req->wake_valid = 1;
+	req->wake = type;
+
+	priv->stats.soc_wake_req++;
+
+	/* Transaction must be initialized before the request is sent so
+	 * the response decoder is registered.
+	 */
+	ret = qmi_txn_init(&priv->qmi, &txn,
+			   wlfw_soc_wake_resp_msg_v01_ei, resp);
+
+	if (ret < 0) {
+		icnss_pr_err("Fail to init txn for wake msg resp %d\n",
+			     ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&priv->qmi, NULL, &txn,
+			       QMI_WLFW_SOC_WAKE_REQ_V01,
+			       WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_soc_wake_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		icnss_pr_err("Fail to send soc wake msg %d\n", ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, priv->ctrl_params.qmi_timeout);
+	if (ret < 0) {
+		icnss_qmi_fatal_err("SOC wake timed out with ret %d\n",
+				    ret);
+		goto out;
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_qmi_fatal_err(
+			"SOC wake request rejected,result:%d error:%d\n",
+			resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	priv->stats.soc_wake_resp++;
+
+	kfree(resp);
+	kfree(req);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	priv->stats.soc_wake_err++;
+	return ret;
+}
+
 int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv)
 {
 	int ret;
@@ -2196,7 +2272,7 @@
 	if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
-	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+	if (test_bit(ICNSS_FW_DOWN, &plat_priv->state))
 		return -EINVAL;
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h
index c4c42ce..f4c1d2b 100644
--- a/drivers/soc/qcom/icnss2/qmi.h
+++ b/drivers/soc/qcom/icnss2/qmi.h
@@ -139,6 +139,12 @@
 {
 	return 0;
 }
+
+/* Stub for builds without ICNSS QMI support: report success so the
+ * SOC-wake paths become no-ops.  Declared static inline because this
+ * is a header-defined body; a plain external definition would cause
+ * multiple-definition link errors when the header is included by
+ * more than one translation unit.
+ */
+static inline int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+					 enum wlfw_soc_wake_enum_v01 type)
+{
+	return 0;
+}
 #else
 int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv);
 int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data);
@@ -177,6 +183,8 @@
 int wlfw_exit_power_save_send_msg(struct icnss_priv *priv);
 int icnss_wlfw_get_info_send_sync(struct icnss_priv *priv, int type,
 				  void *cmd, int cmd_len);
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+			   enum wlfw_soc_wake_enum_v01 type);
 #endif
 
 #endif /* __ICNSS_QMI_H__*/
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 6fa278f..1df009f 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1305,7 +1305,7 @@
 	 * Fallback to serial loading of blobs if the
 	 * workqueue creatation failed during module init.
 	 */
-	if (pil_wq) {
+	if (pil_wq && !(desc->sequential_loading)) {
 		ret = pil_load_segs(desc);
 		if (ret)
 			goto err_deinit_image;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index c83b038..29fa4b6 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -63,6 +63,7 @@
 	bool shutdown_fail;
 	bool modem_ssr;
 	bool clear_fw_region;
+	bool sequential_loading;
 	u32 subsys_vmid;
 	bool signal_aop;
 	struct mbox_client cl;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 0000000..9cff905
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB			0x010
+#define QDSP6SS_DBG_CFG			0x018
+#define QDSP6SS_NMI_CFG			0x40
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE		0x180
+#define MSS_MODEM_HALT_BASE		0x200
+#define MSS_NC_HALT_BASE		0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS		0x1
+#define STATUS_XPU_UNLOCKED		0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE			0x00
+#define RMB_PBL_STATUS			0x04
+#define RMB_MBA_COMMAND			0x08
+#define RMB_MBA_STATUS			0x0C
+#define RMB_PMI_META_DATA		0x10
+#define RMB_PMI_CODE_START		0x14
+#define RMB_PMI_CODE_LENGTH		0x18
+#define RMB_PROTOCOL_VERSION		0x1C
+#define RMB_MBA_DEBUG_INFORMATION	0x20
+
+#define POLL_INTERVAL_US		50
+
+#define CMD_META_DATA_READY		0x1
+#define CMD_LOAD_READY			0x2
+#define CMD_PILFAIL_NFY_MBA		0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS	0x3
+#define STATUS_AUTH_COMPLETE		0x4
+#define STATUS_MBA_UNLOCKED		0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON			BIT(0)
+#define EXTERNAL_BHS_STATUS		BIT(4)
+#define BHS_TIMEOUT_US			50
+
+#define MSS_RESTART_PARAM_ID		0x2
+#define MSS_RESTART_ID			0xA
+
+#define MSS_MAGIC			0XAABADEAD
+
+/* Timeout value for MBA boot when minidump is enabled */
+#define MBA_ENCRYPTION_TIMEOUT	3000
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, 0644);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, 0644);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, 0644);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, 0644);
+
+/* Dump all PBL/MBA interface (RMB) registers to the kernel log to aid
+ * modem boot-failure debugging; optionally panic for a full ramdump
+ * when modem_trigger_panic holds the MSS_MAGIC cookie.
+ */
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+				readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+				readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+				readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	/* Opt-in crash (module param) to collect a system ramdump. */
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+/* Turn on the external CX-rail block-head-switch (BHS), if one is
+ * mapped, and poll until hardware confirms it is powered.
+ */
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	u32 val;
+
+	if (!drv->cxrail_bhs)
+		return 0;
+
+	val = readl_relaxed(drv->cxrail_bhs) | EXTERNAL_BHS_ON;
+	writel_relaxed(val, drv->cxrail_bhs);
+
+	/* Wait for the status bit to confirm the BHS is actually on. */
+	return readl_poll_timeout(drv->cxrail_bhs, val,
+				  val & EXTERNAL_BHS_STATUS, 1,
+				  BHS_TIMEOUT_US);
+}
+
+/* Drop the external BHS vote; no-op when no CX-rail BHS is mapped.
+ * Always returns 0.
+ */
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 val;
+
+	if (!drv->cxrail_bhs)
+		return 0;
+
+	val = readl_relaxed(drv->cxrail_bhs);
+	writel_relaxed(val & ~EXTERNAL_BHS_ON, drv->cxrail_bhs);
+
+	return 0;
+}
+
+/*
+ * Enable the MSS bus/boot clocks in dependency order.  On failure the
+ * unwind disables only the clocks that were successfully enabled:
+ * each error label starts at the clock enabled *before* the one that
+ * failed (the failed clock itself must not be disabled, or its enable
+ * refcount underflows).
+ */
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+err_ahb_clk:
+	return ret;
+}
+
+/* Disable the MSS clocks in reverse of pil_mss_enable_clks() order.
+ * The AHB clock is left running when a separate AHB vote is held
+ * (ahb_clk_vote); the vote holder disables it itself.
+ */
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+/* Set or clear the MSS bit in the PDC sync register so the PDC
+ * observes the modem reset sequence; no-op when pdc_sync is unmapped.
+ */
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+	u32 val = 0;
+	u32 mss_pdc_mask = BIT(drv->mss_pdc_offset);
+
+	if (drv->pdc_sync) {
+		val = readl_relaxed(drv->pdc_sync);
+		if (pdc_sync)
+			val |= mss_pdc_mask;
+		else
+			val &= ~mss_pdc_mask;
+		writel_relaxed(val, drv->pdc_sync);
+		/* Ensure PDC is written before next write */
+		wmb();
+		udelay(2);
+	}
+}
+
+/* Write the alternate-reset register, if present, with a barrier and
+ * settle delay so the write lands before the restart register access.
+ */
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+	if (drv->alt_reset) {
+		writel_relaxed(val, drv->alt_reset);
+		/* Ensure alt reset is written before restart reg */
+		wmb();
+		udelay(2);
+	}
+}
+
+/* Assert (mss_restart != 0) or deassert the MSS restart line, either
+ * through a direct register write or, on secure targets
+ * (restart_reg_sec), an SCM call into TZ.
+ */
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		/* Ensure physical address access is done before returning.*/
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+					MSS_RESTART_ID), &desc);
+		scm_ret = desc.ret[0];
+		/* NOTE(review): scm_ret is logged but not propagated. */
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+/* Assert the full MSS reset sequence: PDC sync, alt reset, optional
+ * clock teardown (reset_clk targets), then the restart register.
+ */
+int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	pil_mss_pdc_sync(drv, 1);
+	pil_mss_alt_reset(drv, 1);
+	if (drv->reset_clk) {
+		pil_mss_disable_clks(drv);
+		/* disable_clks() skips AHB when a vote is held; drop it
+		 * explicitly across reset.
+		 */
+		if (drv->ahb_clk_vote)
+			clk_disable_unprepare(drv->ahb_clk);
+	}
+
+	ret = pil_mss_restart_reg(drv, true);
+
+	return ret;
+}
+
+/* Release the MSS reset in reverse order of pil_mss_assert_resets(). */
+int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		return ret;
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+
+	/* NOTE(review): enable_clks() return value is ignored here -
+	 * confirm a partial clock bringup is acceptable on this path.
+	 */
+	if (drv->reset_clk)
+		pil_mss_enable_clks(drv);
+	pil_mss_alt_reset(drv, 0);
+	pil_mss_pdc_sync(drv, false);
+
+	return ret;
+}
+
+/*
+ * Poll the RMB status registers until PBL and then MBA report boot
+ * completion, bounded by the pbl_mba_boot_timeout_ms module param
+ * (unbounded when timeouts are disabled).
+ * NOTE(review): the global module param is permanently overwritten
+ * when qcom,minidump-id is present - confirm this is intended.
+ */
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val;
+
+	if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
+		pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;
+
+	val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Shut the modem subsystem down: halt its AXI ports, pulse the reset
+ * sequence (PDC sync / restart reg with 32kHz settle delays), and if
+ * it had booted, drop its clocks and power.
+ */
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	/* Newer targets map the three halt registers individually. */
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+									ret);
+	}
+
+	pil_mss_pdc_sync(drv, true);
+	/* Wait 6 32kHz sleep cycles for PDC SYNC true */
+	udelay(200);
+	pil_mss_restart_reg(drv, 1);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret =  pil_mss_restart_reg(drv, 0);
+	/* Wait 6 32kHz sleep cycles for reset false */
+	udelay(200);
+	pil_mss_pdc_sync(drv, false);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+/*
+ * Tear down the modem image.  When err_path is set (boot failed),
+ * first notify the MBA of the PIL failure and wait for it to unlock
+ * the MBA region.  Always shuts the subsystem down and frees the
+ * MBA/DP buffer if it is still allocated in non-fixed memory.
+ */
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+									ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/* In case of any failure where reclaiming MBA and DP memory
+	 * could not happen, free the memory here
+	 */
+	if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+		/* Reassign the region back to Linux before freeing it. */
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+/*
+ * Acquire the proxy votes needed while the modem boots: vreg_mx at
+ * the DT-specified voltage, the optional MSS rail (vreg), then the
+ * common q6v5 proxy votes.  On any failure after vreg_mx is enabled,
+ * the out: label unwinds vreg_mx; the vreg unwind happens inline.
+ */
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+									ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	if (drv->vreg) {
+		ret = of_property_read_u32(pil->dev->of_node, "vdd_mss-uV",
+								&uv);
+		if (ret) {
+			dev_err(pil->dev,
+				"missing vdd_mss-uV property(rc:%d)\n", ret);
+			goto out;
+		}
+
+		ret = regulator_set_voltage(drv->vreg, uv,
+						INT_MAX);
+		if (ret) {
+			dev_err(pil->dev, "Failed to set vreg voltage(rc:%d)\n",
+									ret);
+			goto out;
+		}
+
+		ret = regulator_set_load(drv->vreg, 100000);
+		if (ret < 0) {
+			dev_err(pil->dev, "Failed to set vreg mode(rc:%d)\n",
+									ret);
+			goto out;
+		}
+		ret = regulator_enable(drv->vreg);
+		if (ret) {
+			dev_err(pil->dev, "Failed to enable vreg(rc:%d)\n",
+				ret);
+			regulator_set_voltage(drv->vreg, 0, INT_MAX);
+			goto out;
+		}
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	/* q6v5 votes failed after vreg was enabled: unwind vreg here,
+	 * then fall through so out: unwinds vreg_mx too.
+	 */
+	if (ret && drv->vreg) {
+		regulator_disable(drv->vreg);
+		regulator_set_voltage(drv->vreg, 0, INT_MAX);
+	}
+out:
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+/* Release the proxy votes taken by pil_mss_make_proxy_votes(): the
+ * common q6v5 votes, then vreg_mx, then the optional MSS rail.
+ */
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *q6 = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+
+	regulator_disable(q6->vreg_mx);
+	regulator_set_voltage(q6->vreg_mx, 0, INT_MAX);
+
+	if (!q6->vreg)
+		return;
+
+	regulator_disable(q6->vreg);
+	regulator_set_voltage(q6->vreg, 0, INT_MAX);
+}
+
+/*
+ * Tell TZ (PAS_MEM_SETUP) which DDR region holds the modem image.
+ * Returns 0 immediately when the target does not require the call;
+ * otherwise returns the SCM transport error or TZ's own status.
+ * (The legacy pas_init_image_req struct the old code populated was
+ * never read - the SCM call carries everything in desc - so it has
+ * been removed.)
+ */
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	struct scm_desc desc = {0};
+	u32 scm_ret = 0;
+	int ret;
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	desc.args[0] = md->pas_id;
+	desc.args[1] = addr;
+	desc.args[2] = size;
+	desc.arginfo = SCM_ARGS(3);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+			&desc);
+	scm_ret = desc.ret[0];
+
+	if (ret)
+		return ret;
+	/* Propagate TZ's status when the SCM transport itself succeeded. */
+	return scm_ret;
+}
+
+/*
+ * Boot the modem: power/clock bringup, full reset pulse, program the
+ * MBA (and optional debug-policy) image address into the RMB or RST
+ * vector, release Q6 from reset, and wait for the MBA to come up when
+ * it self-authenticates.  On failure the RMB registers are dumped and
+ * everything is unwound in reverse order.
+ */
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	u32 debug_val = 0;
+	int ret;
+
+	trace_pil_func(__func__);
+	/* Boot from the MBA/DP buffer when one was loaded. */
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		/* Save state of modem debug register before full reset */
+		debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Assert reset to subsystem */
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
+	if (ret)
+		goto err_restart;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		/* Restore (or override via module param) the debug cfg. */
+		writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
+		if (modem_dbg_cfg)
+			writel_relaxed(modem_dbg_cfg,
+				drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+/**
+ * pil_mss_reset_load_mba() - Load the MBA (and optional debug policy) image
+ * into DMA memory and boot the modem Q6.
+ * @pil: PIL descriptor of the modem subsystem.
+ *
+ * Requests the MBA firmware ("mba.mbn", or "mba.b00" for non-ELF images) and
+ * the optional "msadp" debug policy, copies both into a single DMA buffer
+ * (the DP at the 1MB offset), assigns the buffer to the subsystem VM when
+ * configured, and calls pil_mss_reset().  When a fixed MBA memory device
+ * already holds a loaded image, the load is skipped and only the reset runs.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw = NULL, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret;
+	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
+
+	trace_pil_func(__func__);
+	/* Fixed MBA memory keeps the image across boots: skip the reload. */
+	if (drv->mba_dp_virt && md->mba_mem_dev_fixed)
+		goto mss_reset;
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* MBA occupies the first 1MB; DP (if any) is appended after it. */
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
+	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
+
+	/* Debug policy is optional; absence is not an error. */
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
+	}
+
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	if (fw->size <= SZ_1M) {
+		/* Ensures memcpy is done for max 1MB fw size */
+		memcpy(mba_dp_virt, data, fw->size);
+	} else {
+		dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_mba_data;
+	}
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	/* Hand the buffer to the subsystem VM before the modem touches it. */
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	dp_fw = NULL;
+	fw = NULL;
+
+mss_reset:
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	return 0;
+
+err_mss_reset:
+	/*
+	 * NOTE(review): on the fixed-memory reuse path this also frees the
+	 * retained buffer; mba_dp_virt is cleared below so the next powerup
+	 * reloads the image from scratch.
+	 */
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	if (fw)
+		release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+/**
+ * pil_mss_debug_reset() - Run the minidump debug-reset sequence on the modem.
+ * @pil: PIL descriptor of the modem subsystem.
+ *
+ * Only acts when minidump is enabled for this subsystem and encryption is
+ * required.  Raises an NMI to the Q6, toggles the subsystem reset, re-runs
+ * the Q6 boot sequence, then waits a fixed window for the modem to encrypt
+ * its minidump contents before acknowledging via a second NMI write.
+ *
+ * Return: 0 on success or when no debug reset is needed, negative errno
+ * on failure.
+ */
+int pil_mss_debug_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 encryption_status;
+	int ret;
+
+
+	if (!pil->minidump_ss)
+		return 0;
+
+	encryption_status = pil->minidump_ss->encryption_status;
+
+	if ((pil->minidump_ss->md_ss_enable_status != MD_SS_ENABLED) ||
+		encryption_status == MD_SS_ENCR_NOTREQ)
+		return 0;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		return ret;
+
+	/*
+	 * NOTE(review): minidump_ss was already checked non-NULL above, so
+	 * this (and the later) re-check is always true; kept as-is.
+	 */
+	if (pil->minidump_ss) {
+		writel_relaxed(0x1, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(2);
+	}
+	/* Assert reset to subsystem */
+	pil_mss_restart_reg(drv, true);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret =  pil_mss_restart_reg(drv, false);
+	if (ret)
+		goto err_restart;
+	/* Let write complete before proceeding */
+	mb();
+	udelay(200);
+	ret = pil_q6v5_reset(pil);
+	/*
+	 * Need to Wait for timeout for debug reset sequence to
+	 * complete before returning
+	 */
+	pr_info("Minidump: waiting encryption to complete\n");
+	msleep(13000);
+	if (pil->minidump_ss) {
+		writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(200);
+	}
+	if (ret)
+		goto err_restart;
+	return 0;
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
+/**
+ * pil_msa_auth_modem_mdt() - Hand modem image metadata to the MBA for
+ * authentication.
+ * @pil: PIL descriptor of the modem subsystem.
+ * @metadata: ELF headers / hash segment to authenticate.
+ * @size: Size of @metadata in bytes.
+ *
+ * Copies the metadata into a 4K-aligned DMA buffer, optionally assigns it
+ * to the subsystem VM, writes its address to the RMB and polls RMB_MBA_STATUS
+ * until authentication succeeds, an MBA error is reported, or the timeout
+ * (modem_auth_timeout_ms, or unbounded when timeouts are disabled) expires.
+ * On failure the MBA/DP buffer is torn down and the modem is shut down.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+				  size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	unsigned long attrs = 0;
+
+	trace_pil_func(__func__);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_SKIP_ZEROING;
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
+					GFP_KERNEL, attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	/* Negative status values are MBA-reported error codes. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+			POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+								ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);
+
+	if (!ret)
+		return ret;
+
+fail:
+	/* Authentication failed: log RMB state and tear the modem down. */
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+			drv->q6->mba_dp_virt = NULL;
+		}
+
+	}
+	return ret;
+}
+
+/*
+ * init_image hook for self-auth targets: boot the MBA first, then have it
+ * authenticate the modem image headers.
+ */
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				const u8 *metadata, size_t size)
+{
+	int ret;
+
+	ret = pil_mss_reset_load_mba(pil);
+	if (ret)
+		return ret;
+
+	return pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+/*
+ * verify_blob hook: report each loaded modem segment to the MBA by bumping
+ * RMB_PMI_CODE_LENGTH (issuing CMD_LOAD_READY on the first segment), and
+ * fail fast if the MBA already flagged an error.
+ */
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Negative status values are MBA-reported error codes. */
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * auth_and_reset hook: wait for the MBA to finish authenticating all modem
+ * segments (or report an error / time out), then reclaim the MBA/DP buffer
+ * and drop the AHB clock vote.  Returns 0 on success, negative errno on
+ * timeout or MBA-reported failure.
+ */
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	/* Buffer is freed on both success and failure (unless fixed). */
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ * Selected at probe time when the "qcom,pil-self-auth" DT property is set.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 0000000..0310234
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+/* Per-device state for the modem PIL/subsystem-restart driver. */
+struct modem_data {
+	struct q6v5_data *q6;		/* Q6 core state from pil_q6v5_init() */
+	struct subsys_device *subsys;	/* handle from subsys_register() */
+	struct subsys_desc subsys_desc;	/* restart callbacks and IRQ handlers */
+	void *ramdump_dev;		/* "modem" ramdump device */
+	void *minidump_dev;		/* "md_modem" ramdump device */
+	bool crash_shutdown;		/* set while we force-stop the modem */
+	u32 pas_id;			/* from "qcom,pas-id" DT property */
+	bool ignore_errors;		/* suppress IRQs during restart */
+	struct completion stop_ack;	/* completed by stop-ack interrupt */
+	void __iomem *rmb_base;		/* RMB registers (self-auth only) */
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;	/* default DMA device for MBA buffer */
+	struct device *mba_mem_dev_fixed; /* set by pil-mba-mem child driver */
+	unsigned long attrs_dma;	/* DMA attrs used for the MBA buffer */
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+/* Load the MBA (and optional debug policy) and boot the modem Q6. */
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+/* @err_path selects the error-path variant of the deinit sequence. */
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+int pil_mss_assert_resets(struct q6v5_data *drv);
+int pil_mss_deassert_resets(struct q6v5_data *drv);
+/* Minidump debug-reset sequence; no-op unless minidump is enabled. */
+int pil_mss_debug_reset(struct pil_desc *pil);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 0000000..db48b1a
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	256U
+#define STOP_ACK_TIMEOUT_MS	1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+/*
+ * Read the modem's subsystem-failure-reason (SFR) string from SMEM (using
+ * the smem-id configured in DT) and log it.  Silently returns when no
+ * smem-id is configured or the string is unavailable/empty.
+ */
+static void log_modem_sfr(struct modem_data *drv)
+{
+	size_t size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	if (drv->q6->smem_id == -1)
+		return;
+
+	smem_reason = qcom_smem_get(QCOM_SMEM_HOST_ANY, drv->q6->smem_id,
+								&size);
+	if (IS_ERR(smem_reason) || !size) {
+		pr_err("modem SFR: (unknown, qcom_smem_get failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem SFR: (unknown, empty string found).\n");
+		return;
+	}
+
+	/* Copy into a bounded local buffer before logging. */
+	strlcpy(reason, smem_reason, min(size, (size_t)MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+}
+
+/* Log the failure reason and kick off a subsystem restart of the modem. */
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr(drv);
+	/* Further error IRQs during the restart are expected; ignore them. */
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+/* IRQ handler: modem raised a fatal error; restart it unless we caused it. */
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force stop BIT */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	pr_err("Fatal error on the modem.\n");
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem acknowledged a force-stop; wake modem_shutdown(). */
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem acknowledged shutdown; forward to the subsys core. */
+static irqreturn_t modem_shutdown_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop shutdown interrupt from modem\n");
+	complete_shutdown_ack(drv->subsys);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem requested that ramdump collection be skipped. */
+static irqreturn_t modem_ramdump_disable_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received ramdump disable interrupt from modem\n");
+	drv->subsys_desc.ramdump_disable = 1;
+	return IRQ_HANDLED;
+}
+
+/*
+ * subsys shutdown callback: optionally force-stop the modem via the SMEM
+ * state bit (waiting up to STOP_ACK_TIMEOUT_MS for the stop-ack IRQ), then
+ * run the PIL shutdown sequence.  Always returns 0.
+ */
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	/* wait_for_completion_timeout() returns remaining jiffies (0 = timeout) */
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	/* Skip force-stop if the modem already crashed on its own. */
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_irq) {
+		pr_warn("Ramdump disable value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+/*
+ * subsys powerup callback: reset per-boot state and boot the modem via PIL.
+ * Return: 0 on success or when the target is not loadable, else pil_boot()'s
+ * error code.
+ */
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+/*
+ * subsys crash-shutdown callback (panic path): set the force-stop bit and
+ * give the modem a fixed window to stop.  crash_shutdown is set first so
+ * the resulting err-fatal IRQ is ignored.
+ */
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		/* Cannot wait on the IRQ in this context; sleep instead. */
+		msleep(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+/*
+ * subsys ramdump callback: run the minidump debug-reset sequence, reboot
+ * the MBA so memory can be dumped, collect the dump, then release all
+ * resources.
+ *
+ * Fix vs. original: failures of pil_mss_debug_reset() and
+ * pil_mss_reset_load_mba() returned without calling
+ * pil_mss_remove_proxy_votes(), permanently leaking the proxy regulator/
+ * clock votes taken just above.  Error paths now drop the votes too.
+ *
+ * Return: 0 on success (or when @enable is 0), negative errno on failure.
+ */
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_debug_reset(&drv->q6->desc);
+	if (ret)
+		goto remove_votes;
+
+	/* Drop and re-take the votes around the MBA reboot, as before. */
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		goto remove_votes;
+
+	ret = pil_do_ramdump(&drv->q6->desc,
+			drv->ramdump_dev, drv->minidump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+remove_votes:
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+/*
+ * IRQ handler: modem watchdog bite.  Panics when a system ramdump is
+ * configured; otherwise marks the crash and restarts the subsystem.
+ */
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* A restart is already in flight; this bite is a side effect. */
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n")
+	if (drv->subsys_desc.system_debug)
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Register the "modem" subsystem-restart device with its callbacks and
+ * create the ramdump and minidump devices.  Also requests the AOP mailbox
+ * channel when "qcom,signal-aop" is set in DT.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+	drv->subsys_desc.ramdump_disable_handler =
+					modem_ramdump_disable_intr_handler;
+	drv->subsys_desc.shutdown_ack_handler = modem_shutdown_ack_intr_handler;
+
+	/*
+	 * NOTE(review): PTR_ERR(NULL) evaluates to 0, so a NULL q6 (the
+	 * not-loadable probe path never assigns it) makes this return 0
+	 * without registering the subsystem — confirm that is intended.
+	 */
+	if (IS_ERR_OR_NULL(drv->q6)) {
+		ret = PTR_ERR(drv->q6);
+		dev_err(&pdev->dev, "Pil q6 data is err %pK %d!!!\n",
+			drv->q6, ret);
+		goto err_subsys;
+	}
+
+	drv->q6->desc.modem_ssr = false;
+	drv->q6->desc.signal_aop = of_property_read_bool(pdev->dev.of_node,
+						"qcom,signal-aop");
+	if (drv->q6->desc.signal_aop) {
+		drv->q6->desc.cl.dev = &pdev->dev;
+		drv->q6->desc.cl.tx_block = true;
+		drv->q6->desc.cl.tx_tout = 1000;
+		drv->q6->desc.cl.knows_txdone = false;
+		drv->q6->desc.mbox = mbox_request_channel(&drv->q6->desc.cl, 0);
+		if (IS_ERR(drv->q6->desc.mbox)) {
+			ret = PTR_ERR(drv->q6->desc.mbox);
+			dev_err(&pdev->dev, "Failed to get mailbox channel %pK %d\n",
+				drv->q6->desc.mbox, ret);
+			goto err_subsys;
+		}
+	}
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+	drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+	if (!drv->minidump_dev) {
+		pr_err("%s: Unable to create a modem minidump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_minidump;
+	}
+
+	return 0;
+
+err_minidump:
+	destroy_ramdump_device(drv->ramdump_dev);
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+/*
+ * Parse all device-tree resources for a loadable modem: Q6 core state,
+ * RMB registers (self-auth), restart/PDC/alt-reset register regions,
+ * regulators, clocks and SMEM id; then initialise the PIL descriptor.
+ *
+ * Return: 0 on success, negative errno on any missing/invalid resource.
+ */
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	if (IS_ERR_OR_NULL(q6))
+		return PTR_ERR(q6);
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	/* Default ops; replaced below when self-auth is enabled. */
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6_desc->sequential_loading = of_property_read_bool(pdev->dev.of_node,
+						"qcom,sequential-fw-load");
+	q6->reset_clk = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-clk");
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	/* Either the plain or the secure restart register must exist. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "No restart register defined\n");
+			return -ENOMEM;
+		}
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	/* Optional PDC sync region; its offset is mandatory when present. */
+	q6->pdc_sync = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+	if (res) {
+		/* NOTE(review): ioremap result is not checked here — confirm. */
+		q6->pdc_sync = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (of_property_read_u32(pdev->dev.of_node,
+			"qcom,mss_pdc_offset", &q6->mss_pdc_offset)) {
+			dev_err(&pdev->dev,
+				"Offset for MSS PDC not specified\n");
+			return -EINVAL;
+		}
+
+	}
+
+	q6->alt_reset = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+	if (res) {
+		q6->alt_reset = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
+	q6->vreg = NULL;
+
+	/* vdd_mss is optional; vdd_mx and its voltage property are required. */
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					  resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+	of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	/* Defaulting smem_id to be not present */
+	q6->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", NULL)) {
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+					   &q6->smem_id);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(ret:%d)\n",
+				ret);
+			return ret;
+		}
+	}
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+/*
+ * Platform probe: allocate driver state, parse DT for loadable modems,
+ * populate the child pil-mba-mem device (if any) and register with the
+ * subsystem-restart framework.
+ */
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
+	return pil_subsys_init(drv, pdev);
+}
+
+/* Platform remove: undo pil_subsys_init() and release the PIL descriptor. */
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	destroy_ramdump_device(drv->minidump_dev);
+	/* NOTE(review): assumes a loadable target (drv->q6 set) — confirm. */
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+/*
+ * Child-device probe: record this device as the fixed DMA device for the
+ * MBA buffer in the parent modem driver's state.
+ */
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
+	drv = dev_get_drvdata(pdev->dev.parent);
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+	},
+};
+
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+	},
+};
+
+/*
+ * Module init: register the MBA-mem child driver first (so it is bound when
+ * the parent probes and populates its children), then the main MSS driver.
+ *
+ * Fix vs. original: when the second registration failed, the first driver
+ * was left registered; it is now unregistered on that path.
+ */
+static int __init pil_mss_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&pil_mss_driver);
+	if (ret)
+		platform_driver_unregister(&pil_mba_mem_driver);
+	return ret;
+}
+module_init(pil_mss_init);
+
+/*
+ * Module exit.
+ *
+ * Fix vs. original: pil_mba_mem_driver, registered in pil_mss_init(), was
+ * never unregistered, leaving a dangling platform driver after module
+ * unload.  Unregister both drivers, in reverse registration order.
+ */
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+	platform_driver_unregister(&pil_mba_mem_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 0000000..208e327
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-msa.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET			0x014
+#define QDSP6SS_GFMUX_CTL		0x020
+#define QDSP6SS_PWR_CTL			0x030
+#define QDSP6V6SS_MEM_PWR_CTL		0x034
+#define QDSP6SS_BHS_STATUS		0x078
+#define QDSP6SS_MEM_PWR_CTL		0x0B0
+#define QDSP6SS_STRAP_ACC		0x110
+#define QDSP6V62SS_BHS_STATUS		0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ			0x0
+#define AXI_HALTACK			0x4
+#define AXI_IDLE			0x8
+
+#define HALT_ACK_TIMEOUT_US		100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE			BIT(0)
+#define Q6SS_CORE_ARES			BIT(1)
+#define Q6SS_BUS_ARES_ENA		BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA			BIT(1)
+#define Q6SS_CLK_SRC_SEL_C		BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD		0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR	BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
+#define Q6SS_ETB_SLP_NRET_N		BIT(17)
+#define Q6SS_L2DATA_STBY_N		BIT(18)
+#define Q6SS_SLP_RET_N			BIT(19)
+#define Q6SS_CLAMP_IO			BIT(20)
+#define QDSS_BHS_ON			BIT(21)
+#define QDSS_LDO_BYP			BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON                 BIT(26)
+#define QDSP6v55_LDO_BYP                BIT(25)
+#define QDSP6v55_BHS_ON                 BIT(24)
+#define QDSP6v55_CLAMP_WL               BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM          BIT(22)
+#define L1IU_SLP_NRET_N                 BIT(15)
+#define L1DU_SLP_NRET_N                 BIT(14)
+#define L2PLRU_SLP_NRET_N               BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK        BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS            (200)
+#define BHS_CHECK_MAX_LOOPS             (200)
+#define QDSP6SS_XO_CBCR                 (0x0038)
+
+/* QDSP6v65 parameters */
+#define QDSP6SS_BOOT_CORE_START		(0x400)
+#define QDSP6SS_BOOT_CMD		(0x404)
+#define MSS_STATUS			(0x40)
+#define QDSP6SS_SLEEP			(0x3C)
+#define SLEEP_CHECK_MAX_LOOPS		(200)
+#define BOOT_FSM_TIMEOUT		(10000)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+								ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+		goto err_qdss_vote;
+	}
+
+	ret = clk_prepare_enable(drv->prng_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for prng(rc:%d)\n", ret);
+		goto err_prng_vote;
+	}
+
+	ret = clk_prepare_enable(drv->axis2_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for axis2(rc:%d)\n", ret);
+		goto err_axis2_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+								ret);
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+									ret);
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+err_cx_voltage:
+	clk_disable_unprepare(drv->axis2_clk);
+err_axis2_vote:
+	clk_disable_unprepare(drv->prng_clk);
+err_prng_vote:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+									ret);
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+	clk_disable_unprepare(drv->prng_clk);
+	clk_disable_unprepare(drv->axis2_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* Ensure the clamp assertion completes before MSS restart */
+	mb();
+}
+
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55) {
+		/* Subsystem driver expected to halt bus and assert reset */
+		return;
+	}
+	__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
+
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	u32 val, count;
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
+	val = readl_relaxed(cbcr_reg);
+	val |= 0x1;
+	writel_relaxed(val, cbcr_reg);
+
+	for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(cbcr_reg);
+		if (!(val & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -EINVAL;
+}
+
+static int __pil_q6v65_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val, count;
+	int ret;
+
+	val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+	val |= 0x1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_SLEEP);
+	for (count = SLEEP_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+		if (!(val & BIT(31)))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		dev_err(drv->desc.dev, "Sleep clock did not come on in time\n");
+		return -ETIMEDOUT;
+	}
+
+	/* De-assert QDSP6 stop core */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CORE_START);
+	/* De-assert stop core before starting boot FSM */
+	mb();
+	/* Trigger boot FSM */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
+
+	/* Wait for boot FSM to complete */
+	ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
+			(val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+
+	if (ret) {
+		dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
+		/* Reset the modem so that boot FSM is in reset state */
+		pil_mss_assert_resets(drv);
+		/* Wait 6 32kHz sleep cycles for reset */
+		udelay(200);
+		pil_mss_deassert_resets(drv);
+	}
+
+	return ret;
+}
+
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	trace_pil_func(__func__);
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Override the ACC value with input value */
+	if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+				&drv->override_acc_1))
+		writel_relaxed(drv->override_acc_1,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* BHS require xo cbcr to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+	mb();
+	udelay(1);
+
+	if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5 || drv->qdsp6v62_1_4) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Put LDO in bypass mode */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+					|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			val |= readl_relaxed(drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0 ; i <= 5 ; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+			drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+
+		if (drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5)
+			i = 29;
+		else
+			i = 28;
+
+		for ( ; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+
+	if (drv->qdsp6v65_1_0)
+		return __pil_q6v65_reset(pil);
+	else if (drv->qdsp6v55)
+		return __pil_q6v55_reset(pil);
+	else
+		return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret, vdd_pll;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return drv->reg_base;
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->clear_fw_region = false;
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->qdsp6v62_1_4 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-4");
+
+	drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-5");
+
+	drv->qdsp6v65_1_0 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v65-1-0");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "prng_clk") >= 0) {
+		drv->prng_clk = devm_clk_get(&pdev->dev, "prng_clk");
+		if (IS_ERR(drv->prng_clk))
+			return ERR_CAST(drv->prng_clk);
+	} else {
+		drv->prng_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "axis2_clk") >= 0) {
+		drv->axis2_clk = devm_clk_get(&pdev->dev, "axis2_clk");
+		if (IS_ERR(drv->axis2_clk))
+			return ERR_CAST(drv->axis2_clk);
+	} else {
+		drv->axis2_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		return ERR_CAST(prop);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+		&vdd_pll);
+	if (!ret) {
+		drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+		if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+			ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+							vdd_pll);
+			if (ret) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+
+			ret = regulator_set_load(drv->vreg_pll, 10000);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+		} else
+			drv->vreg_pll = NULL;
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 0000000..01b1cef
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;
+	struct clk *prng_clk;
+	struct clk *axis2_clk;
+	void __iomem *axi_halt_base; /* Halt base of q6, mss,
+				      * nc are in same 4K page
+				      */
+	void __iomem *axi_halt_q6;
+	void __iomem *axi_halt_mss;
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	void __iomem *pdc_sync;
+	void __iomem *alt_reset;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;
+	bool is_booted;
+	struct pil_desc desc;
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool qdsp6v62_1_4;
+	bool qdsp6v62_1_5;
+	bool qdsp6v65_1_0;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;
+	int override_acc_1;
+	int mss_pdc_offset;
+	int smem_id;
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+	bool reset_clk;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 00bfed4..63556d1 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1108,12 +1108,30 @@
 				"Failed to cancel/abort m_cmd\n");
 	}
 	if (mas->cur_xfer_mode == SE_DMA) {
-		if (xfer->tx_buf)
+		if (xfer->tx_buf) {
+			reinit_completion(&mas->xfer_done);
+			writel_relaxed(1, mas->base +
+				SE_DMA_TX_FSM_RST);
+			timeout =
+			wait_for_completion_timeout(&mas->xfer_done, HZ);
+			if (!timeout)
+				dev_err(mas->dev,
+					"DMA TX RESET failed\n");
 			geni_se_tx_dma_unprep(mas->wrapper_dev,
-					xfer->tx_dma, xfer->len);
-		if (xfer->rx_buf)
+				xfer->tx_dma, xfer->len);
+		}
+		if (xfer->rx_buf) {
+			reinit_completion(&mas->xfer_done);
+			writel_relaxed(1, mas->base +
+				SE_DMA_RX_FSM_RST);
+			timeout =
+			wait_for_completion_timeout(&mas->xfer_done, HZ);
+			if (!timeout)
+				dev_err(mas->dev,
+					"DMA RX RESET failed\n");
 			geni_se_rx_dma_unprep(mas->wrapper_dev,
-					xfer->rx_dma, xfer->len);
+				xfer->rx_dma, xfer->len);
+		}
 	}
 
 }
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index bda5af5..8e0cfd7 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -201,7 +201,7 @@
 		chip->temp = mili_celsius;
 	}
 
-	*temp = chip->temp < 0 ? 0 : chip->temp;
+	*temp = chip->temp;
 
 	return 0;
 }
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 941f7f4..d04ea03 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -83,12 +83,70 @@
 	*temp = last_temp * TSENS_TM_SCALE_DECI_MILLIDEG;
 }
 
+static int __tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+	void __iomem *srot_addr;
+	void __iomem *sensor_int_mask_addr;
+	unsigned int srot_val, crit_mask, crit_val;
+	void __iomem *int_mask_addr;
+
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	srot_val = readl_relaxed(srot_addr);
+	if (!(srot_val & TSENS_EN)) {
+		pr_err("TSENS device is not enabled\n");
+		return -ENODEV;
+	}
+
+	if (tmdev->ctrl_data->cycle_monitor) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
+		if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
+			writel_relaxed((crit_mask | crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
+		/* Update critical cycle monitoring */
+		mb();
+	}
+
+	if (tmdev->ctrl_data->wd_bark) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		crit_val = TSENS_TM_CRITICAL_WD_BARK;
+		if (tmdev->ctrl_data->wd_bark_mask)
+			writel_relaxed((crit_mask | crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		/* Update watchdog monitoring */
+		mb();
+	}
+
+	int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
+	writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
+
+	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+		TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+
+	return 0;
+}
+
 static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 {
 	struct tsens_device *tmdev = NULL, *tmdev_itr;
 	unsigned int code, ret, tsens_ret;
 	void __iomem *sensor_addr, *trdy;
-	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
+	int rc = 0, last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
 	static atomic_t in_tsens_reinit;
 
 	if (!sensor)
@@ -172,6 +230,13 @@
 			/* Notify thermal fwk */
 			list_for_each_entry(tmdev_itr,
 						&tsens_device_list, list) {
+				rc = __tsens2xxx_hw_init(tmdev_itr);
+				if (rc) {
+					pr_err(
+					"%s: Failed to re-initialize TSENS controller\n",
+						__func__);
+					BUG();
+				}
 				queue_work(tmdev_itr->tsens_reinit_work,
 					&tmdev_itr->therm_fwk_notify);
 			}
@@ -713,58 +778,11 @@
 
 static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 {
-	void __iomem *srot_addr;
-	void __iomem *sensor_int_mask_addr;
-	unsigned int srot_val, crit_mask, crit_val;
-	void __iomem *int_mask_addr;
+	int rc = 0;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
-	srot_val = readl_relaxed(srot_addr);
-	if (!(srot_val & TSENS_EN)) {
-		pr_err("TSENS device is not enabled\n");
-		return -ENODEV;
-	}
-
-	if (tmdev->ctrl_data->cycle_monitor) {
-		sensor_int_mask_addr =
-			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
-		crit_mask = readl_relaxed(sensor_int_mask_addr);
-		crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
-		if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
-			writel_relaxed((crit_mask | crit_val),
-				(TSENS_TM_CRITICAL_INT_MASK
-				(tmdev->tsens_tm_addr)));
-		else
-			writel_relaxed((crit_mask & ~crit_val),
-				(TSENS_TM_CRITICAL_INT_MASK
-				(tmdev->tsens_tm_addr)));
-		/*Update critical cycle monitoring*/
-		mb();
-	}
-
-	if (tmdev->ctrl_data->wd_bark) {
-		sensor_int_mask_addr =
-			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
-		crit_mask = readl_relaxed(sensor_int_mask_addr);
-		crit_val = TSENS_TM_CRITICAL_WD_BARK;
-		if (tmdev->ctrl_data->wd_bark_mask)
-			writel_relaxed((crit_mask | crit_val),
-			(TSENS_TM_CRITICAL_INT_MASK
-			(tmdev->tsens_tm_addr)));
-		else
-			writel_relaxed((crit_mask & ~crit_val),
-			(TSENS_TM_CRITICAL_INT_MASK
-			(tmdev->tsens_tm_addr)));
-		/*Update watchdog monitoring*/
-		mb();
-	}
-
-	int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
-	writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
-
-	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
-		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
-		TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+	rc = __tsens2xxx_hw_init(tmdev);
+	if (rc)
+		return rc;
 
 	spin_lock_init(&tmdev->tsens_crit_lock);
 	spin_lock_init(&tmdev->tsens_upp_low_lock);
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index bb146d7..d414a8a 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -30,30 +30,38 @@
 #define POST_VCO_DIV3_5_0_CLK	19
 #define CPHY_PCLK_SRC_MUX_0_CLK	20
 #define CPHY_PCLK_SRC_0_CLK	21
+#define SHADOW_CPHY_BYTECLK_SRC_0_CLK   22
+#define SHADOW_POST_VCO_DIV3_5_0_CLK    23
+#define SHADOW_CPHY_PCLK_SRC_MUX_0_CLK  24
+#define SHADOW_CPHY_PCLK_SRC_0_CLK      25
 
-#define VCO_CLK_1		22
-#define PLL_OUT_DIV_1_CLK	23
-#define BITCLK_SRC_1_CLK	24
-#define BYTECLK_SRC_1_CLK	25
-#define POST_BIT_DIV_1_CLK	26
-#define POST_VCO_DIV_1_CLK	27
-#define BYTECLK_MUX_1_CLK	28
-#define PCLK_SRC_MUX_1_CLK	29
-#define PCLK_SRC_1_CLK		30
-#define PCLK_MUX_1_CLK		31
-#define SHADOW_VCO_CLK_1		32
-#define SHADOW_PLL_OUT_DIV_1_CLK	33
-#define SHADOW_BITCLK_SRC_1_CLK		34
-#define SHADOW_BYTECLK_SRC_1_CLK	35
-#define SHADOW_POST_BIT_DIV_1_CLK	36
-#define SHADOW_POST_VCO_DIV_1_CLK	37
-#define SHADOW_PCLK_SRC_MUX_1_CLK	38
-#define SHADOW_PCLK_SRC_1_CLK		39
+#define VCO_CLK_1		26
+#define PLL_OUT_DIV_1_CLK	27
+#define BITCLK_SRC_1_CLK	28
+#define BYTECLK_SRC_1_CLK	29
+#define POST_BIT_DIV_1_CLK	30
+#define POST_VCO_DIV_1_CLK	31
+#define BYTECLK_MUX_1_CLK	32
+#define PCLK_SRC_MUX_1_CLK	33
+#define PCLK_SRC_1_CLK		34
+#define PCLK_MUX_1_CLK		35
+#define SHADOW_VCO_CLK_1		36
+#define SHADOW_PLL_OUT_DIV_1_CLK	37
+#define SHADOW_BITCLK_SRC_1_CLK		38
+#define SHADOW_BYTECLK_SRC_1_CLK	39
+#define SHADOW_POST_BIT_DIV_1_CLK	40
+#define SHADOW_POST_VCO_DIV_1_CLK	41
+#define SHADOW_PCLK_SRC_MUX_1_CLK	42
+#define SHADOW_PCLK_SRC_1_CLK		43
 /* CPHY clocks for DSI-1 PLL */
-#define CPHY_BYTECLK_SRC_1_CLK	40
-#define POST_VCO_DIV3_5_1_CLK	41
-#define CPHY_PCLK_SRC_MUX_1_CLK	42
-#define CPHY_PCLK_SRC_1_CLK	43
+#define CPHY_BYTECLK_SRC_1_CLK	44
+#define POST_VCO_DIV3_5_1_CLK	45
+#define CPHY_PCLK_SRC_MUX_1_CLK	46
+#define CPHY_PCLK_SRC_1_CLK	47
+#define SHADOW_CPHY_BYTECLK_SRC_1_CLK   48
+#define SHADOW_POST_VCO_DIV3_5_1_CLK    49
+#define SHADOW_CPHY_PCLK_SRC_MUX_1_CLK  50
+#define SHADOW_CPHY_PCLK_SRC_1_CLK      51
 
 
 /* DP PLL clocks */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index c65aa57..3aef2d1 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -201,6 +201,14 @@
 				unsigned int id, unsigned int prop);
 
 /*
+ * Following APIs set the array of mutually exclusive cables.
+ * The 'exclusive' argument indicates the array of mutually exclusive set
+ * of cables that cannot be attached simultaneously.
+ */
+extern int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+				const u32 *exclusive);
+
+/*
  * Following APIs register the notifier block in order to detect
  * the change of both state and property value for each external connector.
  *
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9887f4f..11e95d9 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -289,6 +289,15 @@
  */
 int iio_read_avail_channel_raw(struct iio_channel *chan,
 			       const int **vals, int *length);
+/**
+ * iio_write_channel_processed() - write to a given channel
+ * @chan:		The channel being queried.
+ * @val:		Value being written.
+ *
+ * Note processed writes to iio channels are converted to raw
+ * values before being written.
+ */
+int iio_write_channel_processed(struct iio_channel *chan, int val);
 
 /**
  * iio_get_channel_type() - get the type of a channel
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f3f7605..775e63e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -244,6 +244,7 @@
 
 int regulator_count_voltages(struct regulator *regulator);
 int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_list_corner_voltage(struct regulator *regulator, int corner);
 int regulator_is_supported_voltage(struct regulator *regulator,
 				   int min_uV, int max_uV);
 unsigned int regulator_get_linear_step(struct regulator *regulator);
@@ -579,6 +580,11 @@
 	return -EINVAL;
 }
 
+static inline int regulator_list_corner_voltage(struct regulator *regulator,
+	int corner)
+{
+	return -EINVAL;
+}
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 71756e6..7ae7dc3 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -94,6 +94,10 @@
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
  *	regulator_desc.n_voltages.  Voltages may be reported in any order.
+ * @list_corner_voltage: Return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at
+ *	the specified voltage corner or a negative errno if the corner value
+ *	can't be used on this system.
  *
  * @set_current_limit: Configure a limit for a current-limited regulator.
  *                     The driver should select the current closest to max_uA.
@@ -150,6 +154,7 @@
 
 	/* enumerate supported voltages */
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
+	int (*list_corner_voltage)(struct regulator_dev *list_reg, int corner);
 
 	/* get/set regulator voltage */
 	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
diff --git a/include/linux/regulator/spm-regulator.h b/include/linux/regulator/spm-regulator.h
new file mode 100644
index 0000000..c1eaee6
--- /dev/null
+++ b/include/linux/regulator/spm-regulator.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/soc/qcom/icnss2.h b/include/soc/qcom/icnss2.h
index 64128de..bb75490 100644
--- a/include/soc/qcom/icnss2.h
+++ b/include/soc/qcom/icnss2.h
@@ -167,4 +167,7 @@
 extern int icnss_qmi_send(struct device *dev, int type, void *cmd,
 			  int cmd_len, void *cb_ctx,
 			  int (*cb)(void *ctx, void *event, int event_len));
+extern int icnss_force_wake_request(struct device *dev);
+extern int icnss_force_wake_release(struct device *dev);
+extern int icnss_is_device_awake(struct device *dev);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/mpm.h b/include/soc/qcom/mpm.h
index 43bed05..2360335 100644
--- a/include/soc/qcom/mpm.h
+++ b/include/soc/qcom/mpm.h
@@ -16,4 +16,5 @@
 
 extern const struct mpm_pin mpm_bengal_gic_chip_data[];
 extern const struct mpm_pin mpm_scuba_gic_chip_data[];
+extern const struct mpm_pin mpm_sdm660_gic_chip_data[];
 #endif /* __QCOM_MPM_H__ */