Merge "drivers: net: rmnet: cleanup task boost list"
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 8aa068c..d043592 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -281,6 +281,7 @@
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 6d687fa..ba806f7 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -287,6 +287,7 @@
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 61de2b1..715fa86 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -124,6 +124,7 @@
MHI_CNTRL_ERR("Error ioremap region\n");
goto error_ioremap;
}
+ mhi_cntrl->len = len;
ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required,
mhi_cntrl->msi_required, PCI_IRQ_MSI);
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index a4e63bc..d86107b 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -947,6 +947,12 @@
goto error_bhi_offset;
}
+ if (val >= mhi_cntrl->len) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Invalid bhi offset:%x\n", val);
+ goto error_bhi_offset;
+ }
+
mhi_cntrl->bhi = mhi_cntrl->regs + val;
/* setup bhie offset if not set */
@@ -958,6 +964,12 @@
goto error_bhi_offset;
}
+ if (val >= mhi_cntrl->len) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Invalid bhie offset:%x\n", val);
+ goto error_bhi_offset;
+ }
+
mhi_cntrl->bhie = mhi_cntrl->regs + val;
}
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f..bc0c4cf 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 9be7cc4..06125b1 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -881,6 +881,17 @@
}
EXPORT_SYMBOL_GPL(extcon_set_property_capability);
+int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+ const u32 *exclusive)
+{
+ if (!edev)
+ return -EINVAL;
+
+ edev->mutually_exclusive = exclusive;
+ return 0;
+}
+EXPORT_SYMBOL(extcon_set_mutually_exclusive);
+
/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name.
* @extcon_name: the extcon name provided with extcon_dev_register()
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
index e37e770..3a40d05 100644
--- a/drivers/gpu/drm/bridge/lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -893,7 +893,7 @@
gpio_set_value(pdata->reset_gpio, 0);
msleep(20);
gpio_set_value(pdata->reset_gpio, 1);
- msleep(300);
+ msleep(180);
} else {
gpio_set_value(pdata->reset_gpio, 0);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index d8e69f8..5e98878 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1570,6 +1570,8 @@
}
for (i = 0; i < csdev->nr_outport; i++) {
+ if (desc->pdata->child_names[i] == NULL)
+ continue;
conns[i].outport = desc->pdata->outports[i];
conns[i].child_name = desc->pdata->child_names[i];
conns[i].child_port = desc->pdata->child_ports[i];
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 32ac255..36c62dc 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -50,16 +50,22 @@
{
struct device_node *ep = NULL;
int in = 0, out = 0;
+ struct of_endpoint endpoint;
do {
ep = of_graph_get_next_endpoint(node, ep);
if (!ep)
break;
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+
if (of_property_read_bool(ep, "slave-mode"))
- in++;
+ in = (endpoint.port + 1 > in) ?
+ endpoint.port + 1 : in;
else
- out++;
+ out = (endpoint.port + 1) > out ?
+ endpoint.port + 1 : out;
} while (ep);
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
index d9f111e..b278778 100644
--- a/drivers/i3c/master/i3c-master-qcom-geni.c
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -274,6 +274,7 @@
struct workqueue_struct *hj_wq;
struct work_struct hj_wd;
struct wakeup_source hj_wl;
+ struct pinctrl_state *i3c_gpio_disable;
};
struct geni_i3c_i2c_dev_data {
@@ -735,7 +736,7 @@
{
dma_addr_t tx_dma = 0;
dma_addr_t rx_dma = 0;
- int ret, time_remaining = 0;
+ int ret = 0, time_remaining = 0;
enum i3c_trans_dir rnw = gi3c->cur_rnw;
u32 len = gi3c->cur_len;
@@ -816,10 +817,12 @@
geni_se_tx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
tx_dma, len);
}
- ret = gi3c->err;
- if (gi3c->err)
+
+ if (gi3c->err) {
+ ret = (gi3c->err == -EBUSY) ? I3C_ERROR_M2 : gi3c->err;
GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
"I3C transaction error :%d\n", gi3c->err);
+ }
gi3c->cur_buf = NULL;
gi3c->cur_len = gi3c->cur_idx = 0;
@@ -1523,10 +1526,8 @@
return -ENOSPC;
}
-static int qcom_deallocate_ibi_table_entry(struct i3c_dev_desc *dev)
+static int qcom_deallocate_ibi_table_entry(struct geni_i3c_dev *gi3c)
{
- struct i3c_master_controller *m = i3c_dev_get_master(dev);
- struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
u32 i, timeout;
for (i = 0; i < gi3c->ibi.num_slots; i++) {
@@ -1558,16 +1559,14 @@
return 0;
}
-static void qcom_geni_i3c_ibi_unconf(struct i3c_dev_desc *dev)
+static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c)
{
- struct i3c_master_controller *m = i3c_dev_get_master(dev);
- struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
u32 val, timeout;
int ret = 0;
val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0));
if (val) {
- ret = qcom_deallocate_ibi_table_entry(dev);
+ ret = qcom_deallocate_ibi_table_entry(gi3c);
if (ret)
return;
}
@@ -1619,7 +1618,7 @@
if (!gi3c->ibi.hw_support && !gi3c->ibi.is_init)
return;
- qcom_geni_i3c_ibi_unconf(dev);
+ qcom_geni_i3c_ibi_unconf(gi3c);
spin_lock_irqsave(&gi3c->ibi.lock, flags);
gi3c->ibi.slots[data->ibi] = NULL;
@@ -1777,6 +1776,14 @@
ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep);
return ret;
}
+ gi3c->i3c_gpio_disable =
+ pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "disable");
+ if (IS_ERR(gi3c->i3c_gpio_disable)) {
+ GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+ "Error no pinctrl disable config specified\n");
+ ret = PTR_ERR(gi3c->i3c_gpio_disable);
+ return ret;
+ }
return 0;
}
@@ -1976,23 +1983,38 @@
ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev,
&geni_i3c_master_ops, false);
if (ret)
- return ret;
+ GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev,
+ "i3c_master_register failed:%d\n", ret);
+
//enable hot-join IRQ also
geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n");
- return ret;
+ return 0;
}
static int geni_i3c_remove(struct platform_device *pdev)
{
struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev);
- int ret = 0;
+ int ret = 0, val = 0;
+ //Disable hot-join, until next probe happens
+ val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+ val &= ~HOT_JOIN_IRQ_EN;
+ geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN);
+
+ if (gi3c->ibi.is_init)
+ qcom_geni_i3c_ibi_unconf(gi3c);
destroy_workqueue(gi3c->hj_wq);
wakeup_source_trash(&gi3c->hj_wl);
- pm_runtime_disable(gi3c->se.dev);
+ /* force suspend to avoid the auto suspend caused by driver removal */
+ pm_runtime_force_suspend(gi3c->se.dev);
+ ret = pinctrl_select_state(gi3c->se.i3c_rsc.geni_pinctrl,
+ gi3c->i3c_gpio_disable);
+ if (ret)
+ GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+ " i3c: pinctrl_select_state failed\n");
ret = i3c_master_unregister(&gi3c->ctrlr);
if (gi3c->ipcl)
ipc_log_context_destroy(gi3c->ipcl);
@@ -2004,7 +2026,7 @@
return 0;
}
-#ifdef CONFIG_PM
+#if IS_ENABLED(CONFIG_PM)
static int geni_i3c_runtime_suspend(struct device *dev)
{
struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
@@ -2080,7 +2102,18 @@
},
};
-module_platform_driver(geni_i3c_master);
+static int __init i3c_dev_init(void)
+{
+ return platform_driver_register(&geni_i3c_master);
+}
+
+static void __exit i3c_dev_exit(void)
+{
+ platform_driver_unregister(&geni_i3c_master);
+}
+
+module_init(i3c_dev_init);
+module_exit(i3c_dev_exit);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:geni_i3c_master");
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index a71075f..d5f06df 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -987,6 +987,8 @@
SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC_XO_THERM_PU2] = ADC_CHAN_TEMP("xo_therm", 1,
SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ANA_IN] = ADC_CHAN_TEMP("drax_temp", 1,
+ SCALE_HW_CALIB_PMIC_THERM)
};
static int adc_get_dt_channel_data(struct adc_chip *adc,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7..053a18c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -934,3 +934,21 @@
chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
+
+int iio_write_channel_processed(struct iio_channel *chan, int val)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_PROCESSED);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_write_channel_processed);
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e3a9948..3a01526 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -88,5 +88,5 @@
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
-obj-$(CONFIG_QCOM_MPM) += qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o
+obj-$(CONFIG_QCOM_MPM) += qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o qcom-mpm-sdm660.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
diff --git a/drivers/irqchip/qcom-mpm-sdm660.c b/drivers/irqchip/qcom-mpm-sdm660.c
new file mode 100644
index 0000000..2e482e0
--- /dev/null
+++ b/drivers/irqchip/qcom-mpm-sdm660.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/mpm.h>
+
+const struct mpm_pin mpm_sdm660_gic_chip_data[] = {
+ {2, 216}, /* tsens1_tsens_upper_lower_int */
+ {52, 275}, /* qmp_usb3_lfps_rxterm_irq_cx */
+ {61, 209}, /* lpi_dir_conn_irq_apps[1] */
+ {79, 379}, /* qusb2phy_intr for Dm */
+ {80, 380}, /* qusb2phy_intr for Dm for secondary PHY */
+ {81, 379}, /* qusb2phy_intr for Dp */
+ {82, 380}, /* qusb2phy_intr for Dp for secondary PHY */
+ {87, 358}, /* ee0_apps_hlos_spmi_periph_irq */
+ {91, 519}, /* lpass_pmu_tmr_timeout_irq_cx */
+ {-1},
+};
diff --git a/drivers/irqchip/qcom-mpm.c b/drivers/irqchip/qcom-mpm.c
index f7f4864..ab8a3b3 100644
--- a/drivers/irqchip/qcom-mpm.c
+++ b/drivers/irqchip/qcom-mpm.c
@@ -592,6 +592,10 @@
.compatible = "qcom,mpm-gic-scuba",
.data = mpm_scuba_gic_chip_data,
},
+ {
+ .compatible = "qcom,mpm-gic-sdm660",
+ .data = mpm_sdm660_gic_chip_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table);
diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c
index 0e02e78..b0ca27b 100644
--- a/drivers/leds/leds-qti-flash.c
+++ b/drivers/leds/leds-qti-flash.c
@@ -277,7 +277,7 @@
for (i = 0; i < 60; i++) {
/* wait for the flash vreg_ok to be set */
- usleep_range(5000, 5500);
+ mdelay(5);
rc = power_supply_get_property(led->main_psy,
POWER_SUPPLY_PROP_FLASH_TRIGGER, &pval);
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 0d2816f..5ea693b 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
/* -------------------------------------------------------------------------
@@ -367,8 +367,6 @@
/* Update qhdr_write_idx */
queue.qhdr_write_idx = new_write_idx;
- *is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
-
/* Update Write pointer -- queue.qhdr_write_idx */
exit:
/* Update TX request -- queue.qhdr_tx_req */
@@ -379,6 +377,13 @@
(size_t)&(queue.qhdr_write_idx) - (size_t)&queue))),
&queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx));
+ /* check if irq is required after write_idx is updated */
+ MEMR(npu_dev, (void *)((size_t)(offset + (uint32_t)(
+ (size_t)&(queue.qhdr_rx_req) - (size_t)&queue))),
+ (uint8_t *)&queue.qhdr_rx_req,
+ sizeof(queue.qhdr_rx_req));
+ *is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
+
return status;
}
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index cc94890..6a91e49 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -4288,8 +4288,10 @@
mhi_ctrl->fw_image_fallback = plat_priv->fw_fallback_name;
mhi_ctrl->regs = pci_priv->bar;
- cnss_pr_dbg("BAR starts at %pa\n",
- &pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM));
+ mhi_ctrl->len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
+ cnss_pr_dbg("BAR starts at %pa, len-%x\n",
+ &pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM),
+ mhi_ctrl->len);
ret = cnss_pci_get_mhi_msi(pci_priv);
if (ret) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index 43fb446..1b900a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -2235,6 +2235,81 @@
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
+static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ unsigned int pipe_num = 0;
+ bool enable_pipe = true;
+ u32 pipe_bitmask = ipa3_ctx->hw_stats.drop.init.enabled_bitmask;
+ char seprator = ',';
+ int i, j;
+ bool is_pipe = false;
+ ssize_t ret;
+
+ mutex_lock(&ipa3_ctx->lock);
+ if (sizeof(dbg_buff) < count + 1) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ dbg_buff[count] = '\0';
+ IPADBG("data is %s", dbg_buff);
+
+ i = 0;
+ while (dbg_buff[i] != ' ' && i < count)
+ i++;
+ j = i;
+ i++;
+ if (i < count) {
+ if (dbg_buff[i] == '0') {
+ enable_pipe = false;
+ IPADBG("Drop stats will be disabled for pipes:");
+ }
+ }
+
+ for (i = 0; i < j; i++) {
+ if (dbg_buff[i] >= '0' && dbg_buff[i] <= '9') {
+ pipe_num = (pipe_num * 10) + (dbg_buff[i] - '0');
+ is_pipe = true;
+ }
+ if (dbg_buff[i] == seprator) {
+ if (pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes
+ && ipa3_get_client_by_pipe(pipe_num) <
+ IPA_CLIENT_MAX) {
+ IPADBG("pipe number %u\n", pipe_num);
+ if (enable_pipe)
+ pipe_bitmask = pipe_bitmask |
+ (1 << pipe_num);
+ else
+ pipe_bitmask = pipe_bitmask &
+ (~(1 << pipe_num));
+ }
+ pipe_num = 0;
+ is_pipe = false;
+ }
+ }
+ if (is_pipe && pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes &&
+ ipa3_get_client_by_pipe(pipe_num) < IPA_CLIENT_MAX) {
+ IPADBG("pipe number %u\n", pipe_num);
+ if (enable_pipe)
+ pipe_bitmask = pipe_bitmask | (1 << pipe_num);
+ else
+ pipe_bitmask = pipe_bitmask & (~(1 << pipe_num));
+ }
+
+ ipa_init_drop_stats(pipe_bitmask);
+ ret = count;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
+
static const struct file_operations ipa3_quota_ops = {
.read = ipa_debugfs_print_quota_stats,
.write = ipa_debugfs_reset_quota_stats,
@@ -2255,10 +2330,14 @@
.write = ipa_debugfs_reset_drop_stats,
};
+static const struct file_operations ipa3_enable_drop_ops = {
+ .write = ipa_debugfs_enable_disable_drop_stats,
+};
int ipa_debugfs_init_stats(struct dentry *parent)
{
const mode_t read_write_mode = 0664;
+ const mode_t write_mode = 0220;
struct dentry *file;
struct dentry *dent;
@@ -2285,6 +2364,13 @@
goto fail;
}
+ file = debugfs_create_file("enable_drop_stats", write_mode, dent, NULL,
+ &ipa3_enable_drop_ops);
+ if (IS_ERR_OR_NULL(file)) {
+ IPAERR("fail to create file %s\n", "enable_drop_stats");
+ goto fail;
+ }
+
file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
&ipa3_tethering_ops);
if (IS_ERR_OR_NULL(file)) {
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 0acae67..4a29912 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -4427,6 +4427,7 @@
return rc;
}
sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK;
+ sleep_fifo_length++;
if (chip->dt.qg_sleep_config) {
qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Forcing S2_SLEEP\n");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 4e4bdcf..7e7dd02 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1094,6 +1094,16 @@
be used on systems which contain an RPM which communicates with the
application processor over SMD.
+config REGULATOR_SPM
+ bool "SPM regulator driver"
+ depends on SPMI
+ help
+ Enable support for the SPM regulator driver which is used for
+ setting voltages of processor supply regulators via the SPM module
+ found inside chips of Qualcomm Technologies Inc. The SPM regulator
+ driver can be used on QTI SoCs where the APSS processor cores are
+ supplied by their own PMIC regulator.
+
config REGULATOR_STUB
tristate "Stub Regulator"
help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9aff7d8..2f8b0ba 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -137,6 +137,7 @@
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
obj-$(CONFIG_REGULATOR_REFGEN) += refgen.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
obj-$(CONFIG_REGULATOR_RPMH) += rpmh-regulator.o
obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f2360fa..6968971 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2734,6 +2734,40 @@
EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
/**
+ * regulator_list_corner_voltage - return the maximum voltage in microvolts that
+ * can be physically configured for the regulator when operating at the
+ * specified voltage corner
+ * @regulator: regulator source
+ * @corner: voltage corner value
+ * Context: can sleep
+ *
+ * This function can be used for regulators which allow scaling between
+ * different voltage corners as opposed to different absolute voltages. The
+ * absolute voltage for a given corner may vary part-to-part or for a given part
+ * at runtime based upon various factors.
+ *
+ * Returns a voltage corresponding to the specified voltage corner or a negative
+ * errno if the corner value can't be used on this system.
+ */
+int regulator_list_corner_voltage(struct regulator *regulator, int corner)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret;
+
+ if (corner < rdev->constraints->min_uV ||
+ corner > rdev->constraints->max_uV ||
+ !rdev->desc->ops->list_corner_voltage)
+ return -EINVAL;
+
+ mutex_lock(&rdev->mutex);
+ ret = rdev->desc->ops->list_corner_voltage(rdev, corner);
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(regulator_list_corner_voltage);
+
+/**
* regulator_get_linear_step - return the voltage step size between VSEL values
* @regulator: regulator source
*
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
new file mode 100644
index 0000000..313dcee
--- /dev/null
+++ b/drivers/regulator/spm-regulator.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+#include <linux/arm-smccc.h>
+
+#if defined(CONFIG_ARM64) || (defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI))
+#else
+ #define __invoke_psci_fn_smc(a, b, c, d) 0
+#endif
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+struct voltage_range {
+ int min_uV;
+ int set_point_min_uV;
+ int max_uV;
+ int step_uV;
+};
+
+enum qpnp_regulator_uniq_type {
+ QPNP_TYPE_HF,
+ QPNP_TYPE_FTS2,
+ QPNP_TYPE_FTS2p5,
+ QPNP_TYPE_FTS426,
+ QPNP_TYPE_ULT_HF,
+ QPNP_TYPE_HFS430,
+};
+
+enum qpnp_regulator_type {
+ QPNP_HF_TYPE = 0x03,
+ QPNP_FTS2_TYPE = 0x1C,
+ QPNP_FTS2p5_TYPE = 0x1C,
+ QPNP_FTS426_TYPE = 0x1C,
+ QPNP_ULT_HF_TYPE = 0x22,
+};
+
+enum qpnp_regulator_subtype {
+ QPNP_FTS2_SUBTYPE = 0x08,
+ QPNP_HF_SUBTYPE = 0x08,
+ QPNP_FTS2p5_SUBTYPE = 0x09,
+ QPNP_FTS426_SUBTYPE = 0x0A,
+ QPNP_ULT_HF_SUBTYPE = 0x0D,
+ QPNP_HFS430_SUBTYPE = 0x0A,
+};
+
+enum qpnp_logical_mode {
+ QPNP_LOGICAL_MODE_AUTO,
+ QPNP_LOGICAL_MODE_PWM,
+};
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000, 5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range fts2p5_range0
+ = { 80000, 350000, 1355000, 5000};
+static const struct voltage_range fts2p5_range1
+ = {160000, 700000, 2200000, 10000};
+static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000};
+static const struct voltage_range hfs430_range = {0, 320000, 2040000, 8000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+ 12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+ 25000};
+static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
+static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
+ 25000};
+
+#define QPNP_SMPS_REG_TYPE 0x04
+#define QPNP_SMPS_REG_SUBTYPE 0x05
+#define QPNP_SMPS_REG_VOLTAGE_RANGE 0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT 0x41
+#define QPNP_SMPS_REG_MODE 0x45
+#define QPNP_SMPS_REG_STEP_CTRL 0x61
+#define QPNP_SMPS_REG_UL_LL_CTRL 0x68
+
+/* FTS426/HFS430 voltage control registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_LB 0x40
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_UB 0x41
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB 0x42
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_UB 0x43
+
+/* HF voltage limit registers */
+#define QPNP_HF_REG_VOLTAGE_ULS 0x69
+#define QPNP_HF_REG_VOLTAGE_LLS 0x6B
+
+/* FTS voltage limit registers */
+#define QPNP_FTS_REG_VOLTAGE_ULS_VALID 0x6A
+#define QPNP_FTS_REG_VOLTAGE_LLS_VALID 0x6C
+
+/* FTS426/HFS430 voltage limit registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB 0x68
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_UB 0x69
+
+/* Common regulator UL & LL limits control register layout */
+#define QPNP_COMMON_UL_EN_MASK 0x80
+#define QPNP_COMMON_LL_EN_MASK 0x40
+
+#define QPNP_SMPS_MODE_PWM 0x80
+#define QPNP_SMPS_MODE_AUTO 0x40
+#define QPNP_FTS426_HFS430_MODE_PWM 0x07
+#define QPNP_FTS426_HFS430_MODE_AUTO 0x06
+
+#define QPNP_SMPS_STEP_CTRL_STEP_MASK 0x18
+#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT 3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK 0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT 0
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK 0x03
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT 0
+
+/* Clock rate in kHz of the FTS2 regulator reference clock. */
+#define QPNP_SMPS_CLOCK_RATE 19200
+#define QPNP_FTS426_CLOCK_RATE 4800
+#define QPNP_HFS430_CLOCK_RATE 1600
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY 50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY 8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY 8
+#define QPNP_HF_STEP_DELAY 20
+#define QPNP_FTS426_HFS430_STEP_DELAY 2
+
+/* Arbitrarily large max step size used to avoid possible numerical overflow */
+#define SPM_REGULATOR_MAX_STEP_UV 10000000
+
+/*
+ * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is used to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM 4
+#define QPNP_FTS2_STEP_MARGIN_DEN 5
+#define QPNP_FTS426_HFS430_STEP_MARGIN_NUM 10
+#define QPNP_FTS426_HFS430_STEP_MARGIN_DEN 11
+
+/*
+ * Settling delay for FTS2.5
+ * Warm-up=20uS, 0-10% & 90-100% non-linear V-ramp delay = 50uS
+ */
+#define FTS2P5_SETTLING_DELAY_US 70
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+struct spm_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ const struct voltage_range *range;
+ int uV;
+ int last_set_uV;
+ unsigned int vlevel;
+ unsigned int last_set_vlevel;
+ u32 max_step_uV;
+ bool online;
+ u16 spmi_base_addr;
+ enum qpnp_logical_mode init_mode;
+ enum qpnp_logical_mode mode;
+ int step_rate;
+ enum qpnp_regulator_uniq_type regulator_type;
+ u32 cpu_num;
+ bool bypass_spm;
+ struct regulator_desc avs_rdesc;
+ struct regulator_dev *avs_rdev;
+ int avs_min_uV;
+ int avs_max_uV;
+ bool avs_enabled;
+ u32 recal_cluster_mask;
+};
+
+static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
+{
+ return vreg->avs_rdev && !vreg->bypass_spm;
+}
+
+static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
+{
+ int vlevel;
+
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430)
+ return roundup(uV, vreg->range->step_uV) / 1000;
+
+ vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
+
+ /* Fix VSET for ULT HF Buck */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1) {
+ vlevel &= 0x1F;
+ vlevel |= ULT_SMPS_RANGE_SPLIT;
+ }
+
+ return vlevel;
+}
+
+static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
+{
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430)
+ return vlevel * 1000;
+ /*
+ * Calculate ULT HF buck VSET based on range:
+ * In case of range 0: VSET is a 7 bit value.
+ * In case of range 1: VSET is a 5 bit value.
+ */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1)
+ vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ return vlevel * vreg->range->step_uV + vreg->range->min_uV;
+}
+
+static unsigned int spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
+ unsigned int vlevel)
+{
+ /* Fix VSET for ULT HF Buck */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1)
+ vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ if (vreg->regulator_type == QPNP_TYPE_HFS430)
+ vlevel = spm_regulator_vlevel_to_uv(vreg, vlevel)
+ / vreg->range->step_uV;
+
+ return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
+ / vreg->range->step_uV;
+}
+
+static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 val[2] = {0};
+
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430) {
+ rc = regmap_bulk_read(vreg->regmap,
+ vreg->spmi_base_addr
+ + QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB,
+ val, 2);
+ if (rc) {
+ dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ vreg->last_set_vlevel = ((unsigned int)val[1] << 8) | val[0];
+ } else {
+ rc = regmap_bulk_read(vreg->regmap,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+ val, 1);
+ if (rc) {
+ dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+ vreg->last_set_vlevel = val[0];
+ }
+
+ vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg,
+ vreg->last_set_vlevel);
+ return rc;
+}
+
+static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned int vlevel)
+{
+ int rc = 0;
+ u8 reg[2];
+
+ /* Set voltage control registers via SPMI. */
+ reg[0] = vlevel & 0xFF;
+ reg[1] = (vlevel >> 8) & 0xFF;
+
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430) {
+ rc = regmap_bulk_write(vreg->regmap,
+ vreg->spmi_base_addr
+ + QPNP_FTS426_HFS430_REG_VOLTAGE_LB,
+ reg, 2);
+ } else {
+ rc = regmap_write(vreg->regmap,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+ reg[0]);
+ }
+
+ if (rc)
+ pr_err("%s: regmap_write failed, rc=%d\n",
+ vreg->rdesc.name, rc);
+
+ return rc;
+}
+
+static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg,
+ u8 regval)
+{
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430)
+ return (regval == QPNP_FTS426_HFS430_MODE_PWM)
+ ? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+ else
+ return (regval & QPNP_SMPS_MODE_PWM)
+ ? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+}
+
+static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg,
+ enum qpnp_logical_mode mode)
+{
+ if (vreg->regulator_type == QPNP_TYPE_FTS426
+ || vreg->regulator_type == QPNP_TYPE_HFS430)
+ return (mode == QPNP_LOGICAL_MODE_PWM)
+ ? QPNP_FTS426_HFS430_MODE_PWM
+ : QPNP_FTS426_HFS430_MODE_AUTO;
+ else
+ return (mode == QPNP_LOGICAL_MODE_PWM)
+ ? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO;
+}
+
+static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+ int rc;
+
+ rc = regmap_write(vreg->regmap,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+ qpnp_mode_to_regval(vreg, mode));
+ if (rc)
+ dev_err(&vreg->pdev->dev,
+ "%s: could not write to mode register, rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int vlevel, rc;
+
+ if (spm_regulator_using_avs(vreg)) {
+ vlevel = msm_spm_get_vdd(vreg->cpu_num);
+
+ if (vlevel < 0) {
+ pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
+ vreg->rdesc.name, vlevel);
+
+ rc = qpnp_smps_read_voltage(vreg);
+ if (rc) {
+ pr_err("%s: voltage read failed, rc=%d\n",
+ vreg->rdesc.name, rc);
+ return rc;
+ }
+
+ return vreg->last_set_uV;
+ }
+
+ vreg->last_set_vlevel = vlevel;
+ vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+ return vreg->last_set_uV;
+ } else {
+ return vreg->uV;
+ }
+};
+
+/*
+ * Write @uV to the hardware.  Prefers the SAW/SPM voltage control path
+ * and falls back to a direct SPMI write when SPM is bypassed or the SPM
+ * write fails.  Busy-waits long enough for the output to slew to the
+ * new set point before returning, then caches the programmed value.
+ */
+static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
+{
+	unsigned int vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	bool spm_failed = false;
+	int rc = 0;
+	u32 slew_delay;
+
+	if (likely(!vreg->bypass_spm)) {
+		/* Set voltage control register via SPM. */
+		rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
+		if (rc) {
+			pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
+				vreg->rdesc.name, rc);
+			spm_failed = true;
+		}
+	}
+
+	if (unlikely(vreg->bypass_spm || spm_failed)) {
+		rc = qpnp_smps_write_voltage(vreg, vlevel);
+		if (rc) {
+			pr_err("%s: voltage write failed, rc=%d\n",
+				vreg->rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	if (uV > vreg->last_set_uV) {
+		/* Wait for voltage stepping to complete. */
+		slew_delay = DIV_ROUND_UP(uV - vreg->last_set_uV,
+					vreg->step_rate);
+		if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+			slew_delay += FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	} else if (vreg->regulator_type == QPNP_TYPE_FTS2p5) {
+		/* add the ramp-down delay */
+		slew_delay = DIV_ROUND_UP(vreg->last_set_uV - uV,
+				vreg->step_rate) + FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	}
+
+	vreg->last_set_uV = uV;
+	vreg->last_set_vlevel = vlevel;
+
+	return rc;
+}
+
+/*
+ * Ask the secure world to recalibrate the clusters in
+ * recal_cluster_mask after a voltage change.  No-op when no
+ * recalibration mask was provided in device tree.
+ * NOTE(review): 0xC4000020 looks like a vendor SIP SMC function id --
+ * confirm against the platform's secure-world interface documentation.
+ */
+static int spm_regulator_recalibrate(struct spm_vreg *vreg)
+{
+	struct arm_smccc_res res;
+
+	if (!vreg->recal_cluster_mask)
+		return 0;
+
+	arm_smccc_smc(0xC4000020, vreg->recal_cluster_mask,
+			2, 0, 0, 0, 0, 0, &res);
+	if (res.a0)
+		pr_err("%s: recalibration failed, rc=%ld\n", vreg->rdesc.name,
+			res.a0);
+
+	return res.a0;
+}
+
+/*
+ * Apply the cached software vote (vreg->uV / vreg->vlevel) to hardware.
+ * Refreshes last_set_* first, then ramps the output toward vreg->uV in
+ * chunks no larger than max_step_uV.  On FTS2 in non-PWM init mode the
+ * regulator is temporarily forced into PWM for a fast ramp-up, then
+ * returned to AUTO.  Finishes with an optional cluster recalibration.
+ */
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	bool pwm_required;
+	int rc = 0;
+	int uV;
+
+	/* Refresh last_set_uV/last_set_vlevel from hardware (or cache). */
+	rc = spm_regulator_get_voltage(rdev);
+	if (rc < 0)
+		return rc;
+
+	if (vreg->vlevel == vreg->last_set_vlevel)
+		return 0;
+
+	/* Only FTS2 parts ramping up from a non-PWM mode need the switch. */
+	pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
+			&& (vreg->init_mode != QPNP_LOGICAL_MODE_PWM)
+			&& vreg->uV > vreg->last_set_uV;
+
+	if (pwm_required) {
+		/* Switch to PWM mode so that voltage ramping is fast. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_PWM);
+		if (rc)
+			return rc;
+	}
+
+	/* Step toward the target in increments of at most max_step_uV. */
+	do {
+		uV = vreg->uV > vreg->last_set_uV
+		    ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
+		    : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
+
+		rc = spm_regulator_write_voltage(vreg, uV);
+		if (rc)
+			return rc;
+	} while (vreg->last_set_uV != vreg->uV);
+
+	if (pwm_required) {
+		/* Wait for mode transition to complete. */
+		udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+		/* Switch to AUTO mode so that power consumption is lowered. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_AUTO);
+		if (rc)
+			return rc;
+	}
+
+	rc = spm_regulator_recalibrate(vreg);
+
+	return rc;
+}
+
+/*
+ * regulator_ops set_voltage callback.  Snaps [min_uV, max_uV] to the
+ * nearest valid hardware set point within the active range, records the
+ * vote, and programs hardware only while the regulator is online;
+ * offline votes are applied later by spm_regulator_enable().
+ */
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	int uV = min_uV;
+	unsigned int vlevel;
+
+	/* Pull the request up to the range floor if the window allows it. */
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	/* Round-trip through vlevel to land exactly on a set point. */
+	vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+	if (uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
+	vreg->vlevel = vlevel;
+	vreg->uV = uV;
+
+	if (!vreg->online)
+		return 0;
+
+	return _spm_regulator_set_voltage(rdev);
+}
+
+/* regulator_ops list_voltage callback: map @selector to microvolts. */
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+					unsigned int selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+
+	/* Out-of-range selectors report 0 rather than an error code. */
+	if (selector >= vreg->rdesc.n_voltages)
+		return 0;
+
+	return range->set_point_min_uV + selector * range->step_uV;
+}
+
+/*
+ * regulator_ops enable callback: apply the pending voltage vote to
+ * hardware and mark the regulator online on success.
+ */
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = _spm_regulator_set_voltage(rdev);
+
+	if (rc == 0)
+		vreg->online = true;
+
+	return rc;
+}
+
+/*
+ * regulator_ops disable callback.  Only clears the online flag; voltage
+ * votes made while offline are deferred until the next enable.
+ */
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+	return 0;
+}
+
+/* regulator_ops is_enabled callback: report the software online flag. */
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	const struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
+/*
+ * regulator_ops get_mode callback: PWM maps to REGULATOR_MODE_NORMAL,
+ * everything else to REGULATOR_MODE_IDLE.
+ */
+static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (vreg->mode == QPNP_LOGICAL_MODE_PWM)
+		return REGULATOR_MODE_NORMAL;
+
+	return REGULATOR_MODE_IDLE;
+}
+
+/* regulator_ops set_mode callback: program the mapped hardware mode. */
+static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	/*
+	 * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
+	 * init_mode. This ensures that the regulator always stays in PWM mode
+	 * in the case that qcom,mode has been specified as "pwm" in device
+	 * tree.
+	 */
+	vreg->mode = (mode == REGULATOR_MODE_NORMAL) ? QPNP_LOGICAL_MODE_PWM
+						     : vreg->init_mode;
+
+	return qpnp_smps_set_mode(vreg, vreg->mode);
+}
+
+/* Ops for the main voltage-control regulator registered in probe(). */
+static struct regulator_ops spm_regulator_ops = {
+	.get_voltage	= spm_regulator_get_voltage,
+	.set_voltage	= spm_regulator_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.get_mode	= spm_regulator_get_mode,
+	.set_mode	= spm_regulator_set_mode,
+	.enable		= spm_regulator_enable,
+	.disable	= spm_regulator_disable,
+	.is_enabled	= spm_regulator_is_enabled,
+};
+
+/*
+ * AVS regulator set_voltage callback.  Converts [min_uV, max_uV] into a
+ * pair of hardware vlevel limits (both snapped to valid set points),
+ * programs them into the SAW AVS block (unless SPM is bypassed), and
+ * caches the resulting window.
+ */
+static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	unsigned int vlevel_min, vlevel_max;
+	int uV, avs_min_uV, avs_max_uV, rc;
+
+	uV = min_uV;
+
+	/* Snap the lower bound up to the range floor if the window allows. */
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
+
+	if (avs_min_uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	uV = max_uV;
+
+	/* Snap the upper bound down to the range ceiling if allowed. */
+	if (uV > range->max_uV && min_uV <= range->max_uV)
+		uV = range->max_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_max = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
+
+	if (avs_max_uV < min_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	/* With SPM bypassed the limits are validated but not programmed. */
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
+						vlevel_max);
+		if (rc) {
+			pr_err("%s: AVS limit setting failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
+	vreg->avs_min_uV = avs_min_uV;
+	vreg->avs_max_uV = avs_max_uV;
+
+	return 0;
+}
+
+/* AVS get_voltage callback: report the cached AVS floor in microvolts. */
+static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
+{
+	const struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_min_uV;
+}
+
+/*
+ * AVS enable callback: turn on hardware AVS for this CPU's SAW (a no-op
+ * when SPM is bypassed) and record the software state.
+ */
+static int spm_regulator_avs_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (unlikely(vreg->bypass_spm))
+		goto done;
+
+	rc = msm_spm_avs_enable(vreg->cpu_num);
+	if (rc) {
+		pr_err("%s: AVS enable failed, rc=%d\n",
+			vreg->avs_rdesc.name, rc);
+		return rc;
+	}
+
+done:
+	vreg->avs_enabled = true;
+
+	return 0;
+}
+
+/*
+ * AVS disable callback: turn off hardware AVS for this CPU's SAW (a
+ * no-op when SPM is bypassed) and record the software state.
+ */
+static int spm_regulator_avs_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (unlikely(vreg->bypass_spm))
+		goto done;
+
+	rc = msm_spm_avs_disable(vreg->cpu_num);
+	if (rc) {
+		pr_err("%s: AVS disable failed, rc=%d\n",
+			vreg->avs_rdesc.name, rc);
+		return rc;
+	}
+
+done:
+	vreg->avs_enabled = false;
+
+	return 0;
+}
+
+/* AVS is_enabled callback: report the cached software state. */
+static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
+{
+	const struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_enabled;
+}
+
+/* Ops for the optional child AVS-limits regulator. */
+static struct regulator_ops spm_regulator_avs_ops = {
+	.get_voltage	= spm_regulator_avs_get_voltage,
+	.set_voltage	= spm_regulator_avs_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.enable		= spm_regulator_avs_enable,
+	.disable	= spm_regulator_avs_disable,
+	.is_enabled	= spm_regulator_avs_is_enabled,
+};
+
+/*
+ * Read the two-byte TYPE/SUBTYPE register pair over SPMI and classify
+ * the SMPS into one of the supported regulator_type values.  Returns
+ * -ENODEV for an unrecognized pair.
+ */
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 type[2];
+
+	rc = regmap_bulk_read(vreg->regmap,
+				vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE,
+				type,
+				2);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Order matters: HF_TYPE appears twice with different subtypes. */
+	if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2;
+	} else if (type[0] == QPNP_FTS2p5_TYPE
+			&& type[1] == QPNP_FTS2p5_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2p5;
+	} else if (type[0] == QPNP_FTS426_TYPE
+			&& type[1] == QPNP_FTS426_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS426;
+	} else if (type[0] == QPNP_HF_TYPE
+			&& type[1] == QPNP_HFS430_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HFS430;
+	} else if (type[0] == QPNP_ULT_HF_TYPE
+			&& type[1] == QPNP_ULT_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_ULT_HF;
+	} else if (type[0] == QPNP_HF_TYPE
+			&& type[1] == QPNP_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HF;
+	} else {
+		dev_err(&vreg->pdev->dev,
+			"%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
+			__func__, type[0], type[1]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+/*
+ * Read the hardware voltage-range register and select @range0 or
+ * @range1 accordingly.  The range is latched at PMIC power-on and
+ * cannot be changed at runtime, so it is only read once here.
+ */
+static int qpnp_smps_init_range(struct spm_vreg *vreg,
+	const struct voltage_range *range0, const struct voltage_range *range1)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	if (reg == 0x00) {
+		vreg->range = range0;
+	} else if (reg == 0x01) {
+		vreg->range = range1;
+	} else {
+		dev_err(&vreg->pdev->dev, "%s: voltage range=%d is invalid\n",
+			__func__, reg);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * ULT HF bucks encode their range in the voltage set-point register
+ * rather than a dedicated range register: set points below
+ * ULT_SMPS_RANGE_SPLIT belong to range 0, the rest to range 1.
+ */
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+						&ult_hf_range1;
+	return rc;
+}
+
+/*
+ * Seed the software voltage vote from the value currently programmed in
+ * hardware, and mirror it into the SAW voltage control register so SPM
+ * and SPMI agree at probe time.
+ */
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+
+	rc = qpnp_smps_read_voltage(vreg);
+	if (rc) {
+		pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
+			rc);
+		return rc;
+	}
+
+	vreg->vlevel = vreg->last_set_vlevel;
+	vreg->uV = vreg->last_set_uV;
+
+	/* Initialize SAW voltage control register */
+	if (!vreg->bypass_spm) {
+		rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+		if (rc)
+			pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
+				vreg->rdesc.name, rc);
+	}
+
+	/*
+	 * NOTE(review): an msm_spm_set_vdd failure is logged but not
+	 * propagated -- presumably deliberate best-effort; confirm.
+	 */
+	return 0;
+}
+
+/*
+ * Determine the initial operating mode.  If device tree specifies
+ * "qcom,mode" ("pwm", or "auto" on non-ULT-HF parts), program that mode;
+ * otherwise read the mode currently configured in hardware.
+ *
+ * Fix vs. original: a regmap_read() failure previously fell through and
+ * passed the uninitialized 'val' to qpnp_regval_to_mode(); it now
+ * returns the error instead.
+ */
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+	const char *mode_name;
+	int rc;
+	uint val;
+
+	rc = of_property_read_string(vreg->pdev->dev.of_node, "qcom,mode",
+					&mode_name);
+	if (!rc) {
+		if (strcmp("pwm", mode_name) == 0) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_PWM;
+		} else if ((strcmp("auto", mode_name) == 0) &&
+				(vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_AUTO;
+		} else {
+			dev_err(&vreg->pdev->dev,
+				"%s: unknown regulator mode: %s\n",
+				__func__, mode_name);
+			return -EINVAL;
+		}
+
+		rc = qpnp_smps_set_mode(vreg, vreg->init_mode);
+		if (rc)
+			return rc;
+	} else {
+		rc = regmap_read(vreg->regmap,
+				 vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+				 &val);
+		if (rc) {
+			dev_err(&vreg->pdev->dev,
+				"%s: could not read mode register, rc=%d\n",
+				__func__, rc);
+			/* 'val' is uninitialized on failure; bail out. */
+			return rc;
+		}
+		vreg->init_mode = qpnp_regval_to_mode(vreg, val);
+	}
+
+	vreg->mode = vreg->init_mode;
+
+	return rc;
+}
+
+/*
+ * Compute the regulator's voltage slew rate (step_rate, in uV/us) from
+ * the hardware stepping control register.  The formula differs per
+ * family: FTS426/HFS430 use a fixed clock with a delay divider only,
+ * ULT/HF parts ignore the step multiplier, and FTS2/FTS2p5 use both
+ * step and delay fields.  A safety margin is then applied so the driver
+ * always waits at least as long as the hardware actually needs.
+ */
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	int step = 0, delay;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+		vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read stepping control register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	/* ULT and FTS426 bucks do not support steps */
+	if (vreg->regulator_type != QPNP_TYPE_ULT_HF && vreg->regulator_type !=
+		QPNP_TYPE_FTS426 && vreg->regulator_type != QPNP_TYPE_HFS430)
+		step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
+			>> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		delay = (reg & QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK)
+			>> QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = ((vreg->regulator_type == QPNP_TYPE_FTS426)
+					? QPNP_FTS426_CLOCK_RATE
+					: QPNP_HFS430_CLOCK_RATE)
+					* vreg->range->step_uV;
+	} else {
+		delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+			>> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
+					* (1 << step);
+	}
+
+	/* Divide by the per-family delay multiplier (and 1000 for us). */
+	if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
+		|| (vreg->regulator_type == QPNP_TYPE_HF))
+		vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate /= 1000 * (QPNP_FTS426_HFS430_STEP_DELAY
+						<< delay);
+	else
+		vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+	/* Derate by the family-specific safety margin. */
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate = vreg->step_rate
+					* QPNP_FTS426_HFS430_STEP_MARGIN_NUM
+					/ QPNP_FTS426_HFS430_STEP_MARGIN_DEN;
+	else
+		vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+					/ QPNP_FTS2_STEP_MARGIN_DEN;
+
+	/* Ensure that the stepping rate is greater than 0. */
+	vreg->step_rate = max(vreg->step_rate, 1);
+
+	return rc;
+}
+
+/*
+ * Verify that the regulator constraints from device tree fit inside the
+ * hardware-enforced upper/lower voltage limit registers (when those
+ * limits are enabled).  ULT HF parts have no HW limit registers.
+ *
+ * Fix vs. original: the '&reg[1]' arguments had been corrupted into the
+ * mojibake '(R)[1]' (an HTML-entity mangling of "&reg"), which does not
+ * compile; restored.  Also annotated the intentional HF fallthrough.
+ */
+static int qpnp_smps_check_constraints(struct spm_vreg *vreg,
+					struct regulator_init_data *init_data)
+{
+	int rc = 0, limit_min_uV, limit_max_uV;
+	u16 ul_reg, ll_reg;
+	u8 reg[2];
+
+	limit_min_uV = 0;
+	limit_max_uV = INT_MAX;
+
+	ul_reg = QPNP_FTS_REG_VOLTAGE_ULS_VALID;
+	ll_reg = QPNP_FTS_REG_VOLTAGE_LLS_VALID;
+
+	switch (vreg->regulator_type) {
+	case QPNP_TYPE_HF:
+		ul_reg = QPNP_HF_REG_VOLTAGE_ULS;
+		ll_reg = QPNP_HF_REG_VOLTAGE_LLS;
+		/* fallthrough - HF shares the UL/LL handling below */
+	case QPNP_TYPE_FTS2:
+	case QPNP_TYPE_FTS2p5:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_SMPS_REG_UL_LL_CTRL, reg, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: UL_LL register read failed, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ul_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: ULS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_max_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ll_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: LLS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_min_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		break;
+	case QPNP_TYPE_FTS426:
+	case QPNP_TYPE_HFS430:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB,
+					reg, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		limit_max_uV = spm_regulator_vlevel_to_uv(vreg,
+					((unsigned int)reg[1] << 8) | reg[0]);
+		break;
+	case QPNP_TYPE_ULT_HF:
+		/* no HW voltage limit configuration */
+		break;
+	}
+
+	if (init_data->constraints.min_uV < limit_min_uV
+	    || init_data->constraints.max_uV > limit_max_uV) {
+		dev_err(&vreg->pdev->dev, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
+			init_data->constraints.min_uV,
+			init_data->constraints.max_uV, limit_min_uV,
+			limit_max_uV);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/* True when the active range is the low-voltage ("LV") range 0. */
+static bool spm_regulator_using_range0(struct spm_vreg *vreg)
+{
+	const struct voltage_range *r = vreg->range;
+
+	/* fts426 parts have a single range, reported here as range 0. */
+	return r == &fts2_range0 || r == &fts2p5_range0
+		|| r == &ult_hf_range0 || r == &hf_range0
+		|| r == &fts426_range;
+}
+
+/*
+ * Register a regulator to enable/disable AVS and set AVS min/max limits.
+ * The first available child node of @node (if any) describes the AVS
+ * limits regulator; absence of a child is not an error.
+ *
+ * Fix vs. original: the regulator_register() argument had been
+ * corrupted into the mojibake '(R)_config' (HTML-entity mangling of
+ * "&reg_config"); restored to '&reg_config'.
+ */
+static int spm_regulator_avs_register(struct spm_vreg *vreg,
+				struct device *dev, struct device_node *node)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *avs_node = NULL;
+	struct device_node *child_node;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	/*
+	 * Find the first available child node (if any). It corresponds to an
+	 * AVS limits regulator.
+	 */
+	for_each_available_child_of_node(node, child_node) {
+		avs_node = child_node;
+		break;
+	}
+
+	if (!avs_node)
+		return 0;
+
+	init_data = of_get_regulator_init_data(dev, avs_node, &vreg->avs_rdesc);
+	if (!init_data) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+						| REGULATOR_CHANGE_VOLTAGE;
+
+	if (!init_data->constraints.name) {
+		dev_err(dev, "%s: AVS node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	vreg->avs_rdesc.name	= init_data->constraints.name;
+	vreg->avs_rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->avs_rdesc.owner	= THIS_MODULE;
+	vreg->avs_rdesc.ops	= &spm_regulator_avs_ops;
+	vreg->avs_rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = avs_node;
+
+	vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
+	if (IS_ERR(vreg->avs_rdev)) {
+		rc = PTR_ERR(vreg->avs_rdev);
+		dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (vreg->bypass_spm)
+		pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
+			vreg->avs_rdesc.name);
+
+	return 0;
+}
+
+/*
+ * Probe: read the SPMI base and hardware type, select the voltage
+ * range, seed voltage/mode/step-rate state from hardware, validate DT
+ * constraints against HW limits, and register the main regulator plus
+ * the optional child AVS-limits regulator.
+ *
+ * Fix vs. original: the regulator_register() argument had been
+ * corrupted into the mojibake '(R)_config' (HTML-entity mangling of
+ * "&reg_config"); restored to '&reg_config'.
+ */
+static int spm_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *node = pdev->dev.of_node;
+	struct regulator_init_data *init_data;
+	struct spm_vreg *vreg;
+	unsigned int base;
+	bool bypass_spm;
+	int rc;
+
+	if (!node) {
+		dev_err(&pdev->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Defer until the SAW/SPM driver has probed, unless bypassed. */
+	bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
+	if (!bypass_spm) {
+		rc = msm_spm_probe_done();
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"%s: spm unavailable, rc=%d\n",
+					__func__, rc);
+			return rc;
+		}
+	}
+
+	vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!vreg->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	vreg->pdev = pdev;
+	vreg->bypass_spm = bypass_spm;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	vreg->spmi_base_addr = base;
+
+	rc = qpnp_smps_check_type(vreg);
+	if (rc)
+		return rc;
+
+	/* Specify CPU 0 as default in order to handle shared regulator case. */
+	vreg->cpu_num = 0;
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,cpu-num",
+				&vreg->cpu_num);
+
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,recal-mask",
+				&vreg->recal_cluster_mask);
+
+	/*
+	 * The regulator must be initialized to range 0 or range 1 during
+	 * PMIC power on sequence. Once it is set, it cannot be changed
+	 * dynamically.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_FTS2)
+		rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+		rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->range = &fts426_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->range = &hfs430_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HF)
+		rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
+		rc = qpnp_ult_hf_init_range(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_voltage(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_mode(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_step_rate(vreg);
+	if (rc)
+		return rc;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, node, &vreg->rdesc);
+	if (!init_data) {
+		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
+	init_data->constraints.valid_modes_mask
+		= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+
+	if (!init_data->constraints.name) {
+		dev_err(&pdev->dev, "%s: node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = qpnp_smps_check_constraints(vreg, init_data);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: regulator constraints check failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	vreg->rdesc.name	= init_data->constraints.name;
+	vreg->rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->rdesc.owner	= THIS_MODULE;
+	vreg->rdesc.ops		= &spm_regulator_ops;
+	vreg->rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	/* Clamp the per-write voltage step and align it to the range step. */
+	vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+	of_property_read_u32(vreg->pdev->dev.of_node,
+				"qcom,max-voltage-step", &vreg->max_step_uV);
+
+	if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
+		vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+
+	vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
+	pr_debug("%s: max single voltage step size=%u uV\n",
+		vreg->rdesc.name, vreg->max_step_uV);
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = node;
+	vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = spm_regulator_avs_register(vreg, &pdev->dev, node);
+	if (rc) {
+		regulator_unregister(vreg->rdev);
+		return rc;
+	}
+
+	dev_set_drvdata(&pdev->dev, vreg);
+
+	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+		vreg->rdesc.name,
+		spm_regulator_using_range0(vreg) ? "LV" : "MV",
+		vreg->uV,
+		vreg->init_mode == QPNP_LOGICAL_MODE_PWM ? "PWM" :
+		    (vreg->init_mode == QPNP_LOGICAL_MODE_AUTO ? "AUTO" : "PFM"),
+		vreg->step_rate);
+
+	return rc;
+}
+
+/* Remove: unregister the AVS child (if present) and the main regulator. */
+static int spm_regulator_remove(struct platform_device *pdev)
+{
+	struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
+
+	if (vreg->avs_rdev)
+		regulator_unregister(vreg->avs_rdev);
+	regulator_unregister(vreg->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id spm_regulator_match_table[] = {
+	{ .compatible = SPM_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id spm_regulator_id[] = {
+	{ SPM_REGULATOR_DRIVER_NAME, 0 },
+	{}
+};
+/*
+ * NOTE(review): the table is a platform_device_id but is exported with
+ * the "spmi" bus type -- confirm this is intentional and not a leftover
+ * from an earlier SPMI-bus version of the driver.
+ */
+MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
+
+static struct platform_driver spm_regulator_driver = {
+	.driver = {
+		.name		= SPM_REGULATOR_DRIVER_NAME,
+		.of_match_table	= spm_regulator_match_table,
+	},
+	.probe		= spm_regulator_probe,
+	.remove		= spm_regulator_remove,
+	.id_table	= spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register spmi driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+	/*
+	 * Guard against double registration: this function is both an
+	 * arch_initcall and an exported symbol callers may invoke early.
+	 */
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+
+	has_registered = true;
+
+	return platform_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+/* Module exit: tear down the platform driver registration. */
+static void __exit spm_regulator_exit(void)
+{
+	platform_driver_unregister(&spm_regulator_driver);
+}
+
+/* arch_initcall so the supply exists before dependent CPU drivers probe. */
+arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0d74602e..9efd8af 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -343,6 +343,15 @@
Client driver for the WCNSS_CTRL SMD channel, used to download nv
firmware to a newly booted WCNSS chip.
+config MSM_PIL_MSS_QDSP6V5
+ tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+ depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+ help
+ Support for booting and shutting down QDSP6v5 (Hexagon) processors
+ in modem subsystems. If you would like to make or receive phone
+ calls then say Y here.
+ If unsure, say N.
+
config SETUP_SSR_NOTIF_TIMEOUTS
bool "Set timeouts on SSR sysmon notifications and notifier callbacks"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f5b2b90..62a34a5 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -46,6 +46,7 @@
obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o
obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/
obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
diff --git a/drivers/soc/qcom/cx_ipeak.c b/drivers/soc/qcom/cx_ipeak.c
index 543885b..6693f31 100644
--- a/drivers/soc/qcom/cx_ipeak.c
+++ b/drivers/soc/qcom/cx_ipeak.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <soc/qcom/cx_ipeak.h>
@@ -29,9 +30,13 @@
#define CXIP_CLIENT_OFFSET 0x1000
#define CXIP_CLIENT10_OFFSET 0x3000
+#define CXIP_VICTIM_OFFSET 0xB000
#define CXIP_POLL_TIMEOUT_US (50 * 1000)
+#define CXIP_VICTIMS 3
+#define VICTIM_ENTRIES 3
+
struct cx_ipeak_client;
struct cx_ipeak_core_ops {
@@ -43,14 +48,27 @@
spinlock_t vote_lock;
void __iomem *tcsr_vptr;
struct cx_ipeak_core_ops *core_ops;
+ u32 victims_count;
+ u32 victims_reg_count;
+ u32 danger_intr_num;
+ u32 safe_intr_num;
} device_ipeak;
struct cx_ipeak_client {
int vote_count;
unsigned int offset;
+ int client_id;
struct cx_ipeak_device *dev;
};
+static struct cx_ipeak_victims {
+ int client_id;
+ int victim_id;
+ u32 freq_limit;
+ void *data;
+ cx_ipeak_victim_fn victim_cb;
+} victims_ipeak[CXIP_VICTIMS];
+
/**
* cx_ipeak_register() - allocate client structure and fill device private and
* offset details.
@@ -84,6 +102,9 @@
if (device_ipeak.core_ops)
client = device_ipeak.core_ops->register_client
(cx_spec.args[0]);
+
+ client->client_id = cx_spec.args[0];
+
return client;
}
EXPORT_SYMBOL(cx_ipeak_register);
@@ -145,7 +166,57 @@
return client;
}
-/*
+/**
+ * cx_ipeak_victim_register - victim registration API to handle
+ * the cx ipeak hw interrupts (danger/safe) to throttle freq.
+ * @client: cx ipeak client
+ * @victim_cb: callback function of victim
+ * @data: data to be passed to victim while handling irq
+ */
+int cx_ipeak_victim_register(struct cx_ipeak_client *client,
+			cx_ipeak_victim_fn victim_cb, void *data)
+{
+	int i = 0;
+
+	if (!victim_cb)
+		return -EINVAL;
+
+	/*
+	 * Attach the callback to the victims_table entry whose client_id
+	 * matches this client (parsed from DT at probe time).
+	 * NOTE(review): 'client' is dereferenced without a NULL/ERR check,
+	 * and a client with no matching table entry silently returns 0 --
+	 * confirm both are acceptable to callers.
+	 */
+	while (i < device_ipeak.victims_count) {
+		if (client->client_id == victims_ipeak[i].client_id) {
+			victims_ipeak[i].victim_cb = victim_cb;
+			victims_ipeak[i].data = data;
+			device_ipeak.victims_reg_count++;
+			break;
+		}
+		i++;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(cx_ipeak_victim_register);
+EXPORT_SYMBOL(cx_ipeak_victim_register);
+
+/**
+ * cx_ipeak_victim_unregister - unregister victim client from
+ * cx_ipeak driver.
+ * @client: cx ipeak client
+ */
+
+void cx_ipeak_victim_unregister(struct cx_ipeak_client *client)
+{
+	int i = 0;
+
+	/* Clear the matching victims_table entry, leaving a NULL-cb hole. */
+	while (i < device_ipeak.victims_count) {
+		if (client->client_id == victims_ipeak[i].client_id) {
+			victims_ipeak[i].victim_cb = NULL;
+			victims_ipeak[i].data = NULL;
+			device_ipeak.victims_reg_count--;
+			break;
+		}
+		i++;
+	}
+}
+EXPORT_SYMBOL(cx_ipeak_victim_unregister);
+EXPORT_SYMBOL(cx_ipeak_victim_unregister);
+
+/**
* cx_ipeak_update() - Set/Clear client vote for Cx iPeak limit
* manager to throttle cDSP.
* @client: client handle.
@@ -258,6 +329,61 @@
return ret;
}
+/*
+ * Shared handler for the cx_ipeak danger/safe interrupts: invoke every
+ * registered victim callback with its frequency limit (danger) or 0
+ * (safe), then acknowledge via the per-victim TCSR register.
+ *
+ * Fix vs. original: the loop iterated over the first victims_reg_count
+ * slots of victims_ipeak[].  Unregistering a victim leaves a NULL-
+ * callback hole anywhere in the table, so that bound could both skip a
+ * registered entry past the hole and call a NULL victim_cb at the hole.
+ * Walk the whole parsed table and skip unregistered entries instead.
+ */
+static irqreturn_t cx_ipeak_irq_handler(int irq, void *data)
+{
+	int i;
+	irqreturn_t ret = IRQ_NONE;
+
+	for (i = 0; i < device_ipeak.victims_count; i++) {
+		cx_ipeak_victim_fn victim_cb = victims_ipeak[i].victim_cb;
+
+		if (!victim_cb)
+			continue;
+
+		if (irq == device_ipeak.danger_intr_num) {
+			/*
+			 * To set frequency limit at victim client
+			 * side in danger interrupt case
+			 */
+			victim_cb(victims_ipeak[i].data,
+					victims_ipeak[i].freq_limit);
+			writel_relaxed(1, (device_ipeak.tcsr_vptr +
+					CXIP_VICTIM_OFFSET +
+					((victims_ipeak[i].victim_id)*
+					CXIP_CLIENT_OFFSET)));
+			ret = IRQ_HANDLED;
+		} else if (irq == device_ipeak.safe_intr_num) {
+			/*
+			 * To remove frequency limit at victim client
+			 * side in safe interrupt case
+			 */
+			victim_cb(victims_ipeak[i].data, 0);
+			writel_relaxed(0, (device_ipeak.tcsr_vptr +
+					CXIP_VICTIM_OFFSET +
+					((victims_ipeak[i].victim_id)*
+					CXIP_CLIENT_OFFSET)));
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Look up the named interrupt on @pdev and request it (rising-edge,
+ * devres-managed).  Returns the IRQ number on success or a negative
+ * errno from lookup or request.
+ */
+int cx_ipeak_request_irq(struct platform_device *pdev, const char *name,
+		irq_handler_t handler, void *data)
+{
+	int ret, num = platform_get_irq_byname(pdev, name);
+
+	if (num < 0)
+		return num;
+
+	ret = devm_request_irq(&pdev->dev, num, handler, IRQF_TRIGGER_RISING,
+			       name, data);
+
+	if (ret)
+		dev_err(&pdev->dev, "Unable to get interrupt %s: %d\n",
+			name, ret);
+
+	return ret ? ret : num;
+}
+
/**
* cx_ipeak_unregister() - unregister client
* @client: client address to free
@@ -283,6 +409,9 @@
static int cx_ipeak_probe(struct platform_device *pdev)
{
struct resource *res;
+ int status = -EINVAL;
+ int i, ret, count;
+ u32 victim_en;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
device_ipeak.tcsr_vptr = devm_ioremap_resource(&pdev->dev, res);
@@ -297,6 +426,59 @@
else
device_ipeak.core_ops = NULL;
+ victim_en = of_property_read_bool(pdev->dev.of_node,
+ "victims_table");
+
+ if (victim_en) {
+ count = of_property_count_u32_elems(pdev->dev.of_node,
+ "victims_table");
+
+ if (((count%VICTIM_ENTRIES) != 0) ||
+ ((count/VICTIM_ENTRIES) > CXIP_VICTIMS))
+ return -EINVAL;
+
+ for (i = 0; i < (count/VICTIM_ENTRIES); i++) {
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "victims_table", i*VICTIM_ENTRIES,
+ &victims_ipeak[i].client_id);
+
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "victims_table", (i*VICTIM_ENTRIES) + 1,
+ &victims_ipeak[i].victim_id);
+
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "victims_table", (i*VICTIM_ENTRIES) + 2,
+ &victims_ipeak[i].freq_limit);
+
+ if (ret)
+ return ret;
+
+ device_ipeak.victims_count++;
+ }
+
+ status = cx_ipeak_request_irq(pdev, "cx_ipeak_danger",
+ cx_ipeak_irq_handler, NULL);
+
+ if (status < 0)
+ return status;
+
+ device_ipeak.danger_intr_num = status;
+
+ status = cx_ipeak_request_irq(pdev, "cx_ipeak_safe",
+ cx_ipeak_irq_handler, NULL);
+
+ if (status < 0)
+ return status;
+
+ device_ipeak.safe_intr_num = status;
+ }
+
spin_lock_init(&device_ipeak.vote_lock);
return 0;
}
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 864bd65..0ee43a8 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -92,14 +92,6 @@
static bool eud_ready;
static struct platform_device *eud_private;
-static int check_eud_mode_mgr2(struct eud_chip *chip)
-{
- u32 val;
-
- val = scm_io_read(chip->eud_mode_mgr2_phys_base);
- return val & BIT(0);
-}
-
static void enable_eud(struct platform_device *pdev)
{
struct eud_chip *priv = platform_get_drvdata(pdev);
@@ -113,7 +105,7 @@
priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
/* Enable secure eud if supported */
- if (priv->secure_eud_en && !check_eud_mode_mgr2(priv)) {
+ if (priv->secure_eud_en) {
ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
if (ret)
@@ -572,9 +564,6 @@
}
chip->eud_mode_mgr2_phys_base = res->start;
-
- if (check_eud_mode_mgr2(chip))
- enable = 1;
}
chip->need_phy_clk_vote = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index b869497..2cea0df 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -177,6 +177,20 @@
return "UNKNOWN";
};
+/* Map a SOC wake event type to a printable name for log messages. */
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
+{
+	switch (type) {
+	case ICNSS_SOC_WAKE_REQUEST_EVENT:
+		return "SOC_WAKE_REQUEST";
+	case ICNSS_SOC_WAKE_RELEASE_EVENT:
+		return "SOC_WAKE_RELEASE";
+	case ICNSS_SOC_WAKE_EVENT_MAX:
+		return "SOC_EVENT_MAX";
+	}
+
+	return "UNKNOWN";
+}
+
int icnss_driver_event_post(struct icnss_priv *priv,
enum icnss_driver_event_type type,
u32 flags, void *data)
@@ -249,6 +263,78 @@
return ret;
}
+/*
+ * icnss_soc_wake_event_post() - queue a SOC wake event to the worker
+ * @priv:  driver state; NULL returns -ENODEV
+ * @type:  ICNSS_SOC_WAKE_REQUEST_EVENT or ICNSS_SOC_WAKE_RELEASE_EVENT
+ * @flags: ICNSS_EVENT_SYNC and/or ICNSS_EVENT_UNINTERRUPTIBLE
+ * @data:  opaque payload passed to the event handler
+ *
+ * Ownership: async events are freed by the worker; sync events are
+ * freed here after the worker completes them (or handed back to the
+ * worker if the wait is interrupted).
+ */
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+			      enum icnss_soc_wake_event_type type,
+			      u32 flags, void *data)
+{
+	struct icnss_soc_wake_event *event;
+	unsigned long irq_flags;
+	int gfp = GFP_KERNEL;
+	int ret = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+		     icnss_soc_wake_event_to_str(type), type, current->comm,
+		     flags, priv->state);
+
+	if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
+		icnss_pr_err("Invalid Event type: %d, can't post", type);
+		return -EINVAL;
+	}
+
+	/* Cannot sleep in IRQ / irqs-off context: use atomic allocation */
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event), gfp);
+	if (!event)
+		return -ENOMEM;
+
+	/* Hold a wakeup source until the event has been handled */
+	icnss_pm_stay_awake(priv);
+
+	event->type = type;
+	event->data = data;
+	init_completion(&event->complete);
+	event->ret = ICNSS_EVENT_PENDING;
+	event->sync = !!(flags & ICNSS_EVENT_SYNC);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	list_add_tail(&event->list, &priv->soc_wake_msg_list);
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	priv->stats.soc_wake_events[type].posted++;
+	queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
+
+	if (!(flags & ICNSS_EVENT_SYNC))
+		goto out;
+
+	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&event->complete);
+	else
+		ret = wait_for_completion_interruptible(&event->complete);
+
+	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+		     icnss_soc_wake_event_to_str(type), type, priv->state, ret,
+		     event->ret);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+		/*
+		 * Interrupted before the worker ran: flip to async under the
+		 * lock so the worker (not us) frees the event.
+		 */
+		event->sync = false;
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	ret = event->ret;
+	kfree(event);
+
+out:
+	icnss_pm_relax(priv);
+	return ret;
+}
+
bool icnss_is_fw_ready(void)
{
if (!penv)
@@ -888,6 +974,41 @@
return ret;
}
+/* Worker-side handler: send the QMI wake request, take a ref on success. */
+static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
+{
+	int rc;
+
+	if (!priv)
+		return -ENODEV;
+
+	rc = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_REQUEST_V01);
+	if (rc)
+		return rc;
+
+	atomic_inc(&priv->soc_wake_ref_count);
+	return 0;
+}
+
+/*
+ * Worker-side handler: drop one SOC-wake reference; only when the count
+ * reaches zero is the QMI wake-release actually sent to firmware.
+ *
+ * NOTE(review): an unpaired release drives the counter negative, which
+ * is truthy here and silently skips the QMI message — confirm callers
+ * always pair request/release.
+ */
+static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
+{
+	int ret = 0;
+	int count = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	count = atomic_dec_return(&priv->soc_wake_ref_count);
+
+	if (count) {
+		/* Other holders remain awake; nothing to send yet */
+		icnss_pr_dbg("Wake release not called. Ref count: %d",
+			     count);
+		return 0;
+	}
+
+	ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_RELEASE_V01);
+
+	return ret;
+}
+
static int icnss_driver_event_register_driver(struct icnss_priv *priv,
void *data)
{
@@ -1225,6 +1346,68 @@
icnss_pm_relax(priv);
}
+/*
+ * Workqueue handler: drain soc_wake_msg_list and dispatch each event.
+ *
+ * Lock discipline: soc_wake_msg_lock is held at the top of every loop
+ * iteration (it protects the list and the event->sync handoff) and is
+ * dropped while a handler runs, since handlers may sleep in QMI.
+ */
+static void icnss_soc_wake_msg_work(struct work_struct *work)
+{
+	struct icnss_priv *priv =
+		container_of(work, struct icnss_priv, soc_wake_msg_work);
+	struct icnss_soc_wake_event *event;
+	unsigned long flags;
+	int ret;
+
+	icnss_pm_stay_awake(priv);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+
+	while (!list_empty(&priv->soc_wake_msg_list)) {
+		event = list_first_entry(&priv->soc_wake_msg_list,
+					 struct icnss_soc_wake_event, list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type,
+			     priv->state);
+
+		switch (event->type) {
+		case ICNSS_SOC_WAKE_REQUEST_EVENT:
+			ret = icnss_event_soc_wake_request(priv,
+							   event->data);
+			break;
+		case ICNSS_SOC_WAKE_RELEASE_EVENT:
+			ret = icnss_event_soc_wake_release(priv,
+							   event->data);
+			break;
+		default:
+			icnss_pr_err("Invalid Event type: %d", event->type);
+			kfree(event);
+			/*
+			 * Reacquire the lock before looping: the loop top
+			 * and the final unlock both expect it held.
+			 */
+			spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+			continue;
+		}
+
+		priv->stats.soc_wake_events[event->type].processed++;
+
+		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type, ret,
+			     priv->state);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+		if (event->sync) {
+			/* Poster owns and frees the event; wake it up */
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		/* Async event: worker owns it */
+		kfree(event);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+	icnss_pm_relax(priv);
+}
+
static int icnss_msa0_ramdump(struct icnss_priv *priv)
{
struct ramdump_segment segment;
@@ -1963,6 +2146,71 @@
}
EXPORT_SYMBOL(icnss_set_fw_log_mode);
+/*
+ * icnss_force_wake_request() - take a SOC-wake reference for @dev
+ *
+ * First caller posts an async wake-request event; subsequent callers
+ * only bump the reference count. Returns 0, -ENODEV or -EINVAL.
+ */
+int icnss_force_wake_request(struct device *dev)
+{
+	struct icnss_priv *priv;
+	int count = 0;
+
+	if (!dev)
+		return -ENODEV;
+
+	/*
+	 * Fetch drvdata only after the NULL check: dev_get_drvdata(NULL)
+	 * dereferences the pointer and would crash.
+	 */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake request");
+
+	/*
+	 * NOTE(review): read-then-inc is not atomic as a unit; two racing
+	 * first callers could both post a request — confirm callers
+	 * serialize, as the pre-existing code relied on the same pattern.
+	 */
+	if (atomic_read(&priv->soc_wake_ref_count)) {
+		count = atomic_inc_return(&priv->soc_wake_ref_count);
+		icnss_pr_dbg("SOC already awake, Ref count: %d", count);
+		return 0;
+	}
+
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_request);
+
+/*
+ * icnss_force_wake_release() - drop one SOC-wake reference for @dev
+ *
+ * Posts an async release event; the worker sends the QMI release only
+ * when the reference count reaches zero.
+ */
+int icnss_force_wake_release(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* NULL-check @dev before dereferencing it via dev_get_drvdata() */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake response");
+
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_release);
+
+/*
+ * icnss_is_device_awake() - current SOC-wake reference count
+ *
+ * Returns >0 while the SOC is held awake, 0 otherwise, or a negative
+ * errno on a bad device pointer / uninitialized driver.
+ */
+int icnss_is_device_awake(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* NULL-check @dev before dereferencing it via dev_get_drvdata() */
+	priv = dev_get_drvdata(dev);
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	return atomic_read(&priv->soc_wake_ref_count);
+}
+EXPORT_SYMBOL(icnss_is_device_awake);
+
int icnss_athdiag_read(struct device *dev, uint32_t offset,
uint32_t mem_type, uint32_t data_len,
uint8_t *output)
@@ -2656,6 +2904,7 @@
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ spin_lock_init(&priv->soc_wake_msg_lock);
mutex_init(&priv->dev_lock);
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
@@ -2668,10 +2917,21 @@
INIT_WORK(&priv->event_work, icnss_driver_event_work);
INIT_LIST_HEAD(&priv->event_list);
+ priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
+ WQ_UNBOUND, 1);
+ if (!priv->soc_wake_wq) {
+ icnss_pr_err("Soc wake Workqueue creation failed\n");
+ ret = -EFAULT;
+ goto out_destroy_wq;
+ }
+
+ INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
+ INIT_LIST_HEAD(&priv->soc_wake_msg_list);
+
ret = icnss_register_fw_service(priv);
if (ret < 0) {
icnss_pr_err("fw service registration failed: %d\n", ret);
- goto out_destroy_wq;
+ goto out_destroy_soc_wq;
}
icnss_enable_recovery(priv);
@@ -2697,6 +2957,8 @@
return 0;
+out_destroy_soc_wq:
+ destroy_workqueue(priv->soc_wake_wq);
out_destroy_wq:
destroy_workqueue(priv->event_wq);
smmu_cleanup:
@@ -2733,6 +2995,9 @@
if (priv->event_wq)
destroy_workqueue(priv->event_wq);
+ if (priv->soc_wake_wq)
+ destroy_workqueue(priv->soc_wake_wq);
+
priv->iommu_domain = NULL;
icnss_hw_power_off(priv);
diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h
index cd5d6dd..44efede 100644
--- a/drivers/soc/qcom/icnss2/main.h
+++ b/drivers/soc/qcom/icnss2/main.h
@@ -55,6 +55,12 @@
ICNSS_DRIVER_EVENT_MAX,
};
+/* Event types served by the soc_wake workqueue. */
+enum icnss_soc_wake_event_type {
+	ICNSS_SOC_WAKE_REQUEST_EVENT,
+	ICNSS_SOC_WAKE_RELEASE_EVENT,
+	ICNSS_SOC_WAKE_EVENT_MAX,
+};
+
struct icnss_event_server_arrive_data {
unsigned int node;
unsigned int port;
@@ -74,6 +80,15 @@
void *data;
};
+/*
+ * One queued SOC wake event. Freed by the poster when @sync is true,
+ * otherwise by the worker after processing.
+ */
+struct icnss_soc_wake_event {
+	struct list_head list;
+	enum icnss_soc_wake_event_type type;
+	bool sync;	/* poster blocks on @complete */
+	struct completion complete;
+	int ret;	/* handler result; ICNSS_EVENT_PENDING until run */
+	void *data;
+};
+
enum icnss_driver_state {
ICNSS_WLFW_CONNECTED,
ICNSS_POWER_ON,
@@ -150,6 +165,11 @@
} events[ICNSS_DRIVER_EVENT_MAX];
struct {
+ u32 posted;
+ u32 processed;
+ } soc_wake_events[ICNSS_SOC_WAKE_EVENT_MAX];
+
+ struct {
uint32_t request;
uint32_t free;
uint32_t enable;
@@ -210,6 +230,9 @@
u32 exit_power_save_req;
u32 exit_power_save_resp;
u32 exit_power_save_err;
+ u32 soc_wake_req;
+ u32 soc_wake_resp;
+ u32 soc_wake_err;
};
#define WLFW_MAX_TIMESTAMP_LEN 32
@@ -282,10 +305,14 @@
size_t smmu_iova_ipa_len;
struct qmi_handle qmi;
struct list_head event_list;
+ struct list_head soc_wake_msg_list;
spinlock_t event_lock;
+ spinlock_t soc_wake_msg_lock;
struct work_struct event_work;
struct work_struct fw_recv_msg_work;
+ struct work_struct soc_wake_msg_work;
struct workqueue_struct *event_wq;
+ struct workqueue_struct *soc_wake_wq;
phys_addr_t msa_pa;
phys_addr_t msi_addr_pa;
dma_addr_t msi_addr_iova;
@@ -342,6 +369,7 @@
struct icnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
void *get_info_cb_ctx;
int (*get_info_cb)(void *ctx, void *event, int event_len);
+ atomic_t soc_wake_ref_count;
};
struct icnss_reg_info {
@@ -358,5 +386,9 @@
u32 flags, void *data);
void icnss_allow_recursive_recovery(struct device *dev);
void icnss_disallow_recursive_recovery(struct device *dev);
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type);
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+ enum icnss_soc_wake_event_type type,
+ u32 flags, void *data);
#endif
diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c
index 3a96131..225afb1 100644
--- a/drivers/soc/qcom/icnss2/qmi.c
+++ b/drivers/soc/qcom/icnss2/qmi.c
@@ -413,6 +413,82 @@
return ret;
}
+/*
+ * Send a QMI SOC wake request/release to WLAN firmware and wait for
+ * the response. Returns 0 on success; on any failure soc_wake_err is
+ * incremented and a negative value is returned.
+ */
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+			   enum wlfw_soc_wake_enum_v01 type)
+{
+	int ret;
+	struct wlfw_soc_wake_req_msg_v01 *req;
+	struct wlfw_soc_wake_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+
+	if (!priv)
+		return -ENODEV;
+
+	/* Firmware is unreachable while it is down */
+	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+		return -EINVAL;
+
+	icnss_pr_dbg("Sending soc wake msg, type: 0x%x\n",
+		     type);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+	req->wake_valid = 1;
+	req->wake = type;
+
+	priv->stats.soc_wake_req++;
+
+	ret = qmi_txn_init(&priv->qmi, &txn,
+			   wlfw_soc_wake_resp_msg_v01_ei, resp);
+
+	if (ret < 0) {
+		icnss_pr_err("Fail to init txn for wake msg resp %d\n",
+			     ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&priv->qmi, NULL, &txn,
+			       QMI_WLFW_SOC_WAKE_REQ_V01,
+			       WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_soc_wake_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		icnss_pr_err("Fail to send soc wake msg %d\n", ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, priv->ctrl_params.qmi_timeout);
+	if (ret < 0) {
+		icnss_qmi_fatal_err("SOC wake timed out with ret %d\n",
+				    ret);
+		goto out;
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_qmi_fatal_err(
+			"SOC wake request rejected,result:%d error:%d\n",
+			resp->resp.result, resp->resp.error);
+		/* Negated QMI result code (driver convention), not an errno */
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	priv->stats.soc_wake_resp++;
+
+	kfree(resp);
+	kfree(req);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	priv->stats.soc_wake_err++;
+	return ret;
+}
+
int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv)
{
int ret;
@@ -2196,7 +2272,7 @@
if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
return -EINVAL;
- if (test_bit(ICNSS_FW_DOWN, &priv->state))
+ if (test_bit(ICNSS_FW_DOWN, &plat_priv->state))
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h
index c4c42ce..f4c1d2b 100644
--- a/drivers/soc/qcom/icnss2/qmi.h
+++ b/drivers/soc/qcom/icnss2/qmi.h
@@ -139,6 +139,12 @@
{
return 0;
}
+
+/*
+ * No-op stub when QMI support is compiled out. Must be static inline:
+ * a plain definition in this header would produce multiple-definition
+ * link errors when qmi.h is included from more than one translation
+ * unit (the neighboring stubs follow the same convention).
+ */
+static inline int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+					 enum wlfw_soc_wake_enum_v01 type)
+{
+	return 0;
+}
#else
int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv);
int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data);
@@ -177,6 +183,8 @@
int wlfw_exit_power_save_send_msg(struct icnss_priv *priv);
int icnss_wlfw_get_info_send_sync(struct icnss_priv *priv, int type,
void *cmd, int cmd_len);
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+ enum wlfw_soc_wake_enum_v01 type);
#endif
#endif /* __ICNSS_QMI_H__*/
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 6fa278f..1df009f 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1305,7 +1305,7 @@
* Fallback to serial loading of blobs if the
* workqueue creatation failed during module init.
*/
- if (pil_wq) {
+ if (pil_wq && !(desc->sequential_loading)) {
ret = pil_load_segs(desc);
if (ret)
goto err_deinit_image;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index c83b038..29fa4b6 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -63,6 +63,7 @@
bool shutdown_fail;
bool modem_ssr;
bool clear_fw_region;
+ bool sequential_loading;
u32 subsys_vmid;
bool signal_aop;
struct mbox_client cl;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 0000000..9cff905
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB 0x010
+#define QDSP6SS_DBG_CFG 0x018
+#define QDSP6SS_NMI_CFG 0x40
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE 0x180
+#define MSS_MODEM_HALT_BASE 0x200
+#define MSS_NC_HALT_BASE 0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS 0x1
+#define STATUS_XPU_UNLOCKED 0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE 0x00
+#define RMB_PBL_STATUS 0x04
+#define RMB_MBA_COMMAND 0x08
+#define RMB_MBA_STATUS 0x0C
+#define RMB_PMI_META_DATA 0x10
+#define RMB_PMI_CODE_START 0x14
+#define RMB_PMI_CODE_LENGTH 0x18
+#define RMB_PROTOCOL_VERSION 0x1C
+#define RMB_MBA_DEBUG_INFORMATION 0x20
+
+#define POLL_INTERVAL_US 50
+
+#define CMD_META_DATA_READY 0x1
+#define CMD_LOAD_READY 0x2
+#define CMD_PILFAIL_NFY_MBA 0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS 0x3
+#define STATUS_AUTH_COMPLETE 0x4
+#define STATUS_MBA_UNLOCKED 0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define BHS_TIMEOUT_US 50
+
+#define MSS_RESTART_PARAM_ID 0x2
+#define MSS_RESTART_ID 0xA
+
+#define MSS_MAGIC 0XAABADEAD
+
+/* Timeout value for MBA boot when minidump is enabled */
+#define MBA_ENCRYPTION_TIMEOUT 3000
+/* SCM service command used to notify TZ of the modem memory region. */
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+/* Max wait for PBL/MBA boot status, in ms (tunable at runtime) */
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, 0644);
+
+/* Max wait for modem image authentication, in ms */
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, 0644);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, 0644);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, 0644);
+
+/*
+ * Dump all PBL/MBA RMB interface registers to the log for postmortem
+ * analysis; optionally panic when the debug cookie is armed.
+ */
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	static const struct {
+		const char *name;
+		u32 offset;
+	} rmb_regs[] = {
+		{ "RMB_MBA_IMAGE", RMB_MBA_IMAGE },
+		{ "RMB_PBL_STATUS", RMB_PBL_STATUS },
+		{ "RMB_MBA_COMMAND", RMB_MBA_COMMAND },
+		{ "RMB_MBA_STATUS", RMB_MBA_STATUS },
+		{ "RMB_PMI_META_DATA", RMB_PMI_META_DATA },
+		{ "RMB_PMI_CODE_START", RMB_PMI_CODE_START },
+		{ "RMB_PMI_CODE_LENGTH", RMB_PMI_CODE_LENGTH },
+		{ "RMB_PROTOCOL_VERSION", RMB_PROTOCOL_VERSION },
+		{ "RMB_MBA_DEBUG_INFORMATION", RMB_MBA_DEBUG_INFORMATION },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rmb_regs); i++)
+		pr_err("%s: %08x\n", rmb_regs[i].name,
+		       readl_relaxed(base + rmb_regs[i].offset));
+
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+/* Turn on the external BHS (when present) and poll for its status bit. */
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	u32 val;
+
+	if (!drv->cxrail_bhs)
+		return 0;
+
+	val = readl_relaxed(drv->cxrail_bhs) | EXTERNAL_BHS_ON;
+	writel_relaxed(val, drv->cxrail_bhs);
+
+	return readl_poll_timeout(drv->cxrail_bhs, val,
+			val & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+}
+
+/* Drop the external BHS enable bit (when present); always succeeds. */
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 val;
+
+	if (!drv->cxrail_bhs) {
+		return 0;
+	}
+
+	val = readl_relaxed(drv->cxrail_bhs) & ~EXTERNAL_BHS_ON;
+	writel_relaxed(val, drv->cxrail_bhs);
+	return 0;
+}
+
+/*
+ * Enable the MSS bus/boot clocks in bring-up order. On failure, unwind
+ * only the clocks that were successfully enabled.
+ */
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+/*
+ * Each label disables the clock enabled in the *previous* step; the
+ * clock that failed to enable must not be disabled (an unbalanced
+ * prepare/enable count trips a WARN in the clk framework).
+ */
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+err_ahb_clk:
+	return ret;
+}
+
+/*
+ * Disable the MSS clocks in reverse bring-up order. The AHB clock is
+ * left on while a separate AHB vote (ahb_clk_vote) still holds it.
+ */
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+/*
+ * Set or clear this subsystem's bit in the PDC sync register (no-op if
+ * the register was not mapped). @pdc_sync selects assert vs. deassert.
+ */
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+	u32 val = 0;
+	u32 mss_pdc_mask = BIT(drv->mss_pdc_offset);
+
+	if (drv->pdc_sync) {
+		val = readl_relaxed(drv->pdc_sync);
+		if (pdc_sync)
+			val |= mss_pdc_mask;
+		else
+			val &= ~mss_pdc_mask;
+		writel_relaxed(val, drv->pdc_sync);
+		/* Ensure PDC is written before next write */
+		wmb();
+		udelay(2);
+	}
+}
+
+/* Drive the MSS alternate-reset line when the register is mapped. */
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+	if (!drv->alt_reset)
+		return;
+
+	writel_relaxed(val, drv->alt_reset);
+	/* Ensure alt reset is written before restart reg */
+	wmb();
+	udelay(2);
+}
+
+/*
+ * Write the MSS restart register, either directly (non-secure) or via
+ * an SCM call when the register is secured. Returns the SCM transport
+ * error; a TZ-side failure is only logged.
+ */
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		/* Ensure physical address access is done before returning.*/
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+					MSS_RESTART_ID), &desc);
+		scm_ret = desc.ret[0];
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+/*
+ * Assert PDC sync, alt reset and the restart register, in that order.
+ * When the platform needs clocks off across reset (reset_clk), also
+ * drop the MSS clocks and any extra AHB vote first.
+ */
+int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	pil_mss_pdc_sync(drv, 1);
+	pil_mss_alt_reset(drv, 1);
+	if (drv->reset_clk) {
+		pil_mss_disable_clks(drv);
+		if (drv->ahb_clk_vote)
+			clk_disable_unprepare(drv->ahb_clk);
+	}
+
+	ret = pil_mss_restart_reg(drv, true);
+
+	return ret;
+}
+
+/*
+ * Mirror of pil_mss_assert_resets(): release the restart register,
+ * re-enable clocks if they were dropped, then clear alt reset and PDC
+ * sync in reverse assert order.
+ */
+int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		return ret;
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+
+	if (drv->reset_clk)
+		pil_mss_enable_clks(drv);
+	pil_mss_alt_reset(drv, 0);
+	pil_mss_pdc_sync(drv, false);
+
+	return ret;
+}
+
+/*
+ * Poll the RMB status registers until both PBL and MBA report boot
+ * completion, honoring the (possibly disabled) boot timeout. Returns
+ * 0 on success, -ETIMEDOUT/-EINVAL on timeout or unexpected status.
+ */
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val;
+
+	/* Minidump-enabled targets need a longer window for encryption */
+	if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
+		pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;
+
+	val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ int ret = 0;
+
+ if (drv->axi_halt_base) {
+ pil_q6v5_halt_axi_port(pil,
+ drv->axi_halt_base + MSS_Q6_HALT_BASE);
+ pil_q6v5_halt_axi_port(pil,
+ drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+ pil_q6v5_halt_axi_port(pil,
+ drv->axi_halt_base + MSS_NC_HALT_BASE);
+ }
+
+ if (drv->axi_halt_q6)
+ pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+ if (drv->axi_halt_mss)
+ pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+ if (drv->axi_halt_nc)
+ pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+ /*
+ * Software workaround to avoid high MX current during LPASS/MSS
+ * restart.
+ */
+ if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+ ret = clk_prepare_enable(drv->ahb_clk);
+ if (!ret)
+ assert_clamps(pil);
+ else
+ dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+ ret);
+ }
+
+ pil_mss_pdc_sync(drv, true);
+ /* Wait 6 32kHz sleep cycles for PDC SYNC true */
+ udelay(200);
+ pil_mss_restart_reg(drv, 1);
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ ret = pil_mss_restart_reg(drv, 0);
+ /* Wait 6 32kHz sleep cycles for reset false */
+ udelay(200);
+ pil_mss_pdc_sync(drv, false);
+
+ if (drv->is_booted) {
+ pil_mss_disable_clks(drv);
+ pil_mss_power_down(drv);
+ drv->is_booted = false;
+ }
+
+ return ret;
+}
+
+/*
+ * Tear down the modem image. On the error path, first notify MBA of
+ * the PIL failure and wait for it to unlock the MBA region; then shut
+ * the subsystem down and reclaim the MBA/DP buffer (unless the buffer
+ * lives in a fixed DMA device and is reused across boots).
+ */
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_MBA_UNLOCKED || status < 0,
+			POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+				ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+				status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/* In case of any failure where reclaiming MBA and DP memory
+	 * could not happen, free the memory here
+	 */
+	if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+/* Public deinit entry point: always takes the MBA-notify error path. */
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+/*
+ * Vote the MX (and optional MSS) rails at their DT-specified voltages
+ * and take the common q6v5 proxy votes. On any failure the votes taken
+ * so far are rolled back (goto-cleanup via @out for the MX rail).
+ */
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+			ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	/* Optional dedicated MSS rail */
+	if (drv->vreg) {
+		ret = of_property_read_u32(pil->dev->of_node, "vdd_mss-uV",
+					   &uv);
+		if (ret) {
+			dev_err(pil->dev,
+				"missing vdd_mss-uV property(rc:%d)\n", ret);
+			goto out;
+		}
+
+		ret = regulator_set_voltage(drv->vreg, uv,
+					    INT_MAX);
+		if (ret) {
+			dev_err(pil->dev, "Failed to set vreg voltage(rc:%d)\n",
+				ret);
+			goto out;
+		}
+
+		ret = regulator_set_load(drv->vreg, 100000);
+		if (ret < 0) {
+			dev_err(pil->dev, "Failed to set vreg mode(rc:%d)\n",
+				ret);
+			goto out;
+		}
+		ret = regulator_enable(drv->vreg);
+		if (ret) {
+			dev_err(pil->dev, "Failed to enable vreg(rc:%d)\n",
+				ret);
+			regulator_set_voltage(drv->vreg, 0, INT_MAX);
+			goto out;
+		}
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret && drv->vreg) {
+		/* q6v5 votes failed: roll back the MSS rail before @out */
+		regulator_disable(drv->vreg);
+		regulator_set_voltage(drv->vreg, 0, INT_MAX);
+	}
+out:
+	if (ret) {
+		/* Roll back the MX vote taken above */
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+/* Release the rail votes taken in pil_mss_make_proxy_votes(). */
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *q6 = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+
+	regulator_disable(q6->vreg_mx);
+	regulator_set_voltage(q6->vreg_mx, 0, INT_MAX);
+
+	if (!q6->vreg)
+		return;
+
+	regulator_disable(q6->vreg);
+	regulator_set_voltage(q6->vreg, 0, INT_MAX);
+}
+
+/*
+ * Notify TZ (SCM PAS_MEM_SETUP) of the modem image memory region.
+ * Returns 0 when the subsystem does not require the call, the SCM
+ * transport error on failure, otherwise TZ's own status.
+ */
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	/*
+	 * The legacy scm_call() request struct was dead code since the
+	 * driver moved to scm_call2(); only the descriptor is used.
+	 */
+	desc.args[0] = md->pas_id;
+	desc.args[1] = addr;
+	desc.args[2] = size;
+	desc.arginfo = SCM_ARGS(3);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+			&desc);
+	scm_ret = desc.ret[0];
+
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+/*
+ * Full MSS boot sequence: power + clocks, reset pulse, program the MBA
+ * (or legacy boot vector) and optional debug-policy addresses in the
+ * RMB, release Q6 from reset, and (for self-authenticating images)
+ * wait for PBL/MBA to report ready. Unwinds via goto labels on error.
+ */
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	u32 debug_val = 0;
+	int ret;
+
+	trace_pil_func(__func__);
+	/* Boot from the MBA/DP buffer when one was loaded */
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		/* Save state of modem debug register before full reset */
+		debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Assert reset to subsystem */
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
+	if (ret)
+		goto err_restart;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		/* Restore (or override) the saved debug configuration */
+		writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
+		if (modem_dbg_cfg)
+			writel_relaxed(modem_dbg_cfg,
+				drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+/*
+ * pil_mss_reset_load_mba() - Load the MBA (and optional "msadp" debug
+ * policy) firmware into a DMA buffer and bring the modem Q6 out of reset.
+ *
+ * When the MBA memory device is fixed and an image is already resident,
+ * loading is skipped and only the reset sequence runs.  On success the
+ * firmware handles are released; on failure the MBA/DP buffer and any
+ * hypervisor memory assignment are rolled back.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw = NULL, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret;
+	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
+
+	trace_pil_func(__func__);
+	/* Fixed MBA memory: image persists across boots, skip reload */
+	if (drv->mba_dp_virt && md->mba_mem_dev_fixed)
+		goto mss_reset;
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* MBA image gets the first 1MB; DP (if any) is appended after it */
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
+	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
+
+	/* Debug policy is optional; continue without it on failure */
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
+	}
+
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	if (fw->size <= SZ_1M) {
+		/* Ensures memcpy is done for max 1MB fw size */
+		memcpy(mba_dp_virt, data, fw->size);
+	} else {
+		dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
+			__func__);
+		ret = -EINVAL;
+		goto err_mba_data;
+	}
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory (only present when dp_fw loaded) */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	/* Hand the buffer over to the modem subsystem via hypervisor */
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	dp_fw = NULL;
+	fw = NULL;
+
+mss_reset:
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	return 0;
+
+err_mss_reset:
+	/* Return MBA/DP memory ownership to Linux before freeing it */
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+						drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	if (fw)
+		release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+/*
+ * pil_mss_debug_reset() - Run the minidump debug reset sequence so the
+ * modem can encrypt its minidump contents before a ramdump is collected.
+ *
+ * No-op unless a minidump subsystem is present, enabled, and encryption
+ * is required.  Brings up clocks, raises an NMI, cycles the subsystem
+ * reset, then waits a fixed period for encryption to complete.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int pil_mss_debug_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 encryption_status;
+	int ret;
+
+
+	if (!pil->minidump_ss)
+		return 0;
+
+	encryption_status = pil->minidump_ss->encryption_status;
+
+	if ((pil->minidump_ss->md_ss_enable_status != MD_SS_ENABLED) ||
+		encryption_status == MD_SS_ENCR_NOTREQ)
+		return 0;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		return ret;
+
+	/* Raise an NMI so the modem prepares for the debug reset */
+	if (pil->minidump_ss) {
+		writel_relaxed(0x1, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(2);
+	}
+	/* Assert reset to subsystem */
+	pil_mss_restart_reg(drv, true);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_restart_reg(drv, false);
+	if (ret)
+		goto err_restart;
+	/* Let write complete before proceeding */
+	mb();
+	udelay(200);
+	ret = pil_q6v5_reset(pil);
+	/*
+	 * Need to Wait for timeout for debug reset sequence to
+	 * complete before returning
+	 */
+	pr_info("Minidump: waiting encryption to complete\n");
+	msleep(13000);
+	/* Clear the NMI request now that the sequence has run */
+	if (pil->minidump_ss) {
+		writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(200);
+	}
+	if (ret)
+		goto err_restart;
+	return 0;
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
+/*
+ * pil_msa_auth_modem_mdt() - Hand the modem image metadata (mdt headers)
+ * to the MBA for authentication.
+ *
+ * Copies @metadata into a 4K-aligned DMA buffer, optionally assigns it to
+ * the subsystem VM, then writes its address to RMB_PMI_META_DATA and polls
+ * RMB_MBA_STATUS until authentication succeeds, fails, or times out.
+ * On any failure the MBA/DP buffer is torn down (fall-through into the
+ * fail: path below) so a subsequent boot starts clean.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+					size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	/* Poll timeout in us; 0 disables the timeout entirely */
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	unsigned long attrs = 0;
+
+	trace_pil_func(__func__);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_SKIP_ZEROING;
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
+					GFP_KERNEL, attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+		POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+								status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);
+
+	/* Success returns here; errors fall through into cleanup below */
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+			drv->q6->mba_dp_virt = NULL;
+		}
+
+	}
+	return ret;
+}
+
+/*
+ * Boot the MBA image, then authenticate the modem metadata headers.
+ * Used as the init_image op when MBA self-authentication is enabled.
+ */
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+					const u8 *metadata, size_t size)
+{
+	int rc = pil_mss_reset_load_mba(pil);
+
+	return rc ? rc : pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+/*
+ * pil_msa_mba_verify_blob() - Stream one loaded image segment to the MBA
+ * for verification.
+ *
+ * The first call (length counter still 0) programs the segment start
+ * address and issues CMD_LOAD_READY; every call then advances the running
+ * length counter by @size and checks the MBA status register for errors.
+ *
+ * Returns 0 on success, -EINVAL if the MBA reported an error.
+ */
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * pil_msa_mba_auth() - Wait for the MBA to finish authenticating all
+ * streamed modem segments, then release the MBA/DP staging buffer.
+ *
+ * Polls RMB_MBA_STATUS for STATUS_AUTH_COMPLETE (or an MBA error) with a
+ * configurable timeout.  The MBA/DP buffer is reclaimed regardless of the
+ * outcome (unless the MBA memory device is fixed), and the AHB clock vote
+ * taken during boot is dropped.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	/* Poll timeout in us; 0 disables the timeout entirely */
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ * No proxy votes or memory setup are performed for this variant.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 0000000..0310234
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+/* Per-device state for one modem (MSS) peripheral instance. */
+struct modem_data {
+	struct q6v5_data *q6;		/* Q6 core state from pil_q6v5_init() */
+	struct subsys_device *subsys;	/* handle from subsys_register() */
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;		/* "modem" ramdump device */
+	void *minidump_dev;		/* "md_modem" minidump device */
+	bool crash_shutdown;		/* set while we force-stop the modem */
+	u32 pas_id;
+	bool ignore_errors;		/* suppress error IRQs during restart */
+	struct completion stop_ack;	/* signalled by stop-ack interrupt */
+	void __iomem *rmb_base;		/* RMB register block (self-auth) */
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;	/* fallback DMA dev for MBA buffer */
+	struct device *mba_mem_dev_fixed; /* fixed MBA memory dev, if any */
+	unsigned long attrs_dma;	/* DMA attrs used for the MBA buffer */
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+int pil_mss_assert_resets(struct q6v5_data *drv);
+int pil_mss_deassert_resets(struct q6v5_data *drv);
+int pil_mss_debug_reset(struct pil_desc *pil);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 0000000..db48b1a
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define PROXY_TIMEOUT_MS 10000
+#define MAX_SSR_REASON_LEN 256U
+#define STOP_ACK_TIMEOUT_MS 1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+/* Fetch and log the modem's subsystem-failure-reason string from SMEM. */
+static void log_modem_sfr(struct modem_data *drv)
+{
+	char reason[MAX_SSR_REASON_LEN];
+	char *msg;
+	size_t len;
+
+	/* smem_id of -1 means no SFR item was configured for this target */
+	if (drv->q6->smem_id == -1)
+		return;
+
+	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, drv->q6->smem_id, &len);
+	if (IS_ERR(msg) || !len) {
+		pr_err("modem SFR: (unknown, qcom_smem_get failed).\n");
+		return;
+	}
+	if (!msg[0]) {
+		pr_err("modem SFR: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, msg, min(len, (size_t)MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+}
+
+/* Log the failure reason and kick off a subsystem restart of the modem. */
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr(drv);
+	/* Further error IRQs are expected during restart; ignore them */
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+/* IRQ handler: modem raised a fatal error; restart it unless we caused it. */
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force stop BIT */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	pr_err("Fatal error on the modem.\n");
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem acknowledged a force-stop request (see modem_shutdown). */
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem acknowledged shutdown; notify the subsystem framework. */
+static irqreturn_t modem_shutdown_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop shutdown interrupt from modem\n");
+	complete_shutdown_ack(drv->subsys);
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler: modem requested that ramdump collection be skipped. */
+static irqreturn_t modem_ramdump_disable_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received ramdump disable interrupt from modem\n");
+	drv->subsys_desc.ramdump_disable = 1;
+	return IRQ_HANDLED;
+}
+
+/*
+ * Subsystem shutdown op: optionally signal a force-stop to the modem via
+ * SMEM state bits, wait (bounded) for its stop-ack, then power down the
+ * PIL descriptor.  Always returns 0.
+ */
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	/* Only force-stop a modem that has not already crashed */
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		/* Clear the force-stop bit again regardless of the ack */
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_irq) {
+		pr_warn("Ramdump disable value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+/*
+ * Subsystem powerup op: reset per-boot state and boot the modem image.
+ * Returns the result of pil_boot(), or 0 if the modem is not loadable.
+ */
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+/*
+ * Subsystem crash-shutdown op: force-stop the modem during a host crash
+ * and give it STOP_ACK_TIMEOUT_MS to quiesce (no ack is awaited here).
+ */
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	/* Lets the err-fatal IRQ handler know we triggered the stop */
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		msleep(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+/*
+ * Subsystem ramdump op: collect modem firmware memory dumps.
+ *
+ * Sequence: proxy-vote resources, run the minidump debug reset (so the
+ * modem can encrypt its dump), re-vote, boot the MBA (needed to access
+ * modem memory), collect ram/minidumps, then free resources and drop
+ * the votes.  Returns 0 on success or a negative errno.
+ */
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_debug_reset(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	/* Debug reset dropped the votes internally; take them again */
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_do_ramdump(&drv->q6->desc,
+			drv->ramdump_dev, drv->minidump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+/*
+ * IRQ handler: modem watchdog bite.  Either panic (when a full system
+ * ramdump is configured) or restart just the modem subsystem.
+ */
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* A restart is already in flight; nothing to do */
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug)
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+/*
+ * pil_subsys_init() - Register the modem with the subsystem-restart
+ * framework and create its ramdump/minidump devices.
+ *
+ * Also requests the AOP mailbox channel when "qcom,signal-aop" is set.
+ * Returns 0 on success or a negative errno; on failure everything
+ * registered so far is torn down.
+ */
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+	drv->subsys_desc.ramdump_disable_handler =
+					modem_ramdump_disable_intr_handler;
+	drv->subsys_desc.shutdown_ack_handler = modem_shutdown_ack_intr_handler;
+
+	if (IS_ERR_OR_NULL(drv->q6)) {
+		/*
+		 * PTR_ERR(NULL) is 0, which would make this error path
+		 * return success; map a NULL q6 to -ENODEV instead.
+		 */
+		ret = drv->q6 ? PTR_ERR(drv->q6) : -ENODEV;
+		dev_err(&pdev->dev, "Pil q6 data is err %pK %d!!!\n",
+			drv->q6, ret);
+		goto err_subsys;
+	}
+
+	drv->q6->desc.modem_ssr = false;
+	drv->q6->desc.signal_aop = of_property_read_bool(pdev->dev.of_node,
+						"qcom,signal-aop");
+	if (drv->q6->desc.signal_aop) {
+		drv->q6->desc.cl.dev = &pdev->dev;
+		drv->q6->desc.cl.tx_block = true;
+		drv->q6->desc.cl.tx_tout = 1000;
+		drv->q6->desc.cl.knows_txdone = false;
+		drv->q6->desc.mbox = mbox_request_channel(&drv->q6->desc.cl, 0);
+		if (IS_ERR(drv->q6->desc.mbox)) {
+			ret = PTR_ERR(drv->q6->desc.mbox);
+			dev_err(&pdev->dev, "Failed to get mailbox channel %pK %d\n",
+				drv->q6->desc.mbox, ret);
+			goto err_subsys;
+		}
+	}
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+	drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+	if (!drv->minidump_dev) {
+		pr_err("%s: Unable to create a modem minidump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_minidump;
+	}
+
+	return 0;
+
+err_minidump:
+	destroy_ramdump_device(drv->ramdump_dev);
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+/*
+ * pil_mss_loadable_init() - Parse DT resources (register blocks, clocks,
+ * regulators, optional properties) and initialize the Q6v5 PIL descriptor
+ * for a loadable modem.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	/*
+	 * PTR_ERR(NULL) is 0; a NULL return must not be reported as
+	 * success, so map it to -ENOMEM explicitly.
+	 */
+	if (IS_ERR_OR_NULL(q6))
+		return q6 ? PTR_ERR(q6) : -ENOMEM;
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	/* Default ops; replaced below when self-authentication is enabled */
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6_desc->sequential_loading = of_property_read_bool(pdev->dev.of_node,
+							"qcom,sequential-fw-load");
+	q6->reset_clk = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-clk");
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		/* Fall back to the secure restart register */
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "No restart register defined\n");
+			return -ENOMEM;
+		}
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+				res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	q6->pdc_sync = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+	if (res) {
+		q6->pdc_sync = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (of_property_read_u32(pdev->dev.of_node,
+			"qcom,mss_pdc_offset", &q6->mss_pdc_offset)) {
+			dev_err(&pdev->dev,
+				"Offset for MSS PDC not specified\n");
+			return -EINVAL;
+		}
+
+	}
+
+	q6->alt_reset = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+	if (res) {
+		q6->alt_reset = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
+	q6->vreg = NULL;
+
+	/* vdd_mss is optional; only acquire it when the supply exists */
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+	of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	/* Defaulting smem_id to be not present */
+	q6->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", NULL)) {
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+					   &q6->smem_id);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(ret:%d)\n",
+				ret);
+			return ret;
+		}
+	}
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+/*
+ * Platform probe: allocate driver state, initialize the loadable modem
+ * (unless "qcom,is-not-loadable"), populate child devices (the optional
+ * fixed MBA memory device), and register with subsystem restart.
+ */
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
+	return pil_subsys_init(drv, pdev);
+}
+
+/* Platform remove: tear down subsystem registration, dump devices, PIL. */
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	destroy_ramdump_device(drv->minidump_dev);
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+/*
+ * Probe for the optional fixed MBA memory child device: record its
+ * struct device in the parent modem driver's state so MBA/DP DMA
+ * allocations use the fixed region.
+ */
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
+	drv = dev_get_drvdata(pdev->dev.parent);
+	/* Parent (pil-q6v5-mss) has not set its drvdata yet; retry later */
+	if (!drv)
+		return -EPROBE_DEFER;
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+/* OF match table for the optional fixed MBA memory child node */
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+	},
+};
+
+/* OF match table covering all supported Q6v5x modem variants */
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+	},
+};
+
+/*
+ * Module init: register the MBA-mem helper driver first (the MSS driver
+ * populates and relies on it), then the MSS driver itself.  On failure
+ * of the second registration the first is rolled back.
+ */
+static int __init pil_mss_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&pil_mss_driver);
+	if (ret)
+		/* Don't leave the helper driver registered on failure */
+		platform_driver_unregister(&pil_mba_mem_driver);
+	return ret;
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+	/* Previously leaked: the MBA-mem helper driver was never removed */
+	platform_driver_unregister(&pil_mba_mem_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 0000000..208e327
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-msa.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET 0x014
+#define QDSP6SS_GFMUX_CTL 0x020
+#define QDSP6SS_PWR_CTL 0x030
+#define QDSP6V6SS_MEM_PWR_CTL 0x034
+#define QDSP6SS_BHS_STATUS 0x078
+#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6SS_STRAP_ACC 0x110
+#define QDSP6V62SS_BHS_STATUS 0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENA BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA BIT(1)
+#define Q6SS_CLK_SRC_SEL_C BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD 0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON BIT(26)
+#define QDSP6v55_LDO_BYP BIT(25)
+#define QDSP6v55_BHS_ON BIT(24)
+#define QDSP6v55_CLAMP_WL BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM BIT(22)
+#define L1IU_SLP_NRET_N BIT(15)
+#define L1DU_SLP_NRET_N BIT(14)
+#define L2PLRU_SLP_NRET_N BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS (200)
+#define BHS_CHECK_MAX_LOOPS (200)
+#define QDSP6SS_XO_CBCR (0x0038)
+
+/* QDSP6v65 parameters */
+#define QDSP6SS_BOOT_CORE_START (0x400)
+#define QDSP6SS_BOOT_CMD (0x404)
+#define MSS_STATUS (0x40)
+#define QDSP6SS_SLEEP (0x3C)
+#define SLEEP_CHECK_MAX_LOOPS (200)
+#define BOOT_FSM_TIMEOUT (10000)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+ int ret;
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ int uv;
+
+ ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+ if (ret) {
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(drv->xo);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(drv->pnoc_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+ goto err_pnoc_vote;
+ }
+
+ ret = clk_prepare_enable(drv->qdss_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+ goto err_qdss_vote;
+ }
+
+ ret = clk_prepare_enable(drv->prng_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for prng(rc:%d)\n", ret);
+ goto err_prng_vote;
+ }
+
+ ret = clk_prepare_enable(drv->axis2_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for axis2(rc:%d)\n", ret);
+ goto err_axis2_vote;
+ }
+
+ ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+ if (ret) {
+ dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+ ret);
+ goto err_cx_voltage;
+ }
+
+ ret = regulator_set_load(drv->vreg_cx, 100000);
+ if (ret < 0) {
+ dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+ goto err_cx_mode;
+ }
+
+ ret = regulator_enable(drv->vreg_cx);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+ goto err_cx_enable;
+ }
+
+ if (drv->vreg_pll) {
+ ret = regulator_enable(drv->vreg_pll);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+ ret);
+ goto err_vreg_pll;
+ }
+ }
+
+ return 0;
+
+err_vreg_pll:
+ regulator_disable(drv->vreg_cx);
+err_cx_enable:
+ regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+ regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+err_cx_voltage:
+ clk_disable_unprepare(drv->axis2_clk);
+err_axis2_vote:
+ clk_disable_unprepare(drv->prng_clk);
+err_prng_vote:
+ clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+ clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+ clk_disable_unprepare(drv->xo);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ int uv, ret = 0;
+
+ ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+ if (ret) {
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
+ return;
+ }
+
+ if (drv->vreg_pll) {
+ regulator_disable(drv->vreg_pll);
+ regulator_set_load(drv->vreg_pll, 0);
+ }
+ regulator_disable(drv->vreg_cx);
+ regulator_set_load(drv->vreg_cx, 0);
+ regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+ clk_disable_unprepare(drv->xo);
+ clk_disable_unprepare(drv->pnoc_clk);
+ clk_disable_unprepare(drv->qdss_clk);
+ clk_disable_unprepare(drv->prng_clk);
+ clk_disable_unprepare(drv->axis2_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+ int ret;
+ u32 status;
+
+ /* Assert halt request */
+ writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+ /* Wait for halt */
+ ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+ status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+ if (ret)
+ dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
+ else if (!readl_relaxed(halt_base + AXI_IDLE))
+ dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+ u32 val;
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+ /*
+ * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+ * clamp as a software workaround to avoid high MX current during
+ * LPASS/MSS restart.
+ */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+ QDSP6v55_CLAMP_QMC_MEM);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* Ensure clamp assertion completes before MSS restart */
+ mb();
+}
+
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+ u32 val;
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+ /* Turn off core clock */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+ val &= ~Q6SS_CLK_ENA;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+ /* Clamp IO */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_CLAMP_IO;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn off Q6 memories */
+ val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+ Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+ Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+ Q6SS_L2DATA_STBY_N);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Assert Q6 resets */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+ /* Kill power at block headswitch */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val &= ~QDSS_BHS_ON;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+ if (drv->qdsp6v55) {
+ /* Subsystem driver expected to halt bus and assert reset */
+ return;
+ }
+ __pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ u32 val;
+
+ /* Assert resets, stop core */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+ mb();
+ udelay(1);
+
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Bring core out of reset */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_CORE_ARES;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+ /* Turn on core clock */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+ val |= Q6SS_CLK_ENA;
+
+ /* Need a different clock source for v5.2.0 */
+ if (drv->qdsp6v5_2_0) {
+ val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+ val |= Q6SS_CLK_SRC_SEL_C;
+ }
+
+ /* force clock on during source switch */
+ if (drv->qdsp6v56)
+ val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+ writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+ /* Start core execution */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_STOP_CORE;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+ return 0;
+}
+
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+ u32 val, count;
+ void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
+ val = readl_relaxed(cbcr_reg);
+ val |= 0x1;
+ writel_relaxed(val, cbcr_reg);
+
+ for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+ val = readl_relaxed(cbcr_reg);
+ if (!(val & BIT(31)))
+ return 0;
+ udelay(1);
+ }
+
+ dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+ return -EINVAL;
+}
+
+static int __pil_q6v65_reset(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ u32 val, count;
+ int ret;
+
+ val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+ val |= 0x1;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_SLEEP);
+ for (count = SLEEP_CHECK_MAX_LOOPS; count > 0; count--) {
+ val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+ if (!(val & BIT(31)))
+ break;
+ udelay(1);
+ }
+
+ if (!count) {
+ dev_err(drv->desc.dev, "Sleep clock did not come on in time\n");
+ return -ETIMEDOUT;
+ }
+
+ /* De-assert QDSP6 stop core */
+ writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CORE_START);
+ /* De-assert stop core before starting boot FSM */
+ mb();
+ /* Trigger boot FSM */
+ writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
+
+ /* Wait for boot FSM to complete */
+ ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
+ (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+
+ if (ret) {
+ dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
+ /* Reset the modem so that boot FSM is in reset state */
+ pil_mss_assert_resets(drv);
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ pil_mss_deassert_resets(drv);
+ }
+
+ return ret;
+}
+
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+ u32 val;
+ int i;
+
+ trace_pil_func(__func__);
+ /* Override the ACC value if required */
+ if (drv->override_acc)
+ writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+ drv->reg_base + QDSP6SS_STRAP_ACC);
+
+ /* Override the ACC value with input value */
+ if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+ &drv->override_acc_1))
+ writel_relaxed(drv->override_acc_1,
+ drv->reg_base + QDSP6SS_STRAP_ACC);
+
+ /* Assert resets, stop core */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* BHS requires the XO CBCR to be enabled */
+ i = q6v55_branch_clk_enable(drv);
+ if (i)
+ return i;
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= QDSP6v55_BHS_ON;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+ mb();
+ udelay(1);
+
+ if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5 || drv->qdsp6v62_1_4) {
+ for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+ if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+ & QDSP6v55_BHS_EN_REST_ACK)
+ break;
+ udelay(1);
+ }
+ if (!i) {
+ pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (drv->qdsp6v61_1_1) {
+ for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+ if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+ & QDSP6v55_BHS_EN_REST_ACK)
+ break;
+ udelay(1);
+ }
+ if (!i) {
+ pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Put LDO in bypass mode */
+ val |= QDSP6v55_LDO_BYP;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ if (drv->qdsp6v56_1_3) {
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn on L1, L2 and ETB memories 1 at a time */
+ for (i = 17; i >= 0; i--) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ udelay(1);
+ }
+ } else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+ || drv->qdsp6v56_1_10) {
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val &= ~QDSP6v55_CLAMP_QMC_MEM;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+ for (i = 19; i >= 0; i--) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ val |= readl_relaxed(drv->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ /*
+ * Wait for 1us for both memory peripheral and
+ * data array to turn on.
+ */
+ udelay(1);
+ }
+ } else if (drv->qdsp6v56_1_8_inrush_current) {
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val &= ~QDSP6v55_CLAMP_QMC_MEM;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+ for (i = 19; i >= 6; i--) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ /*
+ * Wait for 1us for both memory peripheral and
+ * data array to turn on.
+ */
+ udelay(1);
+ }
+
+ for (i = 0 ; i <= 5 ; i++) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ /*
+ * Wait for 1us for both memory peripheral and
+ * data array to turn on.
+ */
+ udelay(1);
+ }
+ } else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+ drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5) {
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val &= ~QDSP6v55_CLAMP_QMC_MEM;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ val = readl_relaxed(drv->reg_base +
+ QDSP6V6SS_MEM_PWR_CTL);
+
+ if (drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5)
+ i = 29;
+ else
+ i = 28;
+
+ for ( ; i >= 0; i--) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base +
+ QDSP6V6SS_MEM_PWR_CTL);
+ /*
+ * Wait for 1us for both memory peripheral and
+ * data array to turn on.
+ */
+ udelay(1);
+ }
+ } else {
+ /* Turn on memories. */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val |= 0xFFF00;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Turn on L2 banks 1 at a time */
+ for (i = 0; i <= 7; i++) {
+ val |= BIT(i);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+ }
+ }
+
+ /* Remove word line clamp */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+ val &= ~QDSP6v55_CLAMP_WL;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Bring core out of reset */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+ val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+ writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+ /* Turn on core clock */
+ val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+ val |= Q6SS_CLK_ENA;
+ writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+ return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+
+ if (drv->qdsp6v65_1_0)
+ return __pil_q6v65_reset(pil);
+ else if (drv->qdsp6v55)
+ return __pil_q6v55_reset(pil);
+ else
+ return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+ struct q6v5_data *drv;
+ struct resource *res;
+ struct pil_desc *desc;
+ struct property *prop;
+ int ret, vdd_pll;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return ERR_PTR(-ENOMEM);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return drv->reg_base;
+
+ desc = &drv->desc;
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+ &desc->name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ desc->clear_fw_region = false;
+ desc->dev = &pdev->dev;
+
+ drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,pil-femto-modem");
+
+ if (drv->qdsp6v5_2_0)
+ return drv;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+ if (res) {
+ drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!drv->axi_halt_base) {
+ dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if (!drv->axi_halt_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "halt_q6");
+ if (res) {
+ drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!drv->axi_halt_q6) {
+ dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "halt_modem");
+ if (res) {
+ drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!drv->axi_halt_mss) {
+ dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "halt_nc");
+ if (res) {
+ drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!drv->axi_halt_nc) {
+ dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+ }
+
+ if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+ && drv->axi_halt_nc))) {
+ dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,pil-q6v55-mss");
+ drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,pil-q6v56-mss");
+
+ drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v56-1-3");
+ drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v56-1-5");
+
+ drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v56-1-8");
+ drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v56-1-10");
+
+ drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,qdsp6v56-1-8-inrush-current");
+
+ drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v61-1-1");
+
+ drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v62-1-2");
+
+ drv->qdsp6v62_1_4 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v62-1-4");
+
+ drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v62-1-5");
+
+ drv->qdsp6v65_1_0 = of_property_read_bool(pdev->dev.of_node,
+ "qcom,qdsp6v65-1-0");
+
+ drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mba-image-is-not-elf");
+
+ drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+ "qcom,override-acc");
+
+ drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+ "qcom,ahb-clk-vote");
+ drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+ "qcom,mx-spike-wa");
+
+ drv->xo = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(drv->xo))
+ return ERR_CAST(drv->xo);
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+ drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+ if (IS_ERR(drv->pnoc_clk))
+ return ERR_CAST(drv->pnoc_clk);
+ } else {
+ drv->pnoc_clk = NULL;
+ }
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "qcom,proxy-clock-names", "qdss_clk") >= 0) {
+ drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+ if (IS_ERR(drv->qdss_clk))
+ return ERR_CAST(drv->qdss_clk);
+ } else {
+ drv->qdss_clk = NULL;
+ }
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "qcom,proxy-clock-names", "prng_clk") >= 0) {
+ drv->prng_clk = devm_clk_get(&pdev->dev, "prng_clk");
+ if (IS_ERR(drv->prng_clk))
+ return ERR_CAST(drv->prng_clk);
+ } else {
+ drv->prng_clk = NULL;
+ }
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "qcom,proxy-clock-names", "axis2_clk") >= 0) {
+ drv->axis2_clk = devm_clk_get(&pdev->dev, "axis2_clk");
+ if (IS_ERR(drv->axis2_clk))
+ return ERR_CAST(drv->axis2_clk);
+ } else {
+ drv->axis2_clk = NULL;
+ }
+
+ drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(drv->vreg_cx))
+ return ERR_CAST(drv->vreg_cx);
+ prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+ return ERR_CAST(prop);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+ &vdd_pll);
+ if (!ret) {
+ drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+ if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+ ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+ vdd_pll);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = regulator_set_load(drv->vreg_pll, 10000);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+ } else
+ drv->vreg_pll = NULL;
+ }
+
+ return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 0000000..01b1cef
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+ void __iomem *reg_base;
+ void __iomem *rmb_base;
+ void __iomem *cxrail_bhs; /* External BHS register */
+ struct clk *xo; /* XO clock source */
+ struct clk *pnoc_clk; /* PNOC bus clock source */
+ struct clk *ahb_clk; /* PIL access to registers */
+ struct clk *axi_clk; /* CPU access to memory */
+ struct clk *core_clk; /* CPU core */
+ struct clk *reg_clk; /* CPU access registers */
+ struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+ struct clk *rom_clk; /* Boot ROM */
+ struct clk *snoc_axi_clk;
+ struct clk *mnoc_axi_clk;
+ struct clk *qdss_clk;
+ struct clk *prng_clk;
+ struct clk *axis2_clk;
+ void __iomem *axi_halt_base; /* Halt base of q6, mss,
+ * nc are in same 4K page
+ */
+ void __iomem *axi_halt_q6;
+ void __iomem *axi_halt_mss;
+ void __iomem *axi_halt_nc;
+ void __iomem *restart_reg;
+ void __iomem *pdc_sync;
+ void __iomem *alt_reset;
+ struct regulator *vreg;
+ struct regulator *vreg_cx;
+ struct regulator *vreg_mx;
+ struct regulator *vreg_pll;
+ bool is_booted;
+ struct pil_desc desc;
+ bool self_auth;
+ phys_addr_t mba_dp_phys;
+ void *mba_dp_virt;
+ size_t mba_dp_size;
+ size_t dp_size;
+ bool qdsp6v55;
+ bool qdsp6v5_2_0;
+ bool qdsp6v56;
+ bool qdsp6v56_1_3;
+ bool qdsp6v56_1_5;
+ bool qdsp6v56_1_8;
+ bool qdsp6v56_1_8_inrush_current;
+ bool qdsp6v56_1_10;
+ bool qdsp6v61_1_1;
+ bool qdsp6v62_1_2;
+ bool qdsp6v62_1_4;
+ bool qdsp6v62_1_5;
+ bool qdsp6v65_1_0;
+ bool non_elf_image;
+ bool restart_reg_sec;
+ bool override_acc;
+ int override_acc_1;
+ int mss_pdc_offset;
+ int smem_id;
+ bool ahb_clk_vote;
+ bool mx_spike_wa;
+ bool reset_clk;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 00bfed4..63556d1 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1108,12 +1108,30 @@
"Failed to cancel/abort m_cmd\n");
}
if (mas->cur_xfer_mode == SE_DMA) {
- if (xfer->tx_buf)
+ if (xfer->tx_buf) {
+ reinit_completion(&mas->xfer_done);
+ writel_relaxed(1, mas->base +
+ SE_DMA_TX_FSM_RST);
+ timeout =
+ wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!timeout)
+ dev_err(mas->dev,
+ "DMA TX RESET failed\n");
geni_se_tx_dma_unprep(mas->wrapper_dev,
- xfer->tx_dma, xfer->len);
- if (xfer->rx_buf)
+ xfer->tx_dma, xfer->len);
+ }
+ if (xfer->rx_buf) {
+ reinit_completion(&mas->xfer_done);
+ writel_relaxed(1, mas->base +
+ SE_DMA_RX_FSM_RST);
+ timeout =
+ wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!timeout)
+ dev_err(mas->dev,
+ "DMA RX RESET failed\n");
geni_se_rx_dma_unprep(mas->wrapper_dev,
- xfer->rx_dma, xfer->len);
+ xfer->rx_dma, xfer->len);
+ }
}
}
diff --git a/drivers/thermal/qcom/adc-tm5.c b/drivers/thermal/qcom/adc-tm5.c
index 3371802..654689e 100644
--- a/drivers/thermal/qcom/adc-tm5.c
+++ b/drivers/thermal/qcom/adc-tm5.c
@@ -28,6 +28,9 @@
#define ADC_TM_MEAS_INTERVAL_CTL 0x44
#define ADC_TM_MEAS_INTERVAL_CTL2 0x45
+#define ADC_TM_MEAS_INTERVAL_CTL_660 0x50
+#define ADC_TM_MEAS_INTERVAL_CTL2_660 0x51
+
#define ADC_TM_MEAS_INTERVAL_CTL2_SHIFT 0x4
#define ADC_TM_MEAS_INTERVAL_CTL2_MASK 0xf0
#define ADC_TM_MEAS_INTERVAL_CTL3_MASK 0xf
@@ -1035,22 +1038,31 @@
{
u8 buf[4], channels_available, meas_int_timer_2_3 = 0;
int ret;
+ int dig_param_len = 4;
+ bool pmic_subtype_660 = false;
unsigned int offset_btm_idx = 0, i;
- ret = adc_tm5_read_reg(chip, ADC_TM_NUM_BTM, &channels_available, 1);
- if (ret < 0) {
- pr_err("read failed for BTM channels\n");
- return ret;
- }
+ if ((chip->pmic_rev_id) &&
+ (chip->pmic_rev_id->pmic_subtype == PM660_SUBTYPE)) {
+ dig_param_len = 2;
+ pmic_subtype_660 = true;
+ } else {
+ ret = adc_tm5_read_reg(chip, ADC_TM_NUM_BTM,
+ &channels_available, 1);
+ if (ret < 0) {
+ pr_err("read failed for BTM channels\n");
+ return ret;
+ }
- if (dt_chans > channels_available) {
- pr_err("Number of nodes greater than channels supported:%d\n",
- channels_available);
- return -EINVAL;
+ if (dt_chans > channels_available) {
+ pr_err("More nodes than channels supported:%d\n",
+ channels_available);
+ return -EINVAL;
+ }
}
ret = adc_tm5_read_reg(chip,
- ADC_TM_ADC_DIG_PARAM, buf, 4);
+ ADC_TM_ADC_DIG_PARAM, buf, dig_param_len);
if (ret < 0) {
pr_err("adc-tm block read failed with %d\n", ret);
return ret;
@@ -1072,10 +1084,18 @@
buf[3] = meas_int_timer_2_3;
ret = adc_tm5_write_reg(chip,
- ADC_TM_ADC_DIG_PARAM, buf, 4);
+ ADC_TM_ADC_DIG_PARAM, buf, dig_param_len);
if (ret < 0)
pr_err("adc-tm block write failed with %d\n", ret);
+ if (pmic_subtype_660) {
+ ret = adc_tm5_write_reg(chip,
+ ADC_TM_MEAS_INTERVAL_CTL_660, &buf[2], 2);
+
+ if (ret < 0)
+ pr_err("adc-tm block write failed with %d\n", ret);
+ }
+
spin_lock_init(&chip->adc_tm_lock);
mutex_init(&chip->adc_mutex_lock);
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 941f7f4..d04ea03 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -83,12 +83,70 @@
*temp = last_temp * TSENS_TM_SCALE_DECI_MILLIDEG;
}
+static int __tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+ void __iomem *srot_addr;
+ void __iomem *sensor_int_mask_addr;
+ unsigned int srot_val, crit_mask, crit_val;
+ void __iomem *int_mask_addr;
+
+ srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+ srot_val = readl_relaxed(srot_addr);
+ if (!(srot_val & TSENS_EN)) {
+ pr_err("TSENS device is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (tmdev->ctrl_data->cycle_monitor) {
+ sensor_int_mask_addr =
+ TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+ crit_mask = readl_relaxed(sensor_int_mask_addr);
+ crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
+ if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
+ writel_relaxed((crit_mask | crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ else
+ writel_relaxed((crit_mask & ~crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+		/* Update critical cycle monitoring */
+ mb();
+ }
+
+ if (tmdev->ctrl_data->wd_bark) {
+ sensor_int_mask_addr =
+ TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+ crit_mask = readl_relaxed(sensor_int_mask_addr);
+ crit_val = TSENS_TM_CRITICAL_WD_BARK;
+ if (tmdev->ctrl_data->wd_bark_mask)
+ writel_relaxed((crit_mask | crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+ else
+ writel_relaxed((crit_mask & ~crit_val),
+ (TSENS_TM_CRITICAL_INT_MASK
+ (tmdev->tsens_tm_addr)));
+		/* Update watchdog monitoring */
+ mb();
+ }
+
+ int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
+ writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
+
+ writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+ TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+ TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+
+ return 0;
+}
+
static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
{
struct tsens_device *tmdev = NULL, *tmdev_itr;
unsigned int code, ret, tsens_ret;
void __iomem *sensor_addr, *trdy;
- int last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
+ int rc = 0, last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
static atomic_t in_tsens_reinit;
if (!sensor)
@@ -172,6 +230,13 @@
/* Notify thermal fwk */
list_for_each_entry(tmdev_itr,
&tsens_device_list, list) {
+ rc = __tsens2xxx_hw_init(tmdev_itr);
+ if (rc) {
+ pr_err(
+ "%s: Failed to re-initialize TSENS controller\n",
+ __func__);
+ BUG();
+ }
queue_work(tmdev_itr->tsens_reinit_work,
&tmdev_itr->therm_fwk_notify);
}
@@ -713,58 +778,11 @@
static int tsens2xxx_hw_init(struct tsens_device *tmdev)
{
- void __iomem *srot_addr;
- void __iomem *sensor_int_mask_addr;
- unsigned int srot_val, crit_mask, crit_val;
- void __iomem *int_mask_addr;
+ int rc = 0;
- srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
- srot_val = readl_relaxed(srot_addr);
- if (!(srot_val & TSENS_EN)) {
- pr_err("TSENS device is not enabled\n");
- return -ENODEV;
- }
-
- if (tmdev->ctrl_data->cycle_monitor) {
- sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
- crit_mask = readl_relaxed(sensor_int_mask_addr);
- crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
- if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
- writel_relaxed((crit_mask | crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- else
- writel_relaxed((crit_mask & ~crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- /*Update critical cycle monitoring*/
- mb();
- }
-
- if (tmdev->ctrl_data->wd_bark) {
- sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
- crit_mask = readl_relaxed(sensor_int_mask_addr);
- crit_val = TSENS_TM_CRITICAL_WD_BARK;
- if (tmdev->ctrl_data->wd_bark_mask)
- writel_relaxed((crit_mask | crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- else
- writel_relaxed((crit_mask & ~crit_val),
- (TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_tm_addr)));
- /*Update watchdog monitoring*/
- mb();
- }
-
- int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
- writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
-
- writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
- TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
- TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+ rc = __tsens2xxx_hw_init(tmdev);
+ if (rc)
+ return rc;
spin_lock_init(&tmdev->tsens_crit_lock);
spin_lock_init(&tmdev->tsens_upp_low_lock);
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index bb146d7..d414a8a 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -30,30 +30,38 @@
#define POST_VCO_DIV3_5_0_CLK 19
#define CPHY_PCLK_SRC_MUX_0_CLK 20
#define CPHY_PCLK_SRC_0_CLK 21
+#define SHADOW_CPHY_BYTECLK_SRC_0_CLK 22
+#define SHADOW_POST_VCO_DIV3_5_0_CLK 23
+#define SHADOW_CPHY_PCLK_SRC_MUX_0_CLK 24
+#define SHADOW_CPHY_PCLK_SRC_0_CLK 25
-#define VCO_CLK_1 22
-#define PLL_OUT_DIV_1_CLK 23
-#define BITCLK_SRC_1_CLK 24
-#define BYTECLK_SRC_1_CLK 25
-#define POST_BIT_DIV_1_CLK 26
-#define POST_VCO_DIV_1_CLK 27
-#define BYTECLK_MUX_1_CLK 28
-#define PCLK_SRC_MUX_1_CLK 29
-#define PCLK_SRC_1_CLK 30
-#define PCLK_MUX_1_CLK 31
-#define SHADOW_VCO_CLK_1 32
-#define SHADOW_PLL_OUT_DIV_1_CLK 33
-#define SHADOW_BITCLK_SRC_1_CLK 34
-#define SHADOW_BYTECLK_SRC_1_CLK 35
-#define SHADOW_POST_BIT_DIV_1_CLK 36
-#define SHADOW_POST_VCO_DIV_1_CLK 37
-#define SHADOW_PCLK_SRC_MUX_1_CLK 38
-#define SHADOW_PCLK_SRC_1_CLK 39
+#define VCO_CLK_1 26
+#define PLL_OUT_DIV_1_CLK 27
+#define BITCLK_SRC_1_CLK 28
+#define BYTECLK_SRC_1_CLK 29
+#define POST_BIT_DIV_1_CLK 30
+#define POST_VCO_DIV_1_CLK 31
+#define BYTECLK_MUX_1_CLK 32
+#define PCLK_SRC_MUX_1_CLK 33
+#define PCLK_SRC_1_CLK 34
+#define PCLK_MUX_1_CLK 35
+#define SHADOW_VCO_CLK_1 36
+#define SHADOW_PLL_OUT_DIV_1_CLK 37
+#define SHADOW_BITCLK_SRC_1_CLK 38
+#define SHADOW_BYTECLK_SRC_1_CLK 39
+#define SHADOW_POST_BIT_DIV_1_CLK 40
+#define SHADOW_POST_VCO_DIV_1_CLK 41
+#define SHADOW_PCLK_SRC_MUX_1_CLK 42
+#define SHADOW_PCLK_SRC_1_CLK 43
/* CPHY clocks for DSI-1 PLL */
-#define CPHY_BYTECLK_SRC_1_CLK 40
-#define POST_VCO_DIV3_5_1_CLK 41
-#define CPHY_PCLK_SRC_MUX_1_CLK 42
-#define CPHY_PCLK_SRC_1_CLK 43
+#define CPHY_BYTECLK_SRC_1_CLK 44
+#define POST_VCO_DIV3_5_1_CLK 45
+#define CPHY_PCLK_SRC_MUX_1_CLK 46
+#define CPHY_PCLK_SRC_1_CLK 47
+#define SHADOW_CPHY_BYTECLK_SRC_1_CLK 48
+#define SHADOW_POST_VCO_DIV3_5_1_CLK 49
+#define SHADOW_CPHY_PCLK_SRC_MUX_1_CLK 50
+#define SHADOW_CPHY_PCLK_SRC_1_CLK 51
/* DP PLL clocks */
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 0bb1f16..5cd49dd 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -139,6 +139,7 @@
#define ADC_GPIO7 0x18
#define ADC_SBUx 0x99
#define ADC_MID_CHG_DIV6 0x1e
+#define ANA_IN 0x1d
#define ADC_OFF 0xff
/* 30k pull-up1 */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index c65aa57..3aef2d1 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -201,6 +201,14 @@
unsigned int id, unsigned int prop);
/*
+ * Following API sets the array of mutually exclusive cables.
+ * The 'exclusive' argument indicates the array of mutually exclusive set
+ * of cables that cannot be attached simultaneously.
+ */
+extern int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+ const u32 *exclusive);
+
+/*
* Following APIs register the notifier block in order to detect
* the change of both state and property value for each external connector.
*
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9887f4f..11e95d9 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -289,6 +289,15 @@
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
+/**
+ * iio_write_channel_processed() - write to a given channel
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ *
+ * Note processed writes to iio channels are converted to raw
+ * values before being written.
+ */
+int iio_write_channel_processed(struct iio_channel *chan, int val);
/**
* iio_get_channel_type() - get the type of a channel
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 547beaf..51a5ec4 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -260,6 +260,7 @@
/* mmio base */
phys_addr_t base_addr;
+ unsigned int len;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f3f7605..775e63e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -244,6 +244,7 @@
int regulator_count_voltages(struct regulator *regulator);
int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_list_corner_voltage(struct regulator *regulator, int corner);
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV);
unsigned int regulator_get_linear_step(struct regulator *regulator);
@@ -579,6 +580,11 @@
return -EINVAL;
}
+static inline int regulator_list_corner_voltage(struct regulator *regulator,
+ int corner)
+{
+ return -EINVAL;
+}
#endif
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 71756e6..7ae7dc3 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -94,6 +94,10 @@
* if the selector indicates a voltage that is unusable on this system;
* or negative errno. Selectors range from zero to one less than
* regulator_desc.n_voltages. Voltages may be reported in any order.
+ * @list_corner_voltage: Return the maximum voltage in microvolts
+ * that can be physically configured for the regulator when operating at
+ * the specified voltage corner or a negative errno if the corner value
+ * can't be used on this system.
*
* @set_current_limit: Configure a limit for a current-limited regulator.
* The driver should select the current closest to max_uA.
@@ -150,6 +154,7 @@
/* enumerate supported voltages */
int (*list_voltage) (struct regulator_dev *, unsigned selector);
+ int (*list_corner_voltage)(struct regulator_dev *list_reg, int corner);
/* get/set regulator voltage */
int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
diff --git a/include/linux/regulator/spm-regulator.h b/include/linux/regulator/spm-regulator.h
new file mode 100644
index 0000000..c1eaee6
--- /dev/null
+++ b/include/linux/regulator/spm-regulator.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/soc/qcom/cx_ipeak.h b/include/soc/qcom/cx_ipeak.h
index 53d7e9c..eed850a 100644
--- a/include/soc/qcom/cx_ipeak.h
+++ b/include/soc/qcom/cx_ipeak.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved.
*/
#ifndef __SOC_COM_CX_IPEAK_H
#define __SOC_COM_CX_IPEAK_H
+typedef void (*cx_ipeak_victim_fn)(void *data, u32 freq_limit);
+
struct device_node;
struct cx_ipeak_client;
@@ -27,12 +29,25 @@
{
return 0;
}
+
+static inline int cx_ipeak_victim_register(struct cx_ipeak_client *client,
+ cx_ipeak_victim_fn victim_cb, void *data)
+{
+ return 0;
+}
+
+static inline void cx_ipeak_victim_unregister(struct cx_ipeak_client *client)
+{
+}
#else
struct cx_ipeak_client *cx_ipeak_register(struct device_node *dev_node,
const char *client_name);
void cx_ipeak_unregister(struct cx_ipeak_client *client);
int cx_ipeak_update(struct cx_ipeak_client *ipeak_client, bool vote);
+int cx_ipeak_victim_register(struct cx_ipeak_client *client,
+ cx_ipeak_victim_fn victim_cb, void *data);
+void cx_ipeak_victim_unregister(struct cx_ipeak_client *client);
#endif
diff --git a/include/soc/qcom/icnss2.h b/include/soc/qcom/icnss2.h
index 64128de..bb75490 100644
--- a/include/soc/qcom/icnss2.h
+++ b/include/soc/qcom/icnss2.h
@@ -167,4 +167,7 @@
extern int icnss_qmi_send(struct device *dev, int type, void *cmd,
int cmd_len, void *cb_ctx,
int (*cb)(void *ctx, void *event, int event_len));
+extern int icnss_force_wake_request(struct device *dev);
+extern int icnss_force_wake_release(struct device *dev);
+extern int icnss_is_device_awake(struct device *dev);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/mpm.h b/include/soc/qcom/mpm.h
index 43bed05..2360335 100644
--- a/include/soc/qcom/mpm.h
+++ b/include/soc/qcom/mpm.h
@@ -16,4 +16,5 @@
extern const struct mpm_pin mpm_bengal_gic_chip_data[];
extern const struct mpm_pin mpm_scuba_gic_chip_data[];
+extern const struct mpm_pin mpm_sdm660_gic_chip_data[];
#endif /* __QCOM_MPM_H__ */