Merge "defconfig: msm: Enable dm-snapshot for msm8937_64"
diff --git a/arch/arm/configs/vendor/msm8937-perf_defconfig b/arch/arm/configs/vendor/msm8937-perf_defconfig
index 437a96d..525f55b 100644
--- a/arch/arm/configs/vendor/msm8937-perf_defconfig
+++ b/arch/arm/configs/vendor/msm8937-perf_defconfig
@@ -282,6 +282,7 @@
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -296,6 +297,7 @@
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
+CONFIG_RMNET=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
@@ -571,6 +573,9 @@
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm/configs/vendor/msm8937_32go-perf_defconfig b/arch/arm/configs/vendor/msm8937_32go-perf_defconfig
index 0836d7e..76d2855 100644
--- a/arch/arm/configs/vendor/msm8937_32go-perf_defconfig
+++ b/arch/arm/configs/vendor/msm8937_32go-perf_defconfig
@@ -281,6 +281,7 @@
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
diff --git a/arch/arm/configs/vendor/msm8937_32go_defconfig b/arch/arm/configs/vendor/msm8937_32go_defconfig
index 4a8b685..e40b49d 100644
--- a/arch/arm/configs/vendor/msm8937_32go_defconfig
+++ b/arch/arm/configs/vendor/msm8937_32go_defconfig
@@ -286,6 +286,7 @@
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
diff --git a/arch/arm/configs/vendor/msm8937_defconfig b/arch/arm/configs/vendor/msm8937_defconfig
index 4810de5..e72fa9d 100644
--- a/arch/arm/configs/vendor/msm8937_defconfig
+++ b/arch/arm/configs/vendor/msm8937_defconfig
@@ -287,6 +287,7 @@
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -301,6 +302,7 @@
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
+CONFIG_RMNET=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
@@ -583,6 +585,9 @@
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/vendor/kona-iot-perf_defconfig b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
index efcf655..c61d35c 100644
--- a/arch/arm64/configs/vendor/kona-iot-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
@@ -396,6 +396,7 @@
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
CONFIG_QTI_LIMITS_ISENSE_CDSP=y
+CONFIG_QTI_THERMAL_QFPROM=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
diff --git a/arch/arm64/configs/vendor/kona-iot_defconfig b/arch/arm64/configs/vendor/kona-iot_defconfig
index c55859f..ffd02ab 100644
--- a/arch/arm64/configs/vendor/kona-iot_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot_defconfig
@@ -412,6 +412,7 @@
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
CONFIG_QTI_LIMITS_ISENSE_CDSP=y
+CONFIG_QTI_THERMAL_QFPROM=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
diff --git a/arch/arm64/configs/vendor/msm8937-perf_defconfig b/arch/arm64/configs/vendor/msm8937-perf_defconfig
index ed5b66d..0c0bce7c 100644
--- a/arch/arm64/configs/vendor/msm8937-perf_defconfig
+++ b/arch/arm64/configs/vendor/msm8937-perf_defconfig
@@ -288,10 +288,12 @@
CONFIG_DUMMY=y
CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_MSM_RMNET_BAM=y
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
+CONFIG_RMNET=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
@@ -339,6 +341,7 @@
CONFIG_SERIAL_MSM_HS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
CONFIG_DIAG_CHAR=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
@@ -510,6 +513,7 @@
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
+CONFIG_USB_BAM=y
CONFIG_MDSS_PLL=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_SDM_GCC_429W=y
@@ -556,6 +560,10 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/vendor/msm8937_defconfig b/arch/arm64/configs/vendor/msm8937_defconfig
index 7d55e31..d697267 100644
--- a/arch/arm64/configs/vendor/msm8937_defconfig
+++ b/arch/arm64/configs/vendor/msm8937_defconfig
@@ -296,10 +296,12 @@
CONFIG_DUMMY=y
CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_MSM_RMNET_BAM=y
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
+CONFIG_RMNET=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
@@ -349,6 +351,7 @@
CONFIG_SERIAL_MSM_HS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
CONFIG_DIAG_CHAR=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
@@ -524,6 +527,7 @@
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
+CONFIG_USB_BAM=y
CONFIG_MDSS_PLL=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_SDM_GCC_429W=y
@@ -574,6 +578,10 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index f2f3c9b..f66ff5c 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -1149,7 +1149,9 @@
soc_id = chipset_version;
if (soc_id == QCA_HSP_SOC_ID_0100 ||
soc_id == QCA_HSP_SOC_ID_0110 ||
- soc_id == QCA_HSP_SOC_ID_0200) {
+ soc_id == QCA_HSP_SOC_ID_0200 ||
+ soc_id == QCA_HSP_SOC_ID_0210 ||
+ soc_id == QCA_HSP_SOC_ID_1211) {
ret = bt_disable_asd();
}
} else {
diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h
index 26fc8ab..0dbdbfb 100644
--- a/drivers/bluetooth/btfm_slim_slave.h
+++ b/drivers/bluetooth/btfm_slim_slave.h
@@ -110,6 +110,8 @@
QCA_HSP_SOC_ID_0100 = 0x400C0100,
QCA_HSP_SOC_ID_0110 = 0x400C0110,
QCA_HSP_SOC_ID_0200 = 0x400C0200,
+ QCA_HSP_SOC_ID_0210 = 0x400C0210,
+ QCA_HSP_SOC_ID_1211 = 0x400C1211,
};
/* Function Prototype */
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index cc265cd..7834b39 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -4088,11 +4088,14 @@
{
int err = 0, buf_size = 0;
char strpid[PID_SIZE];
+ char cur_comm[TASK_COMM_LEN];
+ memcpy(cur_comm, current->comm, TASK_COMM_LEN);
+ cur_comm[TASK_COMM_LEN-1] = '\0';
fl->tgid = current->tgid;
snprintf(strpid, PID_SIZE, "%d", current->pid);
if (debugfs_root) {
- buf_size = strlen(current->comm) + strlen("_")
+ buf_size = strlen(cur_comm) + strlen("_")
+ strlen(strpid) + 1;
spin_lock(&fl->hlock);
@@ -4107,13 +4110,13 @@
err = -ENOMEM;
return err;
}
- snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
- current->comm, "_", current->pid);
+ snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
+ cur_comm, "_", current->pid);
fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
debugfs_root, fl, &debugfs_fops);
if (IS_ERR_OR_NULL(fl->debugfs_file)) {
pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
- current->comm, __func__, fl->debug_buf);
+ cur_comm, __func__, fl->debug_buf);
fl->debugfs_file = NULL;
kfree(fl->debug_buf);
fl->debug_buf_alloced_attempted = 0;
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 1f4298e..3a76de4 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -138,7 +138,8 @@
f = find_freq(pll->freq_tbl, req->rate);
if (!f)
- req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate);
+ req->rate = DIV_ROUND_UP_ULL(req->rate, req->best_parent_rate)
+ * req->best_parent_rate;
else
req->rate = f->freq;
@@ -175,12 +176,38 @@
return 0;
}
+static void clk_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	int size, i;
+	unsigned int val;	/* regmap_read() takes an unsigned int * */
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_M_VAL", 0x8},
+		{"PLL_N_VAL", 0xC},
+		{"PLL_USER_CTL", 0x10},
+		{"PLL_CONFIG_CTL", 0x14},
+		{"PLL_STATUS_CTL", 0x1C},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->mode_reg + data[i].offset,
+			    &val);
+		clock_debug_output(f, false,
+				"%20s: 0x%.8x\n", data[i].name, val);
+	}
+}
+
const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
.determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
+ .list_registers = clk_pll_list_registers,
};
EXPORT_SYMBOL_GPL(clk_pll_ops);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 78dbdb01..2fe527c 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2014, 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2021, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -1190,6 +1190,7 @@
{ .compatible = "qcom,rpmcc-scuba", .data = &rpm_clk_scuba},
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-qm215", .data = &rpm_clk_qm215 },
+ { .compatible = "qcom,rpmcc-sdm439", .data = &rpm_clk_qm215 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
@@ -1200,7 +1201,7 @@
struct clk *clk;
struct rpm_cc *rcc;
struct clk_onecell_data *data;
- int ret, is_bengal, is_scuba, is_sdm660, is_qm215;
+ int ret, is_bengal, is_scuba, is_sdm660, is_qm215, is_sdm439;
size_t num_clks, i;
struct clk_hw **hw_clks;
const struct rpm_smd_clk_desc *desc;
@@ -1222,13 +1223,16 @@
is_qm215 = of_device_is_compatible(pdev->dev.of_node,
"qcom,rpmcc-qm215");
+ is_sdm439 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,rpmcc-sdm439");
+
if (is_sdm660) {
ret = clk_vote_bimc(&sdm660_bimc_clk.hw, INT_MAX);
if (ret < 0)
return ret;
}
- if (is_qm215) {
+ if (is_qm215 || is_sdm439) {
ret = clk_vote_bimc(&sdm429w_bimc_clk.hw, INT_MAX);
if (ret < 0)
return ret;
@@ -1251,6 +1255,11 @@
data->clks = clks;
data->clk_num = num_clks;
+ if (is_sdm439) {
+ rpm_clk_qm215.clks[RPM_SMD_BIMC_GPU_CLK] = NULL;
+ rpm_clk_qm215.clks[RPM_SMD_BIMC_GPU_A_CLK] = NULL;
+ }
+
for (i = 0; i <= desc->num_rpm_clks; i++) {
if (!hw_clks[i]) {
clks[i] = ERR_PTR(-ENOENT);
@@ -1317,7 +1326,7 @@
/* Hold an active set vote for the cnoc_periph resource */
clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
- } else if (is_qm215) {
+ } else if (is_qm215 || is_sdm439) {
clk_prepare_enable(sdm429w_bi_tcxo_ao.hw.clk);
/*
diff --git a/drivers/clk/qcom/gcc-sdm429w.c b/drivers/clk/qcom/gcc-sdm429w.c
index 9ef0590..d5d8cb3 100644
--- a/drivers/clk/qcom/gcc-sdm429w.c
+++ b/drivers/clk/qcom/gcc-sdm429w.c
@@ -241,7 +241,7 @@
static const struct parent_map gcc_parent_map_14[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL3_OUT_MAIN, 2 },
+ { P_GPLL3_OUT_MAIN_DIV, 2 },
{ P_GPLL6_OUT_AUX, 3 },
{ P_GPLL4_OUT_AUX, 4 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
@@ -1719,13 +1719,13 @@
F_SLEW(240000000, P_GPLL6_OUT_AUX, 4.5, 0, 0, FIXED_FREQ_SRC),
F_SLEW(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0, FIXED_FREQ_SRC),
F_SLEW(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0, FIXED_FREQ_SRC),
- F_SLEW(355200000, P_GPLL3_OUT_MAIN, 1, 0, 0, 710400000),
- F_SLEW(375000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 750000000),
+ F_SLEW(355200000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 710400000),
+ F_SLEW(375000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 750000000),
F_SLEW(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0, FIXED_FREQ_SRC),
- F_SLEW(450000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 900000000),
- F_SLEW(510000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1020000000),
- F_SLEW(560000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1120000000),
- F_SLEW(650000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1300000000),
+ F_SLEW(450000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 900000000),
+ F_SLEW(510000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1020000000),
+ F_SLEW(560000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1120000000),
+ F_SLEW(650000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1300000000),
{ }
};
@@ -4287,9 +4287,19 @@
gcc_sdm429w_desc.clks[GCC_MDSS_ESC1_CLK] = NULL;
}
+static void fixup_for_sdm439_429(void)
+{
+ /*
+ * Below clocks are not available on SDM429/439, thus mark them NULL.
+ */
+ gcc_sdm429w_desc.clks[GCC_GFX_TCU_CLK] = NULL;
+ gcc_sdm429w_desc.clks[GCC_GFX_TBU_CLK] = NULL;
+ gcc_sdm429w_desc.clks[GCC_GTCU_AHB_CLK] = NULL;
+}
static const struct of_device_id gcc_sdm429w_match_table[] = {
{ .compatible = "qcom,gcc-sdm429w" },
{ .compatible = "qcom,gcc-qm215" },
+ { .compatible = "qcom,gcc-sdm439" },
{ }
};
MODULE_DEVICE_TABLE(of, gcc_sdm429w_match_table);
@@ -4299,11 +4309,14 @@
struct regmap *regmap;
struct clk *clk;
int ret, speed_bin;
- bool qm215;
+ bool qm215, is_sdm439;
qm215 = of_device_is_compatible(pdev->dev.of_node,
"qcom,gcc-qm215");
+ is_sdm439 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gcc-sdm439");
+
clk = clk_get(&pdev->dev, "bi_tcxo");
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
@@ -4333,6 +4346,9 @@
0xff0, 0xff0);
}
+ if (is_sdm439)
+ fixup_for_sdm439_429();
+
clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
clk = devm_clk_register(&pdev->dev, &wcnss_m_clk.hw);
@@ -4421,6 +4437,7 @@
static const struct of_device_id mdss_sdm429w_match_table[] = {
{ .compatible = "qcom,gcc-mdss-sdm429w" },
{ .compatible = "qcom,gcc-mdss-qm215" },
+ { .compatible = "qcom,gcc-mdss-sdm439" },
{}
};
MODULE_DEVICE_TABLE(of, mdss_sdm429w_match_table);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 35ef9cd..97f6d52 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1324,8 +1324,10 @@
goto err_dev;
}
- for (index = 0; index < edev->max_supported; index++)
+ for (index = 0; index < edev->max_supported; index++) {
RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
+ BLOCKING_INIT_NOTIFIER_HEAD(&edev->bnh[index]);
+ }
RAW_INIT_NOTIFIER_HEAD(&edev->nh_all);
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 7fd704b..1e08b4d 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
*/
#define ANY_ID (~0)
@@ -275,7 +275,8 @@
static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
.base = {
DEFINE_ADRENO_REV(ADRENO_REV_A505, 5, 0, 5, ANY_ID),
- .features = ADRENO_PREEMPTION | ADRENO_64BIT,
+ .features = ADRENO_PREEMPTION | ADRENO_64BIT |
+ ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION,
.gpudev = &adreno_a5xx_gpudev,
.gmem_size = (SZ_128K + SZ_8K),
.busy_mask = 0xfffffffe,
@@ -283,6 +284,7 @@
},
.pm4fw_name = "a530_pm4.fw",
.pfpfw_name = "a530_pfp.fw",
+ .zap_name = "a506_zap",
.hwcg = a50x_hwcg_regs,
.hwcg_count = ARRAY_SIZE(a50x_hwcg_regs),
.vbif = a530_vbif_regs,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 61ae03a..469649e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -59,6 +59,7 @@
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/irq.h>
#include <linux/wait.h>
+#include <linux/notifier.h>
#include <linux/amba/bus.h>
@@ -266,6 +267,7 @@
#define ARM_SMMU_OPT_STATIC_CB (1 << 6)
#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7)
#define ARM_SMMU_OPT_NO_DYNAMIC_ASID (1 << 8)
+#define ARM_SMMU_OPT_HALT (1 << 9)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -307,6 +309,7 @@
unsigned int num_impl_def_attach_registers;
struct arm_smmu_power_resources *pwr;
+ struct notifier_block regulator_nb;
spinlock_t atos_lock;
@@ -451,6 +454,7 @@
{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
{ ARM_SMMU_OPT_NO_DYNAMIC_ASID, "qcom,no-dynamic-asid" },
+ { ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"},
{ 0, NULL},
};
@@ -5161,6 +5165,71 @@
return 0;
}
+static int regulator_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int ret = 0;
+ struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device,
+ regulator_nb);
+
+ if (event != REGULATOR_EVENT_PRE_DISABLE &&
+ event != REGULATOR_EVENT_ENABLE)
+ return NOTIFY_OK;
+
+ ret = arm_smmu_prepare_clocks(smmu->pwr);
+ if (ret)
+ goto out;
+
+ ret = arm_smmu_power_on_atomic(smmu->pwr);
+ if (ret)
+ goto unprepare_clock;
+
+ if (event == REGULATOR_EVENT_PRE_DISABLE)
+ qsmmuv2_halt(smmu);
+ else if (event == REGULATOR_EVENT_ENABLE) {
+ if (arm_smmu_restore_sec_cfg(smmu, 0))
+ goto power_off;
+ qsmmuv2_resume(smmu);
+ }
+power_off:
+ arm_smmu_power_off_atomic(smmu->pwr);
+unprepare_clock:
+ arm_smmu_unprepare_clocks(smmu->pwr);
+out:
+ return NOTIFY_OK;
+}
+
+static int register_regulator_notifier(struct arm_smmu_device *smmu)
+{
+ struct device *dev = smmu->dev;
+ struct regulator_bulk_data *consumers;
+ int ret = 0, num_consumers;
+ struct arm_smmu_power_resources *pwr = smmu->pwr;
+
+ if (!(smmu->options & ARM_SMMU_OPT_HALT))
+ goto out;
+
+ num_consumers = pwr->num_gdscs;
+ consumers = pwr->gdscs;
+
+ if (!num_consumers) {
+ dev_info(dev, "no regulator info exist for %s\n",
+ dev_name(dev));
+ goto out;
+ }
+
+ smmu->regulator_nb.notifier_call = regulator_notifier;
+ /* registering the notifier against one gdsc is sufficient as
+ * we do enable/disable regulators in group.
+ */
+ ret = regulator_register_notifier(consumers[0].consumer,
+ &smmu->regulator_nb);
+ if (ret)
+ dev_err(dev, "Regulator notifier request failed\n");
+out:
+ return ret;
+}
+
static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
{
const char *cname;
@@ -5778,6 +5847,10 @@
if (!using_legacy_binding)
arm_smmu_bus_init();
+ err = register_regulator_notifier(smmu);
+ if (err)
+ goto out_power_off;
+
return 0;
out_power_off:
diff --git a/drivers/iommu/io-pgtable-msm-secure.c b/drivers/iommu/io-pgtable-msm-secure.c
index 0d50258..1481c9a 100644
--- a/drivers/iommu/io-pgtable-msm-secure.c
+++ b/drivers/iommu/io-pgtable-msm-secure.c
@@ -66,6 +66,7 @@
/* Now allocate memory for the secure page tables */
attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+ arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
if (!cpu_addr) {
pr_err("%s: Failed to allocate %d bytes for PTBL\n",
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ef31fea..393bc94 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1086,6 +1086,8 @@
host->ops->enable_sdio_irq(host, 1);
}
+ mmc_retune_needed(host);
+
out:
mmc_log_string(host, "Exit err: %d\n", err);
mmc_release_host(host);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 4f97dbe..eff54c6 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -41,6 +41,7 @@
#define CNSS_QMI_TIMEOUT_DEFAULT 10000
#define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF
#define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000
+#define CNSS_MIN_TIME_SYNC_PERIOD 2000
static struct cnss_plat_data *plat_env;
@@ -2329,6 +2330,33 @@
msm_bus_scale_unregister_client(bus_bw_info->bus_client);
}
+static ssize_t qtime_sync_period_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ plat_priv->ctrl_params.time_sync_period);
+}
+
+static ssize_t qtime_sync_period_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned int qtime_sync_period = 0;
+
+	/* %u matches unsigned int; "%du" mismatched the pointer type */
+	if (sscanf(buf, "%u", &qtime_sync_period) != 1) {
+		cnss_pr_err("Invalid qtime sync sysfs command\n");
+		return -EINVAL;
+	}
+
+	if (qtime_sync_period >= CNSS_MIN_TIME_SYNC_PERIOD)
+		cnss_pci_update_qtime_sync_period(dev, qtime_sync_period);
+
+	return count;
+}
+
static ssize_t recovery_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -2419,11 +2447,13 @@
static DEVICE_ATTR_WO(fs_ready);
static DEVICE_ATTR_WO(shutdown);
static DEVICE_ATTR_WO(recovery);
+static DEVICE_ATTR_RW(qtime_sync_period);
static struct attribute *cnss_attrs[] = {
&dev_attr_fs_ready.attr,
&dev_attr_shutdown.attr,
&dev_attr_recovery.attr,
+ &dev_attr_qtime_sync_period.attr,
NULL,
};
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index f4f7fb6..54aee78 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -479,5 +479,6 @@
void *va, phys_addr_t pa, size_t size);
unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
enum cnss_timeout_type);
-
+int cnss_pci_update_qtime_sync_period(struct device *dev,
+ unsigned int qtime_sync_period);
#endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 25e01ec..78b4c47 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1502,6 +1502,24 @@
cancel_delayed_work_sync(&pci_priv->time_sync_work);
}
+int cnss_pci_update_qtime_sync_period(struct device *dev,
+				     unsigned int qtime_sync_period)
+{
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+	/* guard BEFORE dereferencing: drvdata may be NULL pre-probe */
+	struct cnss_pci_data *pci_priv = plat_priv ? plat_priv->bus_priv : NULL;
+	if (!plat_priv || !pci_priv)
+		return -ENODEV;
+
+	cnss_pci_stop_time_sync_update(pci_priv);
+	plat_priv->ctrl_params.time_sync_period = qtime_sync_period;
+	cnss_pci_start_time_sync_update(pci_priv);
+	cnss_pr_dbg("WLAN qtime sync period %u\n",
+		    plat_priv->ctrl_params.time_sync_period);
+
+	return 0;
+}
+
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
int ret = 0;
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
index 13e92c3..2cd59e8 100644
--- a/drivers/net/wireless/cnss2/power.c
+++ b/drivers/net/wireless/cnss2/power.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/delay.h>
@@ -789,10 +789,17 @@
int ret = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
u8 wlan_en_state = 0;
- if (bt_en_gpio < 0 || plat_priv->device_id != QCA6490_DEVICE_ID ||
- plat_priv->device_id != QCA6390_DEVICE_ID)
+ if (bt_en_gpio < 0)
goto set_wlan_en;
+ switch (plat_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ case QCA6490_DEVICE_ID:
+ break;
+ default:
+ goto set_wlan_en;
+ }
+
if (gpio_get_value(bt_en_gpio)) {
cnss_pr_dbg("BT_EN_GPIO State: On\n");
ret = cnss_select_pinctrl_state(plat_priv, true);
@@ -808,7 +815,9 @@
cnss_select_pinctrl_state(plat_priv, false);
wlan_en_state = 0;
}
- /* 100 ms delay for BT_EN and WLAN_EN QCA6490 PMU sequencing */
+ /* 100 ms delay for BT_EN and WLAN_EN QCA6490/QCA6390 PMU
+ * sequencing.
+ */
msleep(100);
}
set_wlan_en:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7f03e6d..8ec06c5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
@@ -746,6 +746,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -841,6 +850,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule_ext_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule_ext_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -937,6 +955,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule_after_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule_after_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -1031,6 +1058,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_rt_rule_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_rt_rule_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -1124,6 +1160,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -1217,6 +1262,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule_after_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule_after_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
@@ -1311,6 +1365,15 @@
retval = -EFAULT;
goto free_param_kptr;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_flt_rule_v2 *)param)->num_rules
+ != pre_entry)) {
+ IPAERR_RL("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_flt_rule_v2 *)param)->
+ num_rules, pre_entry);
+ retval = -EFAULT;
+ goto free_param_kptr;
+ }
/* alloc kernel pointer with actual payload size */
kptr = kzalloc(pyld_sz, GFP_KERNEL);
if (!kptr) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 439df92..abceee6 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "icnss: " fmt
@@ -1081,26 +1081,26 @@
goto qmi_registered;
}
ignore_assert = true;
- goto clear_server;
+ goto fail;
}
if (!penv->msa_va) {
icnss_pr_err("Invalid MSA address\n");
ret = -EINVAL;
- goto clear_server;
+ goto fail;
}
ret = wlfw_msa_mem_info_send_sync_msg(penv);
if (ret < 0) {
ignore_assert = true;
- goto clear_server;
+ goto fail;
}
if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
ret = icnss_assign_msa_perm_all(penv,
ICNSS_MSA_PERM_WLAN_HW_RW);
if (ret < 0)
- goto clear_server;
+ goto fail;
set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
}
@@ -1140,8 +1140,6 @@
err_setup_msa:
icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
-clear_server:
- icnss_clear_server(penv);
fail:
ICNSS_ASSERT(ignore_assert);
qmi_registered:
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index ca0703b..618f6d8 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -566,7 +566,7 @@
ret = icnss_hw_power_on(priv);
if (ret)
- goto clear_server;
+ goto fail;
ret = wlfw_ind_register_send_sync_msg(priv);
if (ret < 0) {
@@ -654,8 +654,6 @@
err_power_on:
icnss_hw_power_off(priv);
-clear_server:
- icnss_clear_server(priv);
fail:
ICNSS_ASSERT(ignore_assert);
qmi_registered:
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index cc78d37..eacb7d3 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -151,3 +151,14 @@
on the CX rail.
If you want this support, you should say Y here.
+
+config QTI_THERMAL_QFPROM
+ tristate "Qualcomm Technologies Inc. thermal QFPROM driver"
+ depends on THERMAL
+ depends on QCOM_QFPROM
+ help
+	  This driver enables or disables pre-configured thermal zones
+	  selectively at runtime, based on whether a QFPROM nvmem cell
+	  bit value is set. It can check multiple nvmem cell values for
+	  multiple conditions; if any of the nvmem-cell conditions fails,
+	  the driver exits with the default thermal zones enabled.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 8a0b619..9f5224a 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -14,3 +14,4 @@
obj-$(CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE) += lmh_cpu_vdd_cdev.o
obj-$(CONFIG_QTI_LIMITS_ISENSE_CDSP) += msm_isense_cdsp.o
obj-$(CONFIG_QTI_CX_IPEAK_COOLING_DEVICE) += cx_ipeak_cdev.o
+obj-$(CONFIG_QTI_THERMAL_QFPROM) += qti_thermal_qfprom.o
diff --git a/drivers/thermal/qcom/qti_thermal_qfprom.c b/drivers/thermal/qcom/qti_thermal_qfprom.c
new file mode 100644
index 0000000..47b9b2c
--- /dev/null
+++ b/drivers/thermal/qcom/qti_thermal_qfprom.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+
+static int thermal_qfprom_read(struct platform_device *pdev,
+		const char *cname, unsigned int *efuse_val)
+{
+	struct nvmem_cell *cell;
+	size_t len;
+	char *buf;
+
+	cell = nvmem_cell_get(&pdev->dev, cname);
+	if (IS_ERR(cell)) {
+		dev_err(&pdev->dev, "failed to get nvmem cell %s\n", cname);
+		return -EINVAL;
+	}
+
+	buf = nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+	if (IS_ERR_OR_NULL(buf)) {
+		dev_err(&pdev->dev, "failed to read nvmem cell %s\n", cname);
+		return -EINVAL;
+	}
+
+	if (len == 0 || len > sizeof(u32)) {
+		dev_err(&pdev->dev, "nvmem cell length out of range:%zu\n", len);
+		kfree(buf);
+		return -EINVAL;
+	}
+	memcpy(efuse_val, buf, min(len, sizeof(*efuse_val)));
+	kfree(buf);
+
+	return 0;
+}
+
+static int thermal_zone_set_mode(struct platform_device *pdev,
+ enum thermal_device_mode mode)
+{
+ const char *name;
+ struct property *prop = NULL;
+
+ of_property_for_each_string(pdev->dev.of_node,
+ mode == THERMAL_DEVICE_ENABLED ?
+ "qcom,thermal-zone-enable-list" :
+ "qcom,thermal-zone-disable-list", prop, name) {
+ struct thermal_zone_device *zone;
+ struct thermal_instance *pos;
+
+ zone = thermal_zone_get_zone_by_name(name);
+ if (IS_ERR(zone)) {
+ dev_err(&pdev->dev,
+ "could not find %s thermal zone\n", name);
+ continue;
+ }
+
+ if (!(zone->ops && zone->ops->set_mode)) {
+ dev_err(&pdev->dev,
+ "thermal zone ops is not supported for %s\n",
+ name);
+ continue;
+ }
+
+ zone->ops->set_mode(zone, mode);
+ if (mode == THERMAL_DEVICE_DISABLED) {
+ /* Clear thermal zone device */
+ mutex_lock(&zone->lock);
+ zone->temperature = THERMAL_TEMP_INVALID;
+ zone->passive = 0;
+ list_for_each_entry(pos, &zone->thermal_instances,
+ tz_node) {
+ pos->initialized = false;
+ pos->target = THERMAL_NO_TARGET;
+ mutex_lock(&pos->cdev->lock);
+ pos->cdev->updated = false;
+ mutex_unlock(&pos->cdev->lock);
+ thermal_cdev_update(pos->cdev);
+ }
+ mutex_unlock(&zone->lock);
+ }
+ dev_dbg(&pdev->dev, "thermal zone %s is %s\n", name,
+ mode == THERMAL_DEVICE_ENABLED ?
+ "enabled" : "disabled");
+ }
+
+ return 0;
+}
+
+static void update_thermal_zones(struct platform_device *pdev)
+{
+ thermal_zone_set_mode(pdev, THERMAL_DEVICE_ENABLED);
+ thermal_zone_set_mode(pdev, THERMAL_DEVICE_DISABLED);
+}
+
+static int thermal_qfprom_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ const char *name;
+ struct property *prop = NULL;
+ u8 efuse_pass_cnt = 0;
+
+ of_property_for_each_string(pdev->dev.of_node,
+ "nvmem-cell-names", prop, name) {
+ u32 efuse_val = 0, efuse_match_val = 0;
+
+ err = thermal_qfprom_read(pdev, name, &efuse_val);
+ if (err)
+ return err;
+
+ err = of_property_read_u32_index(pdev->dev.of_node,
+ "qcom,thermal-qfprom-bit-values", efuse_pass_cnt,
+ &efuse_match_val);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Invalid qfprom bit value for index %d\n",
+ efuse_pass_cnt);
+ return err;
+ }
+
+ dev_dbg(&pdev->dev, "efuse[%s] val:0x%x match val[%d]:0x%x\n",
+ name, efuse_val, efuse_pass_cnt,
+ efuse_match_val);
+
+ /* if any of efuse condition fails, just exit */
+ if (efuse_val != efuse_match_val)
+ return 0;
+
+ efuse_pass_cnt++;
+ }
+
+ if (efuse_pass_cnt)
+ update_thermal_zones(pdev);
+
+ return err;
+}
+
+static const struct of_device_id thermal_qfprom_match[] = {
+ { .compatible = "qcom,thermal-qfprom-device", },
+ {},
+};
+
+static struct platform_driver thermal_qfprom_driver = {
+ .probe = thermal_qfprom_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = thermal_qfprom_match,
+ },
+};
+
+int __init thermal_qfprom_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&thermal_qfprom_driver);
+ if (err)
+ pr_err("Failed to register thermal qfprom platform driver:%d\n",
+ err);
+ return err;
+}
+
+late_initcall(thermal_qfprom_init);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 7ed40ca..9d9a1dc 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -211,6 +211,7 @@
struct completion m_cmd_timeout;
struct completion s_cmd_timeout;
spinlock_t rx_lock;
+ bool bypass_flow_control;
};
static const struct uart_ops msm_geni_serial_pops;
@@ -1454,7 +1455,8 @@
}
if (!uart_console(uport)) {
- msm_geni_serial_set_manual_flow(false, port);
+ if (!port->bypass_flow_control)
+ msm_geni_serial_set_manual_flow(false, port);
/*
* Wait for the stale timeout to happen if there
* is any data pending in the rx fifo.
@@ -1571,7 +1573,7 @@
port->s_cmd = false;
exit_rx_seq:
- if (!uart_console(uport))
+ if (!uart_console(uport) && !port->bypass_flow_control)
msm_geni_serial_set_manual_flow(true, port);
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
@@ -2507,7 +2509,12 @@
return;
}
}
+
+	/* Client must control flow; don't touch RFR during baud change. */
+ port->bypass_flow_control = true;
msm_geni_serial_stop_rx(uport);
+ port->bypass_flow_control = false;
+
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->cur_baud = baud;
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 2b07aae..f27f707 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -71,5 +71,5 @@
obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o
usb_f_qcrndis-y := f_qc_rndis.o u_data_ipa.o
obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
-usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o
+usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o u_bam_dmux.o
obj-$(CONFIG_USB_F_RMNET_BAM) += usb_f_rmnet_bam.o
diff --git a/drivers/usb/gadget/function/u_bam_dmux.c b/drivers/usb/gadget/function/u_bam_dmux.c
new file mode 100644
index 0000000..9eab3ce
--- /dev/null
+++ b/drivers/usb/gadget/function/u_bam_dmux.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2018, 2020-2021, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/platform_device.h>
+
+#include <soc/qcom/bam_dmux.h>
+
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/usb_ctrl_qti.h>
+#include <linux/usb_bam.h>
+
+#include "u_rmnet.h"
+
+static struct workqueue_struct *gbam_wq;
+static unsigned int n_tx_req_queued;
+
+static unsigned int bam_ch_ids[BAM_DMUX_NUM_FUNCS] = {
+ BAM_DMUX_USB_RMNET_0,
+ BAM_DMUX_USB_DPL
+};
+
+static char bam_ch_names[BAM_DMUX_NUM_FUNCS][BAM_DMUX_CH_NAME_MAX_LEN];
+
+#define BAM_PENDING_PKTS_LIMIT 220
+#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
+#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
+#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
+#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
+
+#define BAM_MUX_HDR 8
+
+#define BAM_MUX_RX_Q_SIZE 128
+#define BAM_MUX_TX_Q_SIZE 200
+#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
+
+#define DL_INTR_THRESHOLD 20
+#define BAM_PENDING_BYTES_LIMIT (50 * BAM_MUX_RX_REQ_SIZE)
+#define BAM_PENDING_BYTES_FCTRL_EN_TSHOLD (BAM_PENDING_BYTES_LIMIT / 3)
+
+/* Extra buffer size to allocate for tx */
+#define EXTRA_ALLOCATION_SIZE_U_BAM 128
+
+static unsigned int bam_pending_pkts_limit = BAM_PENDING_PKTS_LIMIT;
+static ssize_t bam_pending_pkts_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_pending_pkts_limit);
+}
+
+static ssize_t bam_pending_pkts_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_pending_pkts_limit = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_pending_pkts_limit);
+
+static unsigned int bam_pending_bytes_limit = BAM_PENDING_BYTES_LIMIT;
+static ssize_t bam_pending_bytes_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_pending_bytes_limit);
+}
+
+static ssize_t bam_pending_bytes_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_pending_bytes_limit = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_pending_bytes_limit);
+
+static unsigned int bam_pending_bytes_fctrl_en_thold =
+ BAM_PENDING_BYTES_FCTRL_EN_TSHOLD;
+static ssize_t bam_pending_bytes_fctrl_en_thold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ bam_pending_bytes_fctrl_en_thold);
+}
+
+static ssize_t bam_pending_bytes_fctrl_en_thold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_pending_bytes_fctrl_en_thold = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_pending_bytes_fctrl_en_thold);
+
+static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
+static ssize_t bam_mux_tx_pkt_drop_thld_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_tx_pkt_drop_thld);
+}
+
+static ssize_t bam_mux_tx_pkt_drop_thld_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_tx_pkt_drop_thld = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_tx_pkt_drop_thld);
+
+static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
+static ssize_t bam_mux_rx_fctrl_en_thld_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_en_thld);
+}
+
+static ssize_t bam_mux_rx_fctrl_en_thld_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_rx_fctrl_en_thld = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_rx_fctrl_en_thld);
+
+static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
+static ssize_t bam_mux_rx_fctrl_support_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_support);
+}
+
+static ssize_t bam_mux_rx_fctrl_support_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_rx_fctrl_support = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_rx_fctrl_support);
+
+static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
+static ssize_t bam_mux_rx_fctrl_dis_thld_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_dis_thld);
+}
+
+static ssize_t bam_mux_rx_fctrl_dis_thld_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_rx_fctrl_dis_thld = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_rx_fctrl_dis_thld);
+
+static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
+static ssize_t bam_mux_tx_q_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_tx_q_size);
+}
+
+static ssize_t bam_mux_tx_q_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_tx_q_size = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_tx_q_size);
+
+static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
+static ssize_t bam_mux_rx_q_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_q_size);
+}
+
+static ssize_t bam_mux_rx_q_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ bam_mux_rx_q_size = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_rx_q_size);
+
+static unsigned long bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
+static ssize_t bam_mux_rx_req_size_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu\n", bam_mux_rx_req_size);
+}
+
+static ssize_t bam_mux_rx_req_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	bam_mux_rx_req_size = val;
+
+	return count;
+
+}
+static DEVICE_ATTR_RW(bam_mux_rx_req_size);
+
+static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
+static ssize_t dl_intr_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", dl_intr_threshold);
+}
+
+static ssize_t dl_intr_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int val;
+
+ if (kstrtos32(buf, 0, &val))
+ return -EINVAL;
+
+ dl_intr_threshold = val;
+
+ return count;
+
+}
+static DEVICE_ATTR_RW(dl_intr_threshold);
+
+#define BAM_CH_OPENED BIT(0)
+#define BAM_CH_READY BIT(1)
+#define BAM_CH_WRITE_INPROGRESS BIT(2)
+
+enum u_bam_event_type {
+ U_BAM_DISCONNECT_E = 0,
+ U_BAM_CONNECT_E,
+};
+
+struct bam_ch_info {
+ unsigned long flags;
+ unsigned int id;
+
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_q;
+
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+ struct sk_buff_head rx_skb_idle;
+
+ struct gbam_port *port;
+ struct work_struct write_tobam_w;
+ struct work_struct write_tohost_w;
+
+ /* stats */
+ unsigned int pending_pkts_with_bam;
+ unsigned int pending_bytes_with_bam;
+ unsigned int tohost_drp_cnt;
+ unsigned int tomodem_drp_cnt;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ unsigned long to_modem;
+ unsigned long to_host;
+ unsigned int rx_flow_control_disable;
+ unsigned int rx_flow_control_enable;
+ unsigned int rx_flow_control_triggered;
+ unsigned int max_num_pkts_pending_with_bam;
+ unsigned int max_bytes_pending_with_bam;
+ unsigned int delayed_bam_mux_write_done;
+ unsigned long skb_expand_cnt;
+};
+
+struct gbam_port {
+ enum u_bam_event_type last_event;
+ unsigned int port_num;
+ spinlock_t port_lock_ul;
+ spinlock_t port_lock_dl;
+ spinlock_t port_lock;
+
+ struct data_port *port_usb;
+ struct usb_gadget *gadget;
+
+ struct bam_ch_info data_ch;
+
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+};
+
+static struct bam_portmaster {
+ struct gbam_port *port;
+ struct platform_driver pdrv;
+} bam_ports[BAM_DMUX_NUM_FUNCS];
+
+static void gbam_start_rx(struct gbam_port *port);
+static void gbam_notify(void *p, int event, unsigned long data);
+static void gbam_data_write_tobam(struct work_struct *w);
+
+/*---------------misc functions---------------- */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK\n", __func__,
+ ep, head, num, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_debug("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+
+static inline dma_addr_t gbam_get_dma_from_skb(struct sk_buff *skb)
+{
+ return *((dma_addr_t *)(skb->cb));
+}
+
+/* This function should be called with port_lock_ul lock held */
+static struct sk_buff *gbam_alloc_skb_from_pool(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t skb_buf_dma_addr;
+
+ if (!port)
+ return NULL;
+
+ d = &port->data_ch;
+ if (!d)
+ return NULL;
+
+ if (d->rx_skb_idle.qlen == 0) {
+ /*
+ * In case skb idle pool is empty, we allow to allocate more
+ * skbs so we dynamically enlarge the pool size when needed.
+ * Therefore, in steady state this dynamic allocation will
+ * stop when the pool will arrive to its optimal size.
+ */
+ pr_debug("%s: allocate skb\n", __func__);
+ skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+
+ if (!skb)
+ goto alloc_exit;
+
+ skb_reserve(skb, BAM_MUX_HDR);
+ skb_buf_dma_addr = DMA_ERROR_CODE;
+
+ memcpy(skb->cb, &skb_buf_dma_addr,
+ sizeof(skb_buf_dma_addr));
+
+ } else {
+ pr_debug("%s: pull skb from pool\n", __func__);
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ if (!skb)
+ goto alloc_exit;
+
+ if (skb_headroom(skb) < BAM_MUX_HDR)
+ skb_reserve(skb, BAM_MUX_HDR);
+ }
+
+alloc_exit:
+ return skb;
+}
+
+/* This function should be called with port_lock_ul lock held */
+static void gbam_free_skb_to_pool(struct gbam_port *port, struct sk_buff *skb)
+{
+ struct bam_ch_info *d;
+
+ if (!port)
+ return;
+ d = &port->data_ch;
+
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_tail(&d->rx_skb_idle, skb);
+}
+
+static void gbam_free_rx_skb_idle_list(struct gbam_port *port)
+{
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct usb_gadget *gadget = NULL;
+
+ if (!port)
+ return;
+ d = &port->data_ch;
+
+ gadget = port->port_usb->cdev->gadget;
+
+ while (d->rx_skb_idle.qlen > 0) {
+ skb = __skb_dequeue(&d->rx_skb_idle);
+ if (!skb)
+ break;
+
+ dma_addr = gbam_get_dma_from_skb(skb);
+
+ if (gadget && dma_addr != DMA_ERROR_CODE) {
+ dma_unmap_single(&gadget->dev, dma_addr,
+ bam_mux_rx_req_size, DMA_BIDIRECTIONAL);
+
+ dma_addr = DMA_ERROR_CODE;
+ memcpy(skb->cb, &dma_addr,
+ sizeof(dma_addr));
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+ unsigned long flags;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ int ret;
+ int tail_room = 0;
+ int extra_alloc = 0;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ ep = port->port_usb->in;
+
+ while (!list_empty(&d->tx_idle)) {
+ skb = __skb_dequeue(&d->tx_skb_q);
+ if (!skb)
+ break;
+
+ /*
+ * Some UDC requires allocation of some extra bytes for
+ * TX buffer due to hardware requirement. Check if extra
+ * bytes are already there, otherwise allocate new buffer
+ * with extra bytes and do memcpy.
+ */
+ if (port->gadget->extra_buf_alloc)
+ extra_alloc = EXTRA_ALLOCATION_SIZE_U_BAM;
+ tail_room = skb_tailroom(skb);
+ if (tail_room < extra_alloc) {
+ pr_debug("%s: tail_room %d less than %d\n", __func__,
+ tail_room, extra_alloc);
+ new_skb = skb_copy_expand(skb, 0, extra_alloc -
+ tail_room, GFP_ATOMIC);
+ if (!new_skb) {
+ pr_err("skb_copy_expand failed\n");
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ d->skb_expand_cnt++;
+ }
+
+ req = list_first_entry(&d->tx_idle,
+ struct usb_request,
+ list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+ n_tx_req_queued++;
+ if (n_tx_req_queued == dl_intr_threshold) {
+ req->no_interrupt = 0;
+ n_tx_req_queued = 0;
+ } else {
+ req->no_interrupt = 1;
+ }
+
+ /* Send ZLP in case packet length is multiple of maxpacksize */
+ req->zero = 1;
+
+ list_del(&req->list);
+
+ spin_unlock(&port->port_lock_dl);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock(&port->port_lock_dl);
+ if (ret) {
+ pr_err_ratelimited("%s: usb epIn failed with %d\n",
+ __func__, ret);
+ list_add(&req->list, &d->tx_idle);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ d->to_host++;
+ }
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+static void gbam_write_data_tohost_w(struct work_struct *w)
+{
+ struct bam_ch_info *d;
+ struct gbam_port *port;
+
+ d = container_of(w, struct bam_ch_info, write_tohost_w);
+ port = d->port;
+
+ gbam_write_data_tohost(port);
+}
+
+static void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ pr_debug("%s: p:%pK#%d d:%pK skb_len:%d\n", __func__,
+ port, port->port_num, d, skb->len);
+
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
+ d->tohost_drp_cnt++;
+ printk_ratelimited(KERN_ERR "%s: tx pkt dropped: tx_drop_cnt:%u\n",
+ __func__, d->tohost_drp_cnt);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ __skb_queue_tail(&d->tx_skb_q, skb);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+
+ gbam_write_data_tohost(port);
+}
+
+static void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+ struct gbam_port *port = p;
+ struct bam_ch_info *d = &port->data_ch;
+ unsigned long flags;
+
+ if (!skb)
+ return;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ gbam_free_skb_to_pool(port, skb);
+
+ pr_debug("%s:port:%pK d:%pK tom:%lu ppkt:%u pbytes:%u pno:%d\n",
+ __func__, port, d, d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ /*
+ * If BAM doesn't have much pending data then push new data from here:
+ * write_complete notify only to avoid any underruns due to wq latency
+ */
+ if (d->pending_bytes_with_bam <= bam_pending_bytes_fctrl_en_thold) {
+ gbam_data_write_tobam(&d->write_tobam_w);
+ } else {
+ d->delayed_bam_mux_write_done++;
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+}
+
+/* This function should be called with port_lock_ul spinlock acquired */
+static bool gbam_ul_bam_limit_reached(struct bam_ch_info *data_ch)
+{
+ unsigned int curr_pending_pkts = data_ch->pending_pkts_with_bam;
+ unsigned int curr_pending_bytes = data_ch->pending_bytes_with_bam;
+ struct sk_buff *skb;
+
+ if (curr_pending_pkts >= bam_pending_pkts_limit)
+ return true;
+
+ /* check if next skb length doesn't exceed pending_bytes_limit */
+ skb = skb_peek(&data_ch->rx_skb_q);
+ if (!skb)
+ return false;
+
+ if ((curr_pending_bytes + skb->len) > bam_pending_bytes_limit)
+ return true;
+ else
+ return false;
+}
+
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+ struct gbam_port *port;
+ struct bam_ch_info *d;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+ int qlen;
+
+ d = container_of(w, struct bam_ch_info, write_tobam_w);
+ port = d->port;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+ /* Bail out if already in progress */
+ if (test_bit(BAM_CH_WRITE_INPROGRESS, &d->flags)) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ set_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+
+ while (!gbam_ul_bam_limit_reached(d)) {
+ skb = __skb_dequeue(&d->rx_skb_q);
+ if (!skb)
+ break;
+
+ d->pending_pkts_with_bam++;
+ d->pending_bytes_with_bam += skb->len;
+ d->to_modem++;
+
+ pr_debug("%s: port:%pK d:%pK tom:%lu ppkts:%u pbytes:%u pno:%d\n",
+ __func__, port, d,
+ d->to_modem, d->pending_pkts_with_bam,
+ d->pending_bytes_with_bam, port->port_num);
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ ret = msm_bam_dmux_write(d->id, skb);
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ pr_debug("%s: write error:%d\n", __func__, ret);
+ d->pending_pkts_with_bam--;
+ d->pending_bytes_with_bam -= skb->len;
+ d->to_modem--;
+ d->tomodem_drp_cnt++;
+ gbam_free_skb_to_pool(port, skb);
+ break;
+ }
+ if (d->pending_pkts_with_bam > d->max_num_pkts_pending_with_bam)
+ d->max_num_pkts_pending_with_bam =
+ d->pending_pkts_with_bam;
+ if (d->pending_bytes_with_bam > d->max_bytes_pending_with_bam)
+ d->max_bytes_pending_with_bam =
+ d->pending_bytes_with_bam;
+ }
+
+ qlen = d->rx_skb_q.qlen;
+
+ clear_bit(BAM_CH_WRITE_INPROGRESS, &d->flags);
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+ if (qlen < bam_mux_rx_fctrl_dis_thld) {
+ if (d->rx_flow_control_triggered) {
+ d->rx_flow_control_disable++;
+ d->rx_flow_control_triggered = 0;
+ }
+ gbam_start_rx(port);
+ }
+}
+/*-------------------------------------------------------------*/
+
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ pr_err("%s: data tx ep error %d\n",
+ __func__, status);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ if (!port)
+ return;
+
+ spin_lock(&port->port_lock_dl);
+ d = &port->data_ch;
+ list_add_tail(&req->list, &d->tx_idle);
+ spin_unlock(&port->port_lock_dl);
+
+ queue_work(gbam_wq, &d->write_tohost_w);
+}
+
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gbam_port *port = ep->driver_data;
+ struct bam_ch_info *d = &port->data_ch;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+ int queue = 0;
+
+ switch (status) {
+ case 0:
+ skb_put(skb, req->actual);
+ queue = 1;
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* cable disconnection */
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ req->buf = NULL;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ printk_ratelimited(KERN_ERR "%s: %s response error %d, %d/%d\n",
+ __func__, ep->name, status, req->actual, req->length);
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+ break;
+ }
+
+ spin_lock(&port->port_lock_ul);
+
+ if (queue) {
+ __skb_queue_tail(&d->rx_skb_q, skb);
+ queue_work(gbam_wq, &d->write_tobam_w);
+ }
+
+ /* TODO: Handle flow control gracefully by having
+ * having call back mechanism from bam driver
+ */
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
+ if (!d->rx_flow_control_triggered) {
+ d->rx_flow_control_triggered = 1;
+ d->rx_flow_control_enable++;
+ }
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb) {
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ return;
+ }
+ spin_unlock(&port->port_lock_ul);
+
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ req->context = skb;
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status) {
+ spin_lock(&port->port_lock_ul);
+ gbam_free_skb_to_pool(port, skb);
+ spin_unlock(&port->port_lock_ul);
+
+ printk_ratelimited(KERN_ERR "%s: data rx enqueue err %d\n",
+ __func__, status);
+
+ spin_lock(&port->port_lock_ul);
+ list_add_tail(&req->list, &d->rx_idle);
+ spin_unlock(&port->port_lock_ul);
+ }
+}
+
+static void gbam_start_rx(struct gbam_port *port)
+{
+ struct usb_request *req;
+ struct bam_ch_info *d;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (!port->port_usb || !port->port_usb->out) {
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ return;
+ }
+
+ d = &port->data_ch;
+ ep = port->port_usb->out;
+
+ while (port->port_usb && !list_empty(&d->rx_idle)) {
+
+ if (bam_mux_rx_fctrl_support &&
+ d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
+ break;
+
+ req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+ skb = gbam_alloc_skb_from_pool(port);
+ if (!skb)
+ break;
+
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->dma = gbam_get_dma_from_skb(skb);
+ req->length = bam_mux_rx_req_size;
+
+ req->context = skb;
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+ ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+ spin_lock_irqsave(&port->port_lock_ul, flags);
+ if (ret) {
+ gbam_free_skb_to_pool(port, skb);
+
+ printk_ratelimited(KERN_ERR "%s: rx queue failed %d\n",
+ __func__, ret);
+
+ if (port->port_usb)
+ list_add(&req->list, &d->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+static int _gbam_start_io(struct gbam_port *port, bool in)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct usb_ep *ep;
+ struct list_head *idle;
+ unsigned int queue_size;
+ spinlock_t *spinlock;
+ void (*ep_complete)(struct usb_ep *ep,
+ struct usb_request *req);
+
+ if (in)
+ spinlock = &port->port_lock_dl;
+ else
+ spinlock = &port->port_lock_ul;
+
+ spin_lock_irqsave(spinlock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(spinlock, flags);
+ return -EBUSY;
+ }
+
+ if (in) {
+ ep = port->port_usb->in;
+ idle = &port->data_ch.tx_idle;
+ queue_size = bam_mux_tx_q_size;
+ ep_complete = gbam_epin_complete;
+ } else {
+ ep = port->port_usb->out;
+ if (!ep)
+ goto out;
+ idle = &port->data_ch.rx_idle;
+ queue_size = bam_mux_rx_q_size;
+ ep_complete = gbam_epout_complete;
+ }
+
+ ret = gbam_alloc_requests(ep, idle, queue_size, ep_complete,
+ GFP_ATOMIC);
+out:
+ spin_unlock_irqrestore(spinlock, flags);
+ if (ret)
+ pr_err("%s: allocation failed\n", __func__);
+
+ return ret;
+}
+
+static void gbam_start_io(struct gbam_port *port)
+{
+ unsigned long flags;
+
+ pr_debug("%s: port:%pK\n", __func__, port);
+
+ if (_gbam_start_io(port, true))
+ return;
+
+ if (_gbam_start_io(port, false)) {
+ spin_lock_irqsave(&port->port_lock_dl, flags);
+ if (port->port_usb)
+ gbam_free_requests(port->port_usb->in,
+ &port->data_ch.tx_idle);
+ spin_unlock_irqrestore(&port->port_lock_dl, flags);
+ return;
+ }
+
+ /* queue out requests */
+ gbam_start_rx(port);
+}
+
+/*
+ * BAM DMUX event callback.  May be invoked with p == NULL after the
+ * channel has been closed, so every branch must tolerate a NULL port.
+ */
+static void gbam_notify(void *p, int event, unsigned long data)
+{
+	struct gbam_port *port = p;
+	struct bam_ch_info *d;
+	struct sk_buff *skb;
+
+	if (port == NULL)
+		pr_err("BAM DMUX notifying after channel close\n");
+
+	switch (event) {
+	case BAM_DMUX_RECEIVE:
+		skb = (struct sk_buff *)data;
+		if (port)
+			gbam_data_recv_cb(p, skb);
+		else
+			dev_kfree_skb_any(skb);
+		break;
+	case BAM_DMUX_WRITE_DONE:
+		skb = (struct sk_buff *)data;
+		if (port)
+			gbam_data_write_done(p, skb);
+		else
+			dev_kfree_skb_any(skb);
+		break;
+	case BAM_DMUX_TRANSMIT_SIZE:
+		/*
+		 * Fix: the original dereferenced port unconditionally here
+		 * (d = &port->data_ch), crashing when notified after channel
+		 * close even though the RECEIVE/WRITE_DONE cases guard it.
+		 * Only touch per-port state when the port is still around;
+		 * the request size is a global and can always be updated.
+		 */
+		if (port) {
+			d = &port->data_ch;
+			if (test_bit(BAM_CH_OPENED, &d->flags))
+				pr_warn("%s, BAM channel opened already\n",
+					__func__);
+		}
+		bam_mux_rx_req_size = data;
+		pr_debug("%s rx_req_size: %lu\n", __func__,
+				bam_mux_rx_req_size);
+		break;
+	}
+}
+
+/*
+ * Release all uplink (OUT endpoint) resources: the USB requests on the
+ * rx_idle list, any queued-but-unprocessed rx skbs, and the rx skb
+ * recycling pool.  The whole teardown runs under the UL lock; bails
+ * out early if the USB side is already gone or has no OUT endpoint
+ * (e.g. the DPL function, which is IN-only).
+ */
+static void gbam_free_rx_buffers(struct gbam_port *port)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct bam_ch_info *d;
+
+	spin_lock_irqsave(&port->port_lock_ul, flags);
+
+	if (!port->port_usb || !port->port_usb->out)
+		goto free_rx_buf_out;
+
+	d = &port->data_ch;
+	gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+	while ((skb = __skb_dequeue(&d->rx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+	gbam_free_rx_skb_idle_list(port);
+
+free_rx_buf_out:
+	spin_unlock_irqrestore(&port->port_lock_ul, flags);
+}
+
+/*
+ * Release all downlink (IN endpoint) resources: the USB requests on
+ * the tx_idle list and any skbs still queued for the host.  Mirrors
+ * gbam_free_rx_buffers() but runs under the DL lock.
+ */
+static void gbam_free_tx_buffers(struct gbam_port *port)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct bam_ch_info *d;
+
+	spin_lock_irqsave(&port->port_lock_dl, flags);
+
+	if (!port->port_usb)
+		goto free_tx_buf_out;
+
+	d = &port->data_ch;
+	gbam_free_requests(port->port_usb->in, &d->tx_idle);
+
+	while ((skb = __skb_dequeue(&d->tx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+free_tx_buf_out:
+	spin_unlock_irqrestore(&port->port_lock_dl, flags);
+}
+
+/* Free both directions' USB requests and pending skbs. */
+static void gbam_free_buffers(struct gbam_port *port)
+{
+	gbam_free_rx_buffers(port);
+	gbam_free_tx_buffers(port);
+}
+
+/*
+ * Workqueue handler: close the BAM DMUX channel after a disconnect.
+ * Queued from gbam_disconnect(); a no-op if the channel never opened.
+ */
+static void gbam_disconnect_work(struct work_struct *w)
+{
+	struct gbam_port *port =
+		container_of(w, struct gbam_port, disconnect_w);
+	struct bam_ch_info *d = &port->data_ch;
+
+	/* Early return replaces the original's pointless "goto exit". */
+	if (!test_bit(BAM_CH_OPENED, &d->flags)) {
+		pr_err("%s: Bam channel is not opened\n", __func__);
+		return;
+	}
+
+	msm_bam_dmux_close(d->id);
+	clear_bit(BAM_CH_OPENED, &d->flags);
+}
+
+/*
+ * Workqueue handler: open the BAM DMUX channel once both the USB side
+ * (port_usb set by gbam_connect) and the BAM side (BAM_CH_READY set by
+ * gbam_data_ch_probe) are up, then allocate requests and start I/O.
+ *
+ * Lock order is UL outer, DL inner — the same nesting used throughout
+ * this file.  The locks are only held for the port_usb check; the
+ * subsequent open/start path revalidates state where needed.
+ */
+static void gbam_connect_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+	struct bam_ch_info *d = &port->data_ch;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->port_lock_ul, flags);
+	spin_lock(&port->port_lock_dl);
+	if (!port->port_usb) {
+		/* cable pulled before the work ran */
+		spin_unlock(&port->port_lock_dl);
+		spin_unlock_irqrestore(&port->port_lock_ul, flags);
+		return;
+	}
+	spin_unlock(&port->port_lock_dl);
+	spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+	if (!test_bit(BAM_CH_READY, &d->flags)) {
+		pr_err("%s: Bam channel is not ready\n", __func__);
+		return;
+	}
+
+	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
+	if (ret) {
+		pr_err("%s: unable open bam ch:%d err:%d\n",
+				__func__, d->id, ret);
+		return;
+	}
+
+	set_bit(BAM_CH_OPENED, &d->flags);
+
+	gbam_start_io(port);
+
+	pr_debug("%s: done\n", __func__);
+}
+
+/*
+ * Create the tuning/stats sysfs attributes on the platform device.
+ * Returns 0, or -EINVAL if no device was supplied.
+ *
+ * NOTE(review): the device_create_file() return values are ignored, so
+ * attribute creation is best-effort — a failure leaves some knobs
+ * missing without any error report.  Confirm this is intentional.
+ */
+static int gbam_sys_init(struct device *dev)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	device_create_file(dev, &dev_attr_bam_pending_pkts_limit);
+	device_create_file(dev, &dev_attr_bam_pending_bytes_limit);
+	device_create_file(dev, &dev_attr_bam_pending_bytes_fctrl_en_thold);
+	device_create_file(dev, &dev_attr_bam_mux_tx_pkt_drop_thld);
+	device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_en_thld);
+	device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_support);
+	device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_dis_thld);
+	device_create_file(dev, &dev_attr_bam_mux_tx_q_size);
+	device_create_file(dev, &dev_attr_bam_mux_rx_q_size);
+	device_create_file(dev, &dev_attr_bam_mux_rx_req_size);
+	device_create_file(dev, &dev_attr_dl_intr_threshold);
+
+	return ret;
+
+}
+
+/* Remove every attribute created by gbam_sys_init() (same list, same order). */
+static void gbam_sys_remove(struct device *dev)
+{
+
+	device_remove_file(dev, &dev_attr_bam_pending_pkts_limit);
+	device_remove_file(dev, &dev_attr_bam_pending_bytes_limit);
+	device_remove_file(dev, &dev_attr_bam_pending_bytes_fctrl_en_thold);
+	device_remove_file(dev, &dev_attr_bam_mux_tx_pkt_drop_thld);
+	device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_en_thld);
+	device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_support);
+	device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_dis_thld);
+	device_remove_file(dev, &dev_attr_bam_mux_tx_q_size);
+	device_remove_file(dev, &dev_attr_bam_mux_rx_q_size);
+	device_remove_file(dev, &dev_attr_bam_mux_rx_req_size);
+	device_remove_file(dev, &dev_attr_dl_intr_threshold);
+
+}
+
+/*
+ * BAM data channel ready, allow attempt to open.
+ *
+ * Platform probe for a "bam_dmux_ch_N" device.  Finds the gbam port
+ * registered under that channel name, marks the channel READY, and —
+ * if the USB cable is already connected (port_usb set) — queues
+ * connect_w to open the channel.  Also creates the sysfs knobs.
+ */
+static int gbam_data_ch_probe(struct platform_device *pdev)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int i;
+	unsigned long flags;
+	bool do_work = false;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+
+		d = &port->data_ch;
+
+		if (!strcmp(bam_ch_names[i], pdev->name)) {
+			set_bit(BAM_CH_READY, &d->flags);
+
+			/* if usb is online, try opening bam_ch */
+			spin_lock_irqsave(&port->port_lock_ul, flags);
+			spin_lock(&port->port_lock_dl);
+			if (port->port_usb)
+				do_work = true;
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+			/* queue outside the spinlocks */
+			if (do_work)
+				queue_work(gbam_wq, &port->connect_w);
+			break;
+		}
+	}
+
+	gbam_sys_init(&pdev->dev);
+
+	return 0;
+}
+
+/*
+ * BAM data channel went inactive, so close it.
+ *
+ * Platform remove for a "bam_dmux_ch_N" device: flush both endpoints,
+ * free all queued buffers, close the DMUX channel and clear the
+ * READY/OPENED state.  Endpoint pointers are snapshotted under the
+ * locks so the flushes run lock-free.
+ *
+ * NOTE(review): msm_bam_dmux_close() is called even when BAM_CH_OPENED
+ * was never set — presumably it tolerates closing an unopened channel;
+ * confirm against the bam_dmux driver.
+ */
+static int gbam_data_ch_remove(struct platform_device *pdev)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	struct usb_ep *ep_in = NULL;
+	struct usb_ep *ep_out = NULL;
+	unsigned long flags;
+	int i;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) {
+		if (!strcmp(bam_ch_names[i], pdev->name)) {
+			port = bam_ports[i].port;
+			if (!port)
+				continue;
+
+			d = &port->data_ch;
+
+			spin_lock_irqsave(&port->port_lock_ul, flags);
+			spin_lock(&port->port_lock_dl);
+			if (port->port_usb) {
+				ep_in = port->port_usb->in;
+				ep_out = port->port_usb->out;
+			}
+			spin_unlock(&port->port_lock_dl);
+			spin_unlock_irqrestore(&port->port_lock_ul, flags);
+
+			if (ep_in)
+				usb_ep_fifo_flush(ep_in);
+			if (ep_out)
+				usb_ep_fifo_flush(ep_out);
+
+			gbam_free_buffers(port);
+
+			msm_bam_dmux_close(d->id);
+
+			/* bam dmux will free all pending skbs */
+			d->pending_pkts_with_bam = 0;
+			d->pending_bytes_with_bam = 0;
+
+			clear_bit(BAM_CH_READY, &d->flags);
+			clear_bit(BAM_CH_OPENED, &d->flags);
+		}
+	}
+	gbam_sys_remove(&pdev->dev);
+
+	return 0;
+}
+
+/*
+ * Undo gbam_port_alloc(): unregister the per-channel platform driver
+ * and free the port.  Safe to call for a never-allocated slot.
+ */
+static void gbam_port_free(enum bam_dmux_func_type func)
+{
+	struct gbam_port *port = bam_ports[func].port;
+	struct platform_driver *pdrv = &bam_ports[func].pdrv;
+
+	if (port) {
+		platform_driver_unregister(pdrv);
+
+		kfree(port);
+		bam_ports[func].port = NULL;
+	}
+}
+
+/*
+ * Allocate and initialize one gbam port, then register a platform
+ * driver named after its bam_dmux channel so gbam_data_ch_probe()
+ * fires when the channel comes up.  Returns 0 or a -errno.
+ */
+static int gbam_port_alloc(enum bam_dmux_func_type func)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	struct platform_driver *pdrv;
+	int ret;
+
+	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = func;
+
+	/* port initialization */
+	spin_lock_init(&port->port_lock_ul);
+	spin_lock_init(&port->port_lock_dl);
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, gbam_connect_work);
+	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
+
+	/* data ch */
+	d = &port->data_ch;
+	d->port = port;
+	INIT_LIST_HEAD(&d->tx_idle);
+	INIT_LIST_HEAD(&d->rx_idle);
+	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+	INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
+	skb_queue_head_init(&d->tx_skb_q);
+	skb_queue_head_init(&d->rx_skb_q);
+	skb_queue_head_init(&d->rx_skb_idle);
+	d->id = bam_ch_ids[func];
+
+	bam_ports[func].port = port;
+
+	scnprintf(bam_ch_names[func], BAM_DMUX_CH_NAME_MAX_LEN,
+			"bam_dmux_ch_%d", bam_ch_ids[func]);
+	pdrv = &bam_ports[func].pdrv;
+	pdrv->probe = gbam_data_ch_probe;
+	pdrv->remove = gbam_data_ch_remove;
+	pdrv->driver.name = bam_ch_names[func];
+	pdrv->driver.owner = THIS_MODULE;
+
+	/*
+	 * Fix: the original ignored platform_driver_register() failures,
+	 * leaving a half-registered port behind.  Undo the allocation so
+	 * gbam_setup() can report the error to its caller.
+	 */
+	ret = platform_driver_register(pdrv);
+	if (ret) {
+		bam_ports[func].port = NULL;
+		kfree(port);
+		return ret;
+	}
+
+	pr_debug("%s: port:%pK portno:%d\n", __func__, port, func);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE 1024
+/*
+ * debugfs read handler: render per-port counters into a buffer and
+ * copy the requested window to userspace.  Counters are sampled under
+ * both I/O locks (UL outer, DL inner) for a consistent snapshot.
+ */
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	char *buf;
+	unsigned long flags;
+	int ret;
+	int i;
+	int temp = 0;
+
+	/* Idiom fix: sizeof(char) is 1 by definition; drop the multiply. */
+	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock_ul, flags);
+		spin_lock(&port->port_lock_dl);
+
+		d = &port->data_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%pK data_ch:%pK#\n"
+				"dpkts_to_usbhost: %lu\n"
+				"dpkts_to_modem: %lu\n"
+				"dpkts_pwith_bam: %u\n"
+				"dbytes_pwith_bam: %u\n"
+				"to_usbhost_dcnt: %u\n"
+				"tomodem__dcnt: %u\n"
+				"rx_flow_control_disable_count: %u\n"
+				"rx_flow_control_enable_count: %u\n"
+				"rx_flow_control_triggered: %u\n"
+				"max_num_pkts_pending_with_bam: %u\n"
+				"max_bytes_pending_with_bam: %u\n"
+				"delayed_bam_mux_write_done: %u\n"
+				"tx_buf_len:	 %u\n"
+				"rx_buf_len:	 %u\n"
+				"data_ch_open: %d\n"
+				"data_ch_ready: %d\n"
+				"skb_expand_cnt: %lu\n",
+				i, port, &port->data_ch,
+				d->to_host, d->to_modem,
+				d->pending_pkts_with_bam,
+				d->pending_bytes_with_bam,
+				d->tohost_drp_cnt, d->tomodem_drp_cnt,
+				d->rx_flow_control_disable,
+				d->rx_flow_control_enable,
+				d->rx_flow_control_triggered,
+				d->max_num_pkts_pending_with_bam,
+				d->max_bytes_pending_with_bam,
+				d->delayed_bam_mux_write_done,
+				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
+				test_bit(BAM_CH_OPENED, &d->flags),
+				test_bit(BAM_CH_READY, &d->flags),
+				d->skb_expand_cnt);
+
+		spin_unlock(&port->port_lock_dl);
+		spin_unlock_irqrestore(&port->port_lock_ul, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/*
+ * debugfs write handler: any write zeroes every per-port counter.
+ * The write payload itself is ignored; the full count is consumed.
+ */
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int i;
+	unsigned long flags;
+
+	for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+
+		/* same UL-outer/DL-inner nesting as the read path */
+		spin_lock_irqsave(&port->port_lock_ul, flags);
+		spin_lock(&port->port_lock_dl);
+
+		d = &port->data_ch;
+
+		d->to_host = 0;
+		d->to_modem = 0;
+		d->pending_pkts_with_bam = 0;
+		d->pending_bytes_with_bam = 0;
+		d->tohost_drp_cnt = 0;
+		d->tomodem_drp_cnt = 0;
+		d->rx_flow_control_disable = 0;
+		d->rx_flow_control_enable = 0;
+		d->rx_flow_control_triggered = 0;
+		d->max_num_pkts_pending_with_bam = 0;
+		d->max_bytes_pending_with_bam = 0;
+		d->delayed_bam_mux_write_done = 0;
+		d->skb_expand_cnt = 0;
+
+		spin_unlock(&port->port_lock_dl);
+		spin_unlock_irqrestore(&port->port_lock_ul, flags);
+	}
+	return count;
+}
+
+/* debugfs "status" file: read dumps stats, any write resets them. */
+static const struct file_operations gbam_stats_ops = {
+	.read = gbam_read_stats,
+	.write = gbam_reset_stats,
+};
+
+/* Root of the usb_rmnet debugfs tree; NULL until first successful init. */
+static struct dentry *gbam_dent;
+
+/*
+ * Create /sys/kernel/debug/usb_rmnet/status.  Idempotent: a second
+ * call is a no-op while gbam_dent is set.  On partial failure the
+ * directory is removed again so state stays all-or-nothing.
+ */
+static void gbam_debugfs_init(void)
+{
+	struct dentry *dfile;
+
+	if (gbam_dent)
+		return;
+
+	gbam_dent = debugfs_create_dir("usb_rmnet", NULL);
+	if (!gbam_dent || IS_ERR(gbam_dent))
+		return;
+
+	dfile = debugfs_create_file("status", 0444, gbam_dent, NULL,
+			&gbam_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		debugfs_remove(gbam_dent);
+		gbam_dent = NULL;
+		return;
+	}
+}
+/* Tear down the usb_rmnet debugfs tree created by gbam_debugfs_init(). */
+static void gbam_debugfs_remove(void)
+{
+	if (!gbam_dent)
+		return;
+
+	/*
+	 * Fix: debugfs_remove_recursive() already removes gbam_dent
+	 * itself; the original's follow-up debugfs_remove(gbam_dent)
+	 * operated on an already-freed dentry (double remove).
+	 */
+	debugfs_remove_recursive(gbam_dent);
+	gbam_dent = NULL;
+}
+#else
+/* CONFIG_DEBUG_FS disabled: the stats hooks compile away to no-ops. */
+static inline void gbam_debugfs_init(void) {}
+static inline void gbam_debugfs_remove(void) {}
+#endif
+
+/*
+ * Tear down the USB side of a bam_dmux port on cable disconnect.
+ *
+ * @gr:   the function driver's data port (endpoints, cdev)
+ * @func: which bam_dmux function slot to disconnect
+ *
+ * port_lock serializes the whole connect/disconnect life cycle
+ * (last_event, work queueing); the UL/DL locks protect port_usb as
+ * seen by the datapath.  The DMUX channel itself is closed later in
+ * gbam_disconnect_work.
+ */
+void gbam_disconnect(struct data_port *gr, enum bam_dmux_func_type func)
+{
+	struct gbam_port *port;
+	unsigned long flags, flags_ul;
+	struct bam_ch_info *d;
+
+	pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, func);
+
+	if (func >= BAM_DMUX_NUM_FUNCS) {
+		pr_err("%s: invalid bam portno#%d\n", __func__, func);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+	port = bam_ports[func].port;
+
+	if (!port) {
+		pr_err("%s: NULL port\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+	/* Already disconnected due to suspend with remote wake disabled */
+	if (port->last_event == U_BAM_DISCONNECT_E) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	/*
+	 * Publish gr as port_usb so the free routines below can reach
+	 * the endpoints, then clear it again under both I/O locks.
+	 */
+	port->port_usb = gr;
+
+	gbam_free_buffers(port);
+
+	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+	spin_lock(&port->port_lock_dl);
+	port->port_usb = NULL;
+	n_tx_req_queued = 0;
+	spin_unlock(&port->port_lock_dl);
+	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+	usb_ep_disable(gr->in);
+	/* disable endpoints */
+	if (gr->out)
+		usb_ep_disable(gr->out);
+
+	gr->in->driver_data = NULL;
+	if (gr->out)
+		gr->out->driver_data = NULL;
+
+	port->last_event = U_BAM_DISCONNECT_E;
+	queue_work(gbam_wq, &port->disconnect_w);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * Bring up the USB side of a bam_dmux port on cable connect.
+ *
+ * @gr:   the function driver's data port (must carry cdev->gadget)
+ * @func: which bam_dmux function slot to connect
+ *
+ * Records the USB handles and zeroes the stats under the I/O locks,
+ * enables the endpoints, then defers the DMUX channel open to
+ * connect_w.  Returns 0 or a -errno; on endpoint failure the IN ep is
+ * rolled back before returning.
+ */
+int gbam_connect(struct data_port *gr, enum bam_dmux_func_type func)
+{
+	struct gbam_port *port;
+	struct bam_ch_info *d;
+	int ret;
+	unsigned long flags, flags_ul;
+
+	pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, func);
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!gr->cdev->gadget) {
+		pr_err("%s: gadget handle not passed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (func >= BAM_DMUX_NUM_FUNCS) {
+		pr_err("%s: invalid portno#%d\n", __func__, func);
+		return -ENODEV;
+	}
+
+	port = bam_ports[func].port;
+
+	if (!port) {
+		pr_err("%s: NULL port\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d = &port->data_ch;
+
+	/* publish USB handles and reset stats atomically w.r.t. datapath */
+	spin_lock_irqsave(&port->port_lock_ul, flags_ul);
+	spin_lock(&port->port_lock_dl);
+	port->port_usb = gr;
+	port->gadget = port->port_usb->cdev->gadget;
+
+	d->to_host = 0;
+	d->to_modem = 0;
+	d->pending_pkts_with_bam = 0;
+	d->pending_bytes_with_bam = 0;
+	d->tohost_drp_cnt = 0;
+	d->tomodem_drp_cnt = 0;
+	d->rx_flow_control_disable = 0;
+	d->rx_flow_control_enable = 0;
+	d->rx_flow_control_triggered = 0;
+	d->max_num_pkts_pending_with_bam = 0;
+	d->max_bytes_pending_with_bam = 0;
+	d->delayed_bam_mux_write_done = 0;
+
+	spin_unlock(&port->port_lock_dl);
+	spin_unlock_irqrestore(&port->port_lock_ul, flags_ul);
+
+	ret = usb_ep_enable(gr->in);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%pK\n",
+			__func__, gr->in);
+		goto exit;
+	}
+	gr->in->driver_data = port;
+
+	/*
+	 * DPL traffic is routed through BAM-DMUX on some targets.
+	 * DPL function has only 1 IN endpoint. Add out endpoint
+	 * checks for BAM-DMUX transport.
+	 */
+	if (gr->out) {
+		ret = usb_ep_enable(gr->out);
+		if (ret) {
+			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%pK\n",
+				__func__, gr->out);
+			gr->in->driver_data = NULL;
+			usb_ep_disable(gr->in);
+			goto exit;
+		}
+		gr->out->driver_data = port;
+	}
+
+	port->last_event = U_BAM_CONNECT_E;
+	queue_work(gbam_wq, &port->connect_w);
+
+	ret = 0;
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * One-time setup for a bam_dmux function: lazily create the shared
+ * workqueue, allocate the port, and bring up debugfs.  Returns 0 or a
+ * -errno.
+ */
+int gbam_setup(enum bam_dmux_func_type func)
+{
+	int ret;
+	bool created_wq = false;
+
+	pr_debug("%s: requested BAM port:%d\n", __func__, func);
+
+	if (func >= BAM_DMUX_NUM_FUNCS) {
+		pr_err("%s: Invalid num of ports count:%d\n", __func__, func);
+		return -EINVAL;
+	}
+
+	if (!gbam_wq) {
+		gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND |
+					WQ_MEM_RECLAIM, 1);
+		if (!gbam_wq) {
+			pr_err("%s: Unable to create workqueue gbam_wq\n",
+					__func__);
+			return -ENOMEM;
+		}
+		created_wq = true;
+	}
+
+	ret = gbam_port_alloc(func);
+	if (ret) {
+		pr_err("%s: Unable to alloc port:%d\n", __func__, func);
+		/*
+		 * Fix: only tear down the workqueue when this call created
+		 * it — gbam_wq is shared by all functions, and the original
+		 * destroyed it unconditionally without clearing the pointer,
+		 * leaving a dangling gbam_wq for other ports and for any
+		 * subsequent gbam_setup() call.
+		 */
+		if (created_wq) {
+			destroy_workqueue(gbam_wq);
+			gbam_wq = NULL;
+		}
+		return ret;
+	}
+
+	gbam_debugfs_init();
+
+	return 0;
+}
+
+/*
+ * Inverse of gbam_setup(): drop debugfs and free the port.  The shared
+ * workqueue is intentionally left alive for other functions.
+ */
+void gbam_cleanup(enum bam_dmux_func_type func)
+{
+	gbam_debugfs_remove();
+	gbam_port_free(func);
+}
+
+/*
+ * MBIM wrapper around gbam_connect(): builds a data_port for the given
+ * gadget/endpoints.  The data_port is freed in gbam_mbim_disconnect().
+ *
+ * NOTE(review): gr is kzalloc'ed, so gr->cdev is NULL here — the
+ * assignment "gr->cdev->gadget = g" looks like a NULL-pointer
+ * dereference (and gbam_connect() reads gr->cdev->gadget too).
+ * Confirm whether data_port embeds cdev storage or a cdev must be
+ * allocated/assigned first.
+ */
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+			struct usb_ep *out)
+{
+	struct data_port *gr;
+
+	gr = kzalloc(sizeof(*gr), GFP_ATOMIC);
+	if (!gr)
+		return -ENOMEM;
+	gr->in = in;
+	gr->out = out;
+	gr->cdev->gadget = g;
+
+	return gbam_connect(gr, BAM_DMUX_FUNC_MBIM);
+}
+
+/*
+ * MBIM wrapper around gbam_disconnect(): recovers the data_port that
+ * gbam_mbim_connect() allocated and frees it after teardown.
+ */
+void gbam_mbim_disconnect(void)
+{
+	struct gbam_port *port = bam_ports[BAM_DMUX_FUNC_MBIM].port;
+	struct data_port *gr;
+
+	/*
+	 * Fix: the original read port->port_usb before checking port,
+	 * crashing if disconnect arrives when setup never ran (or after
+	 * cleanup).
+	 */
+	if (!port) {
+		pr_err("%s: NULL port\n", __func__);
+		return;
+	}
+
+	gr = port->port_usb;
+	if (!gr) {
+		pr_err("%s: port_usb is NULL\n", __func__);
+		return;
+	}
+
+	gbam_disconnect(gr, BAM_DMUX_FUNC_MBIM);
+	kfree(gr);
+}
+
+/*
+ * Idempotent MBIM setup.  The guard checks the RMNET slot because this
+ * patch aliases BAM_DMUX_FUNC_MBIM = 0 = BAM_DMUX_FUNC_RMNET (see
+ * u_rmnet.h), so both names refer to the same bam_ports[] entry —
+ * presumably intentional; confirm against u_rmnet.h.
+ */
+int gbam_mbim_setup(void)
+{
+	int ret = 0;
+
+	if (!bam_ports[BAM_DMUX_FUNC_RMNET].port)
+		ret = gbam_setup(BAM_DMUX_FUNC_MBIM);
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h
index f207110..66bef35 100644
--- a/drivers/usb/gadget/function/u_rmnet.h
+++ b/drivers/usb/gadget/function/u_rmnet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
enum bam_dmux_func_type {
BAM_DMUX_FUNC_RMNET,
- BAM_DMUX_FUNC_MBIM,
+ BAM_DMUX_FUNC_MBIM = 0,
BAM_DMUX_FUNC_DPL,
BAM_DMUX_NUM_FUNCS,
};
@@ -76,28 +76,15 @@
NR_XPORT_TYPES
};
-static inline int gbam_setup(enum bam_dmux_func_type func)
-{
- return 0;
-}
-
-static inline void gbam_cleanup(enum bam_dmux_func_type func)
-{
-}
-
-static inline int gbam_connect(struct data_port *gr,
- enum bam_dmux_func_type func)
-{
- return 0;
-}
-
-static inline void gbam_disconnect(struct data_port *gr,
- enum bam_dmux_func_type func)
-{
-}
+int gbam_connect(struct data_port *gr, enum bam_dmux_func_type func);
+void gbam_disconnect(struct data_port *gr, enum bam_dmux_func_type func);
+void gbam_cleanup(enum bam_dmux_func_type func);
+int gbam_setup(enum bam_dmux_func_type func);
+int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
+ struct usb_ep *out);
int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in,
- struct usb_ep *out);
+ struct usb_ep *out);
void gbam_mbim_disconnect(void);
int gbam_mbim_setup(void);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 4e944c0..538d56e 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1213,6 +1213,10 @@
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
+ dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
+ gadget->dev.dma_parms = parent->dma_parms;
+ gadget->dev.dma_mask = parent->dma_mask;
+
if (release)
gadget->dev.release = release;
else
diff --git a/gen_headers_arm.bp b/gen_headers_arm.bp
index b5a546f..e867834 100644
--- a/gen_headers_arm.bp
+++ b/gen_headers_arm.bp
@@ -148,6 +148,7 @@
"linux/b1lli.h",
"linux/batadv_packet.h",
"linux/batman_adv.h",
+ "linux/batterydata-interface.h",
"linux/baycom.h",
"linux/bcache.h",
"linux/bcm933xx_hcs.h",
@@ -623,6 +624,7 @@
"linux/virtio_scsi.h",
"linux/virtio_types.h",
"linux/virtio_vsock.h",
+ "linux/vm_bms.h",
"linux/vm_sockets.h",
"linux/vm_sockets_diag.h",
"linux/vmcore.h",
diff --git a/gen_headers_arm64.bp b/gen_headers_arm64.bp
index 17616f3..8b884b3 100644
--- a/gen_headers_arm64.bp
+++ b/gen_headers_arm64.bp
@@ -143,6 +143,7 @@
"linux/b1lli.h",
"linux/batadv_packet.h",
"linux/batman_adv.h",
+ "linux/batterydata-interface.h",
"linux/baycom.h",
"linux/bcache.h",
"linux/bcm933xx_hcs.h",
@@ -617,6 +618,7 @@
"linux/virtio_scsi.h",
"linux/virtio_types.h",
"linux/virtio_vsock.h",
+ "linux/vm_bms.h",
"linux/vm_sockets.h",
"linux/vm_sockets_diag.h",
"linux/vmcore.h",
diff --git a/include/linux/batterydata-interface.h b/include/linux/batterydata-interface.h
new file mode 100644
index 0000000..aa3acd26
--- /dev/null
+++ b/include/linux/batterydata-interface.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2015, 2018, 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <uapi/linux/batterydata-interface.h>
+
+int config_battery_data(struct bms_battery_data *profile);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index babac56..e3dcf04 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -922,7 +922,7 @@
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1D1E, /* EQUIP ID 1 */
+ 0x1D29, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/uapi/linux/batterydata-interface.h b/include/uapi/linux/batterydata-interface.h
new file mode 100644
index 0000000..07cd900
--- /dev/null
+++ b/include/uapi/linux/batterydata-interface.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef __BATTERYDATA_LIB_H__
+#define __BATTERYDATA_LIB_H__
+
+#include <linux/ioctl.h>
+
+/**
+ * struct battery_params - Battery profile data to be exchanged.
+ * @soc: SOC (state of charge) of the battery
+ * @ocv_uv: OCV (open circuit voltage) of the battery
+ * @rbatt_sf: RBATT scaling factor
+ * @batt_temp: Battery temperature in deci-degree.
+ * @slope: Slope of the OCV-SOC curve.
+ * @fcc_mah: FCC (full charge capacity) of the battery.
+ */
+struct battery_params {
+ int soc;
+ int ocv_uv;
+ int rbatt_sf;
+ int batt_temp;
+ int slope;
+ int fcc_mah;
+};
+
+/* IOCTLs to query battery profile data */
+#define BPIOCXSOC _IOWR('B', 0x01, struct battery_params) /* SOC */
+#define BPIOCXRBATT _IOWR('B', 0x02, struct battery_params) /* RBATT SF */
+#define BPIOCXSLOPE _IOWR('B', 0x03, struct battery_params) /* SLOPE */
+#define BPIOCXFCC _IOWR('B', 0x04, struct battery_params) /* FCC */
+
+#endif
diff --git a/include/uapi/linux/vm_bms.h b/include/uapi/linux/vm_bms.h
new file mode 100644
index 0000000..db537f4
--- /dev/null
+++ b/include/uapi/linux/vm_bms.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef __VM_BMS_H__
+#define __VM_BMS_H__
+
+#define VM_BMS_DEVICE "/dev/vm_bms"
+#define MAX_FIFO_REGS 8
+
+/**
+ * struct qpnp_vm_bms_data - vm-bms sample data (passed to userspace)
+ * @num_fifo:		count of valid fifo averages
+ * @fifo_uv:		array of fifo averages in uv
+ * @sample_interval_ms:	sample interval of the fifo data in ms
+ * @sample_count:	total samples in one fifo
+ * @acc_uv:		averaged accumulator value in uv
+ * @acc_count:		num of accumulated samples
+ * @seq_num:		sequence number of the data
+ * (fifo_uv is sized to MAX_FIFO_REGS entries per sample)
+ */
+struct qpnp_vm_bms_data {
+	unsigned int num_fifo;
+	unsigned int fifo_uv[MAX_FIFO_REGS];
+	unsigned int sample_interval_ms;
+	unsigned int sample_count;
+	unsigned int acc_uv;
+	unsigned int acc_count;
+	unsigned int seq_num;
+};
+
+enum vmbms_power_usecase {
+ VMBMS_IGNORE_ALL_BIT = 1,
+ VMBMS_VOICE_CALL_BIT = (1 << 4),
+ VMBMS_STATIC_DISPLAY_BIT = (1 << 5),
+};
+
+#endif /* __VM_BMS_H__ */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b42e72c..8246b65 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -580,10 +580,12 @@
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
u32 delta_us;
- if (!delta)
- delta = 1;
- delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- tcp_rcv_rtt_update(tp, delta_us, 0);
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ if (!delta)
+ delta = 1;
+ delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ tcp_rcv_rtt_update(tp, delta_us, 0);
+ }
}
}
@@ -2929,9 +2931,11 @@
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED) {
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
- u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- seq_rtt_us = ca_rtt_us = delta_us;
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ ca_rtt_us = seq_rtt_us;
+ }
}
rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
if (seq_rtt_us < 0)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2a2e13..5347e80 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2957,13 +2957,12 @@
#endif
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
-
- /* Save stamp of the first retransmit. */
- if (!tp->retrans_stamp)
- tp->retrans_stamp = tcp_skb_timestamp(skb);
-
}
+ /* Save stamp of the first (attempted) retransmit. */
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_skb_timestamp(skb);
+
if (tp->undo_retrans < 0)
tp->undo_retrans = 0;
tp->undo_retrans += tcp_skb_pcount(skb);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index dda4e53..5c16564 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,33 +22,21 @@
#include <linux/gfp.h>
#include <net/tcp.h>
-static u32 tcp_retransmit_stamp(const struct sock *sk)
-{
- u32 start_ts = tcp_sk(sk)->retrans_stamp;
-
- if (unlikely(!start_ts)) {
- struct sk_buff *head = tcp_rtx_queue_head(sk);
-
- if (!head)
- return 0;
- start_ts = tcp_skb_timestamp(head);
- }
- return start_ts;
-}
-
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
u32 elapsed, start_ts;
+ s32 remaining;
- start_ts = tcp_retransmit_stamp(sk);
- if (!icsk->icsk_user_timeout || !start_ts)
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (!icsk->icsk_user_timeout)
return icsk->icsk_rto;
elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
- if (elapsed >= icsk->icsk_user_timeout)
+ remaining = icsk->icsk_user_timeout - elapsed;
+ if (remaining <= 0)
return 1; /* user timeout has passed; fire ASAP */
- else
- return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
+
+ return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
static void set_tcp_default(void)
@@ -204,7 +192,20 @@
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
+static unsigned int tcp_model_timeout(struct sock *sk,
+ unsigned int boundary,
+ unsigned int rto_base)
+{
+ unsigned int linear_backoff_thresh, timeout;
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * rto_base;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ return jiffies_to_msecs(timeout);
+}
/**
* retransmits_timed_out() - returns true if this connection has timed out
* @sk: The current socket
@@ -222,27 +223,21 @@
unsigned int boundary,
unsigned int timeout)
{
- const unsigned int rto_base = TCP_RTO_MIN;
- unsigned int linear_backoff_thresh, start_ts;
+ unsigned int start_ts;
if (!inet_csk(sk)->icsk_retransmits)
return false;
- start_ts = tcp_retransmit_stamp(sk);
- if (!start_ts)
- return false;
-
+ start_ts = tcp_sk(sk)->retrans_stamp;
if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ unsigned int rto_base = TCP_RTO_MIN;
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * rto_base;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
- timeout = jiffies_to_msecs(timeout);
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ rto_base = tcp_timeout_init(sk);
+ timeout = tcp_model_timeout(sk, boundary, rto_base);
}
- return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
+
+ return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}
/* A write timeout has occurred. Process the after effects. */
@@ -541,14 +536,13 @@
tcp_enter_loss(sk);
+ icsk->icsk_retransmits++;
if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
/* Retransmission failed because of local congestion,
- * do not backoff.
+ * Let senders fight for local resources conservatively.
*/
- if (!icsk->icsk_retransmits)
- icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
+ TCP_RESOURCE_PROBE_INTERVAL,
TCP_RTO_MAX);
goto out;
}
@@ -569,7 +563,6 @@
* the 120 second clamps though!
*/
icsk->icsk_backoff++;
- icsk->icsk_retransmits++;
out_reset_timer:
/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is