Merge "diag: Add missing SSID range" into msm-3.4
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index fc886ed..43a79b5 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -347,6 +347,7 @@
.update_time = 60000,
.max_voltage = MAX_VOLTAGE_MV,
.min_voltage = 3200,
+ .uvd_thresh_voltage = 4050,
.resume_voltage_delta = 100,
.term_current = 100,
.cool_temp = 10,
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index a33b62b..13d8b3b 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -383,6 +383,16 @@
apq8064_sdc3_pdata->status_irq = 0;
}
}
+ if (machine_is_apq8064_cdp()) {
+ int i;
+
+ for (i = 0;
+ i < apq8064_sdc3_pdata->pin_data->pad_data->\
+ drv->size;
+ i++)
+ apq8064_sdc3_pdata->pin_data->pad_data->\
+ drv->on[i].val = GPIO_CFG_10MA;
+ }
apq8064_add_sdcc(3, apq8064_sdc3_pdata);
}
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index f884631..08c3408 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -124,6 +124,10 @@
#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (MSM_QFPROM_BASE + 0x23c)
#define QFPROM_RAW_OEM_CONFIG_ROW0_LSB (MSM_QFPROM_BASE + 0x220)
+/* PCIE AXI address space */
+#define PCIE_AXI_BAR_PHYS 0x08000000
+#define PCIE_AXI_BAR_SIZE SZ_128M
+
/* PCIe power enable pmic gpio */
#define PCIE_PWR_EN_PMIC_GPIO 13
#define PCIE_RST_N_PMIC_MPP 1
@@ -2063,6 +2067,8 @@
static struct msm_pcie_platform msm_pcie_platform_data = {
.gpio = msm_pcie_gpio_info,
+ .axi_addr = PCIE_AXI_BAR_PHYS,
+ .axi_size = PCIE_AXI_BAR_SIZE,
};
static int __init mpq8064_pcie_enabled(void)
diff --git a/arch/arm/mach-msm/board-8930-gpiomux.c b/arch/arm/mach-msm/board-8930-gpiomux.c
index 000f080..e0f012a 100644
--- a/arch/arm/mach-msm/board-8930-gpiomux.c
+++ b/arch/arm/mach-msm/board-8930-gpiomux.c
@@ -250,6 +250,28 @@
.drv = GPIOMUX_DRV_2MA,
.pull = GPIOMUX_PULL_DOWN,
};
+
+static struct gpiomux_setting hdmi_active_3_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+ .dir = GPIOMUX_IN,
+};
+
+static struct gpiomux_setting hdmi_active_4_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+ .dir = GPIOMUX_OUT_HIGH,
+};
+
+static struct gpiomux_setting hdmi_active_5_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+ .dir = GPIOMUX_OUT_HIGH,
+};
+
#endif
#if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
@@ -593,6 +615,32 @@
[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
},
},
+
+};
+
+static struct msm_gpiomux_config msm8930_mhl_configs[] __initdata = {
+ {
+ .gpio = 72,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hdmi_active_3_cfg,
+ [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 71,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hdmi_active_4_cfg,
+ [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 73,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &hdmi_active_5_cfg,
+ [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg,
+ },
+ },
+
};
#endif
@@ -699,6 +747,9 @@
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
msm_gpiomux_install(msm8960_hdmi_configs,
ARRAY_SIZE(msm8960_hdmi_configs));
+ if (machine_is_msm8930_fluid())
+ msm_gpiomux_install(msm8930_mhl_configs,
+ ARRAY_SIZE(msm8930_mhl_configs));
#endif
msm_gpiomux_install(msm8960_mdp_vsync_configs,
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index ec80be9..632fc9a 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -116,7 +116,7 @@
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
- .gpu_freq = 450000000,
+ .gpu_freq = 400000000,
.bus_freq = 3,
.io_fraction = 0,
},
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index 5abb9f8..a1a4b7c 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -95,6 +95,8 @@
PM8XXX_GPIO_INPUT(11, PM_GPIO_PULL_UP_30),
/* haptics gpio */
PM8XXX_GPIO_OUTPUT_FUNC(7, 0, PM_GPIO_FUNC_1),
+ /* MHL PWR EN */
+ PM8XXX_GPIO_OUTPUT_VIN(5, 1, PM_GPIO_VIN_VPH),
};
/* Initial pm8038 MPP configurations */
@@ -210,6 +212,7 @@
.update_time = 60000,
.max_voltage = MAX_VOLTAGE_MV,
.min_voltage = 3200,
+ .uvd_thresh_voltage = 4050,
.resume_voltage_delta = 100,
.term_current = 100,
.cool_temp = 10,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index bc370ba..f06a1b7 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -90,6 +90,7 @@
REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar1p1-slim"),
REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar1p1-slim"),
REGULATOR_SUPPLY("vddp", "0-0048"),
+ REGULATOR_SUPPLY("mhl_iovcc18", "0-0039"),
};
VREG_CONSUMERS(L12) = {
REGULATOR_SUPPLY("8038_l12", NULL),
@@ -125,6 +126,7 @@
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar-slim"),
REGULATOR_SUPPLY("VDDD_CDC_D", "sitar1p1-slim"),
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("mhl_avcc12", "0-0039"),
};
VREG_CONSUMERS(L21) = {
REGULATOR_SUPPLY("8038_l21", NULL),
@@ -194,6 +196,7 @@
VREG_CONSUMERS(EXT_5V) = {
REGULATOR_SUPPLY("ext_5v", NULL),
REGULATOR_SUPPLY("hdmi_mvs", "hdmi_msm.0"),
+ REGULATOR_SUPPLY("mhl_usb_hs_switch", "msm_otg"),
};
VREG_CONSUMERS(EXT_OTG_SW) = {
REGULATOR_SUPPLY("ext_otg_sw", NULL),
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 1a61dbb..e695241 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -106,6 +106,11 @@
#define KS8851_IRQ_GPIO 90
#define HAP_SHIFT_LVL_OE_GPIO 47
+#define HDMI_MHL_MUX_GPIO 73
+#define MHL_GPIO_INT 72
+#define MHL_GPIO_RESET 71
+#define MHL_GPIO_PWR_EN 5
+
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
struct sx150x_platform_data msm8930_sx150x_data[] = {
@@ -776,6 +781,8 @@
.cfilt2_mv = 1800,
.bias1_cfilt_sel = SITAR_CFILT1_SEL,
.bias2_cfilt_sel = SITAR_CFILT2_SEL,
+ .bias1_cap_mode = MICBIAS_EXT_BYP_CAP,
+ .bias2_cap_mode = MICBIAS_NO_EXT_BYP_CAP,
},
.regulator = {
{
@@ -840,6 +847,8 @@
.cfilt2_mv = 1800,
.bias1_cfilt_sel = SITAR_CFILT1_SEL,
.bias2_cfilt_sel = SITAR_CFILT2_SEL,
+ .bias1_cap_mode = MICBIAS_EXT_BYP_CAP,
+ .bias2_cap_mode = MICBIAS_NO_EXT_BYP_CAP,
},
.regulator = {
{
@@ -1732,7 +1741,7 @@
#define MXT_TS_GPIO_IRQ 11
#define MXT_TS_RESET_GPIO 52
-static const u8 mxt_config_data_8930[] = {
+static const u8 mxt_config_data_8930_v1[] = {
/* T6 Object */
0, 0, 0, 0, 0, 0,
/* T38 Object */
@@ -1777,6 +1786,43 @@
0, 0, 0, 0,
};
+static const u8 mxt_config_data_8930_v2[] = {
+ /* T6 Object */
+ 0, 0, 0, 0, 0, 0,
+ /* T38 Object */
+ 15, 4, 0, 9, 7, 12, 0, 0,
+ /* T7 Object */
+ 32, 16, 50,
+ /* T8 Object */
+ 30, 0, 5, 10, 0, 0, 10, 10, 0, 0,
+ /* T9 Object */
+ 131, 0, 0, 19, 11, 0, 16, 50, 1, 3,
+ 12, 7, 2, 0, 4, 5, 2, 10, 43, 4,
+ 54, 2, -25, 29, 38, 18, 143, 40, 207, 80,
+ 17, 5, 50, 50, 0,
+ /* T18 Object */
+ 0, 0,
+ /* T19 Object */
+ 0, 0, 0, 0, 0, 0,
+ /* T25 Object */
+ 0, 0, 0, 0, 0, 0,
+ /* T42 Object */
+ 3, 60, 20, 20, 150, 0, 0, 0,
+ /* T46 Object */
+ 0, 3, 28, 28, 0, 0, 1, 0, 0,
+ /* T47 Object */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* T48 Object */
+ 1, 3, 82, 0, 0, 0, 0, 0, 0, 0,
+ 16, 30, 0, 6, 6, 0, 0, 124, 4, 100,
+ 0, 0, 0, 5, 0, 42, 0, 1, 0, 40,
+ 52, 20, 0, 0, 0, 50, 1, 5, 2, 1,
+ 4, 5, 3, -25, 29, 38, 18, 143, 40, 207,
+ 80, 10, 5, 2,
+ /* T55 Object */
+ 0, 0, 0, 0,
+};
+
static ssize_t mxt224e_vkeys_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -1824,12 +1870,33 @@
static struct mxt_config_info mxt_config_array[] = {
{
- .config = mxt_config_data_8930,
- .config_length = ARRAY_SIZE(mxt_config_data_8930),
+ .config = mxt_config_data_8930_v1,
+ .config_length = ARRAY_SIZE(mxt_config_data_8930_v1),
.family_id = 0x81,
.variant_id = 0x01,
.version = 0x10,
.build = 0xAA,
+ .bootldr_id = MXT_BOOTLOADER_ID_224E,
+ .fw_name = "atmel_8930_fluid_v2_0_AB.hex",
+ },
+ {
+ .config = mxt_config_data_8930_v2,
+ .config_length = ARRAY_SIZE(mxt_config_data_8930_v2),
+ .family_id = 0x81,
+ .variant_id = 0x15,
+ .version = 0x11,
+ .build = 0xAA,
+ .bootldr_id = MXT_BOOTLOADER_ID_224E,
+ .fw_name = "atmel_8930_fluid_v2_0_AB.hex",
+ },
+ {
+ .config = mxt_config_data_8930_v2,
+ .config_length = ARRAY_SIZE(mxt_config_data_8930_v2),
+ .family_id = 0x81,
+ .variant_id = 0x01,
+ .version = 0x20,
+ .build = 0xAB,
+ .bootldr_id = MXT_BOOTLOADER_ID_224E,
},
};
@@ -1861,6 +1928,28 @@
},
};
+#define MHL_POWER_GPIO PM8038_GPIO_PM_TO_SYS(MHL_GPIO_PWR_EN)
+static struct msm_mhl_platform_data mhl_platform_data = {
+ .irq = MSM_GPIO_TO_INT(MHL_GPIO_INT),
+ .gpio_mhl_int = MHL_GPIO_INT,
+ .gpio_mhl_reset = MHL_GPIO_RESET,
+ .gpio_mhl_power = MHL_POWER_GPIO,
+ .gpio_hdmi_mhl_mux = HDMI_MHL_MUX_GPIO,
+};
+
+static struct i2c_board_info sii_device_info[] __initdata = {
+ {
+ /*
+ * keeps SI 8334 as the default
+ * MHL TX
+ */
+ I2C_BOARD_INFO("sii8334", 0x39),
+ .platform_data = &mhl_platform_data,
+ .flags = I2C_CLIENT_WAKE,
+ },
+};
+
+
#ifdef MSM8930_PHASE_2
#define GPIO_VOLUME_UP PM8038_GPIO_PM_TO_SYS(3)
@@ -2395,6 +2484,12 @@
mxt_device_info_8930,
ARRAY_SIZE(mxt_device_info_8930),
},
+ {
+ I2C_SURF | I2C_FFA | I2C_LIQUID | I2C_FLUID,
+ MSM_8930_GSBI9_QUP_I2C_BUS_ID,
+ sii_device_info,
+ ARRAY_SIZE(sii_device_info),
+ },
};
#endif /* CONFIG_I2C */
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 19564e9..17b0b6f 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -418,6 +418,7 @@
.update_time = 60000,
.max_voltage = MAX_VOLTAGE_MV,
.min_voltage = 3200,
+ .uvd_thresh_voltage = 4050,
.resume_voltage_delta = 100,
.term_current = 100,
.cool_temp = 10,
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 251c1de..4721c94 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2280,60 +2280,13 @@
},
};
-#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
-static void mhl_sii_reset_gpio(int on)
-{
- gpio_set_value(MHL_GPIO_RESET, on);
- return;
-}
-
-/*
- * Request for GPIO allocations
- * Set appropriate GPIO directions
- */
-static int mhl_sii_gpio_setup(int on)
-{
- int ret;
-
- if (on) {
- ret = gpio_request(MHL_GPIO_RESET, "W_RST#");
- if (ret < 0) {
- pr_err("GPIO RESET request failed: %d\n", ret);
- return -EBUSY;
- }
- ret = gpio_direction_output(MHL_GPIO_RESET, 1);
- if (ret < 0) {
- pr_err("SET GPIO RESET direction failed: %d\n", ret);
- gpio_free(MHL_GPIO_RESET);
- return -EBUSY;
- }
- ret = gpio_request(MHL_GPIO_INT, "W_INT");
- if (ret < 0) {
- pr_err("GPIO INT request failed: %d\n", ret);
- gpio_free(MHL_GPIO_RESET);
- return -EBUSY;
- }
- ret = gpio_direction_input(MHL_GPIO_INT);
- if (ret < 0) {
- pr_err("SET GPIO INTR direction failed: %d\n", ret);
- gpio_free(MHL_GPIO_RESET);
- gpio_free(MHL_GPIO_INT);
- return -EBUSY;
- }
- } else {
- gpio_free(MHL_GPIO_RESET);
- gpio_free(MHL_GPIO_INT);
- }
-
- return 0;
-}
-
static struct msm_mhl_platform_data mhl_platform_data = {
.irq = MSM_GPIO_TO_INT(4),
- .gpio_setup = mhl_sii_gpio_setup,
- .reset_pin = mhl_sii_reset_gpio,
+ .gpio_mhl_int = MHL_GPIO_INT,
+ .gpio_mhl_reset = MHL_GPIO_RESET,
+ .gpio_mhl_power = 0,
+ .gpio_hdmi_mhl_mux = 0,
};
-#endif
static struct i2c_board_info sii_device_info[] __initdata = {
{
@@ -3040,6 +2993,10 @@
msm8960_i2c_devices[i].info,
msm8960_i2c_devices[i].len);
}
+
+ if (!mhl_platform_data.gpio_mhl_power)
+ pr_debug("mhl device configured for ext debug board\n");
+
#ifdef CONFIG_MSM_CAMERA
if (msm8960_camera_i2c_devices.machs & mach_mask)
i2c_register_board_info(msm8960_camera_i2c_devices.bus,
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index f5ce5a7..3c9bd36 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -154,16 +154,16 @@
#define GCC_APCS_CLK_DIAG REG_GCC(0x001C)
/* MUX source input identifiers. */
-#define cxo_to_bb_mux 0
-#define pll8_to_bb_mux 3
-#define pll8_acpu_to_bb_mux 3
-#define pll14_to_bb_mux 4
-#define gnd_to_bb_mux 6
-#define cxo_to_xo_mux 0
-#define gnd_to_xo_mux 3
-#define cxo_to_lpa_mux 1
-#define pll4_to_lpa_mux 2
-#define gnd_to_lpa_mux 6
+#define cxo_to_bb_mux 0
+#define pll8_to_bb_mux 3
+#define pll8_activeonly_to_bb_mux 3
+#define pll14_to_bb_mux 4
+#define gnd_to_bb_mux 6
+#define cxo_to_xo_mux 0
+#define gnd_to_xo_mux 3
+#define cxo_to_lpa_mux 1
+#define pll4_to_lpa_mux 2
+#define gnd_to_lpa_mux 6
/* Test Vector Macros */
#define TEST_TYPE_PER_LS 1
@@ -276,7 +276,7 @@
},
};
-static struct pll_vote_clk pll0_acpu_clk = {
+static struct pll_vote_clk pll0_activeonly_clk = {
.en_reg = BB_PLL_ENA_SC0_REG,
.en_mask = BIT(0),
.status_reg = BB_PLL0_STATUS_REG,
@@ -284,10 +284,10 @@
.soft_vote = &soft_vote_pll0,
.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
.c = {
- .dbg_name = "pll0_acpu_clk",
+ .dbg_name = "pll0_activeonly_clk",
.rate = 276000000,
.ops = &clk_ops_pll_acpu_vote,
- CLK_INIT(pll0_acpu_clk.c),
+ CLK_INIT(pll0_activeonly_clk.c),
.warned = true,
},
};
@@ -326,7 +326,7 @@
},
};
-static struct pll_vote_clk pll8_acpu_clk = {
+static struct pll_vote_clk pll8_activeonly_clk = {
.en_reg = BB_PLL_ENA_SC0_REG,
.en_mask = BIT(8),
.status_reg = BB_PLL8_STATUS_REG,
@@ -334,21 +334,21 @@
.soft_vote = &soft_vote_pll8,
.soft_vote_mask = PLL_SOFT_VOTE_ACPU,
.c = {
- .dbg_name = "pll8_acpu_clk",
+ .dbg_name = "pll8_activeonly_clk",
.rate = 384000000,
.ops = &clk_ops_pll_acpu_vote,
- CLK_INIT(pll8_acpu_clk.c),
+ CLK_INIT(pll8_activeonly_clk.c),
.warned = true,
},
};
-static struct pll_clk pll9_acpu_clk = {
+static struct pll_clk pll9_activeonly_clk = {
.mode_reg = SC_PLL0_MODE_REG,
.c = {
- .dbg_name = "pll9_acpu_clk",
+ .dbg_name = "pll9_activeonly_clk",
.rate = 440000000,
.ops = &clk_ops_local_pll,
- CLK_INIT(pll9_acpu_clk.c),
+ CLK_INIT(pll9_activeonly_clk.c),
.warned = true,
},
};
@@ -657,14 +657,20 @@
.ns_val = NS(23, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_bb_mux), \
}
static struct clk_freq_tbl clk_tbl_usb[] = {
- F_USB( 0, gnd, 1, 0, 0),
+ F_USB( 0, gnd, 1, 0, 0),
F_USB(60000000, pll8, 1, 5, 32),
F_END
};
+static struct clk_freq_tbl clk_tbl_usb_hs1_sys[] = {
+ F_USB( 0, gnd, 1, 0, 0),
+ F_USB(60000000, pll8_activeonly, 1, 5, 32),
+ F_END
+};
+
static struct clk_freq_tbl clk_tbl_usb_hsic_sys[] = {
- F_USB( 0, gnd, 1, 0, 0),
- F_USB(64000000, pll8_acpu, 1, 1, 6),
+ F_USB( 0, gnd, 1, 0, 0),
+ F_USB(64000000, pll8_activeonly, 1, 1, 6),
F_END
};
@@ -708,7 +714,7 @@
.ns_mask = (BM(23, 16) | BM(6, 0)),
.mnd_en_mask = BIT(8),
.set_rate = set_rate_mnd,
- .freq_tbl = clk_tbl_usb,
+ .freq_tbl = clk_tbl_usb_hs1_sys,
.current_freq = &rcg_dummy_freq,
.c = {
.dbg_name = "usb_hs1_sys_clk",
@@ -1617,9 +1623,9 @@
CLK_LOOKUP("pll8", pll8_clk.c, NULL),
CLK_LOOKUP("pll14", pll14_clk.c, NULL),
- CLK_LOOKUP("pll0", pll0_acpu_clk.c, "acpu"),
- CLK_LOOKUP("pll8", pll8_acpu_clk.c, "acpu"),
- CLK_LOOKUP("pll9", pll9_acpu_clk.c, "acpu"),
+ CLK_LOOKUP("pll0", pll0_activeonly_clk.c, "acpu"),
+ CLK_LOOKUP("pll8", pll8_activeonly_clk.c, "acpu"),
+ CLK_LOOKUP("pll9", pll9_activeonly_clk.c, "acpu"),
CLK_LOOKUP("measure", measure_clk.c, "debug"),
@@ -1822,7 +1828,7 @@
pll9_lval = readl_relaxed(SC_PLL0_L_VAL_REG);
if (pll9_lval == 0x1C)
- pll9_acpu_clk.c.rate = 550000000;
+ pll9_activeonly_clk.c.rate = 550000000;
/* Enable PLL4 source on the LPASS Primary PLL Mux */
regval = readl_relaxed(LCC_PRI_PLL_CLK_CTL_REG);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 069d738..8a41a7c 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -95,14 +95,6 @@
#define PCIE20_PHYS 0x1b500000
#define PCIE20_SIZE SZ_4K
-/* AXI address for PCIE device BAR resources */
-#define PCIE_AXI_BAR_PHYS 0x08000000
-#define PCIE_AXI_BAR_SIZE SZ_8M
-
-/* AXI address for PCIE device config space */
-#define PCIE_AXI_CONF_PHYS 0x08c00000
-#define PCIE_AXI_CONF_SIZE SZ_4K
-
static struct msm_watchdog_pdata msm_watchdog_pdata = {
.pet_time = 10000,
.bark_time = 11000,
@@ -1639,13 +1631,13 @@
static struct resource resources_msm_pcie[] = {
{
- .name = "parf",
+ .name = "pcie_parf",
.start = PCIE20_PARF_PHYS,
.end = PCIE20_PARF_PHYS + PCIE20_PARF_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "elbi",
+ .name = "pcie_elbi",
.start = PCIE20_ELBI_PHYS,
.end = PCIE20_ELBI_PHYS + PCIE20_ELBI_SIZE - 1,
.flags = IORESOURCE_MEM,
@@ -1656,18 +1648,6 @@
.end = PCIE20_PHYS + PCIE20_SIZE - 1,
.flags = IORESOURCE_MEM,
},
- {
- .name = "axi_bar",
- .start = PCIE_AXI_BAR_PHYS,
- .end = PCIE_AXI_BAR_PHYS + PCIE_AXI_BAR_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "axi_conf",
- .start = PCIE_AXI_CONF_PHYS,
- .end = PCIE_AXI_CONF_PHYS + PCIE_AXI_CONF_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
};
struct platform_device msm_device_pcie = {
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 198cd38..8607177 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -491,12 +491,23 @@
int (*gpio_config)(int on);
int (*init_irq)(void);
bool (*check_hdcp_hw_support)(void);
+ bool is_mhl_enabled;
};
struct msm_mhl_platform_data {
int irq;
- int (*gpio_setup)(int on);
- void (*reset_pin)(int on);
+ /* GPIO no. for mhl intr */
+ uint32_t gpio_mhl_int;
+ /* GPIO no. for mhl block reset */
+ uint32_t gpio_mhl_reset;
+ /*
+ * below gpios are specific to targets
+ * that have the integrated MHL soln.
+ */
+ /* GPIO no. for mhl block power */
+ uint32_t gpio_mhl_power;
+ /* GPIO no. for hdmi-mhl mux */
+ uint32_t gpio_hdmi_mhl_mux;
};
struct msm_i2c_platform_data {
diff --git a/arch/arm/mach-msm/include/mach/msm_pcie.h b/arch/arm/mach-msm/include/mach/msm_pcie.h
index 008c984..8bc4317 100644
--- a/arch/arm/mach-msm/include/mach/msm_pcie.h
+++ b/arch/arm/mach-msm/include/mach/msm_pcie.h
@@ -32,6 +32,8 @@
/* msm pcie platfrom data */
struct msm_pcie_platform {
struct msm_pcie_gpio_info_t *gpio;
+ uint32_t axi_addr;
+ uint32_t axi_size;
};
#endif
diff --git a/arch/arm/mach-msm/msm_watchdog.c b/arch/arm/mach-msm/msm_watchdog.c
index 7ac3f74..b471426 100644
--- a/arch/arm/mach-msm/msm_watchdog.c
+++ b/arch/arm/mach-msm/msm_watchdog.c
@@ -27,6 +27,7 @@
#include <asm/hardware/gic.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
+#include <asm/cacheflush.h>
#include <mach/scm.h>
#include <mach/socinfo.h>
#include "msm_watchdog.h"
@@ -105,6 +106,7 @@
/* Called from the FIQ bark handler */
void msm_wdog_bark_fin(void)
{
+ flush_cache_all();
pr_crit("\nApps Watchdog bark received - Calling Panic\n");
panic("Apps Watchdog Bark received\n");
}
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index 5818bef..f105356 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <asm/mach/pci.h>
#include <mach/gpiomux.h>
@@ -72,6 +73,9 @@
#define RD 0
#define WR 1
+/* PCIE AXI address space */
+#define PCIE_AXI_CONF_SIZE SZ_1M
+
/* debug mask sys interface */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
@@ -79,12 +83,15 @@
/* resources from device file */
enum msm_pcie_res {
+ /* platform defined resources */
MSM_PCIE_RES_PARF,
MSM_PCIE_RES_ELBI,
MSM_PCIE_RES_PCIE20,
- MSM_PCIE_RES_AXI_BAR,
- MSM_PCIE_RES_AXI_CONF,
- MSM_PCIE_MAX_RES
+ MSM_PCIE_MAX_PLATFORM_RES,
+
+ /* other resources */
+ MSM_PCIE_RES_AXI_CONF = MSM_PCIE_MAX_PLATFORM_RES,
+ MSM_PCIE_MAX_RES,
};
/* msm pcie device data */
@@ -107,11 +114,10 @@
/* resources */
static struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
- {"parf", 0, 0, 0},
- {"elbi", 0, 0, 0},
- {"pcie20", 0, 0, 0},
- {"axi_bar", 0, 0, 0},
- {"axi_conf", 0, 0, 0},
+ {"pcie_parf", 0, 0},
+ {"pcie_elbi", 0, 0},
+ {"pcie20", 0, 0},
+ {"pcie_axi_conf", 0, 0},
};
int msm_pcie_get_debug_mask(void)
@@ -350,8 +356,7 @@
static void __init msm_pcie_config_controller(void)
{
struct msm_pcie_dev_t *dev = &msm_pcie_dev;
- struct msm_pcie_res_info_t *axi_bar = &dev->res[MSM_PCIE_RES_AXI_BAR];
- struct msm_pcie_res_info_t *axi_conf = &dev->res[MSM_PCIE_RES_AXI_CONF];
+ struct resource *axi_conf = dev->res[MSM_PCIE_RES_AXI_CONF].resource;
/*
* program and enable address translation region 0 (device config
@@ -384,9 +389,9 @@
writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_CTRL1);
writel_relaxed(BIT(31), dev->pcie20 + PCIE20_PLR_IATU_CTRL2);
- writel_relaxed(axi_bar->start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
+ writel_relaxed(dev->axi_bar_start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UBAR);
- writel_relaxed(axi_bar->end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
+ writel_relaxed(dev->axi_bar_end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
writel_relaxed(MSM_PCIE_DEV_BAR_ADDR,
dev->pcie20 + PCIE20_PLR_IATU_LTAR);
writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UTAR);
@@ -404,8 +409,15 @@
for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
info = &dev->res[i];
- res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, info->name);
+ if (i < MSM_PCIE_MAX_PLATFORM_RES) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ info->name);
+ } else {
+ res = dev->res[i].resource;
+ if (request_resource(&iomem_resource, res))
+ res = NULL;
+ }
+
if (!res) {
pr_err("can't get %s resource\n", info->name);
rc = -ENOMEM;
@@ -419,14 +431,15 @@
break;
}
- info->start = res->start;
- info->end = res->end;
+ info->resource = res;
}
if (rc) {
while (i--) {
iounmap(dev->res[i].base);
dev->res[i].base = NULL;
+ if (i >= MSM_PCIE_MAX_PLATFORM_RES)
+ release_resource(dev->res[i].resource);
}
} else {
dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
@@ -445,6 +458,8 @@
for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
iounmap(msm_pcie_dev.res[i].base);
msm_pcie_dev.res[i].base = NULL;
+ if (i >= MSM_PCIE_MAX_PLATFORM_RES)
+ release_resource(msm_pcie_dev.res[i].resource);
}
msm_pcie_dev.parf = NULL;
@@ -463,6 +478,13 @@
if (nr != 0)
return 0;
+ /*
+ * specify linux PCI framework to allocate device memory (BARs)
+ * from msm_pcie_dev.dev_mem_res resource.
+ */
+ sys->mem_offset = 0;
+ pci_add_resource(&sys->resources, &msm_pcie_dev.dev_mem_res);
+
/* assert PCIe reset link to keep EP in reset */
gpio_set_value_cansleep(dev->gpio[MSM_PCIE_GPIO_RST_N].num,
dev->gpio[MSM_PCIE_GPIO_RST_N].on);
@@ -556,7 +578,8 @@
PCIE_DBG("bus %d\n", nr);
if (nr == 0)
- bus = pci_scan_bus(sys->busnr, &msm_pcie_ops, sys);
+ bus = pci_scan_root_bus(NULL, sys->busnr, &msm_pcie_ops, sys,
+ &sys->resources);
return bus;
}
@@ -578,6 +601,7 @@
static int __init msm_pcie_probe(struct platform_device *pdev)
{
const struct msm_pcie_platform *pdata;
+ struct resource *res;
int rc;
PCIE_DBG("\n");
@@ -589,6 +613,31 @@
msm_pcie_dev.clk = msm_pcie_clk_info;
msm_pcie_dev.res = msm_pcie_res_info;
+ /* device memory resource */
+ res = &msm_pcie_dev.dev_mem_res;
+ res->name = "pcie_dev_mem";
+ res->start = MSM_PCIE_DEV_BAR_ADDR;
+ res->end = res->start + pdata->axi_size - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* axi address space = axi bar space + axi config space */
+ msm_pcie_dev.axi_bar_start = pdata->axi_addr;
+ msm_pcie_dev.axi_bar_end = pdata->axi_addr + pdata->axi_size -
+ PCIE_AXI_CONF_SIZE - 1;
+
+ /* axi config space resource */
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ pr_err("can't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ msm_pcie_dev.res[MSM_PCIE_RES_AXI_CONF].resource = res;
+ res->name = msm_pcie_dev.res[MSM_PCIE_RES_AXI_CONF].name;
+ res->start = msm_pcie_dev.axi_bar_end + 1;
+ res->end = res->start + PCIE_AXI_CONF_SIZE - 1;
+ res->flags = IORESOURCE_MEM;
+
rc = msm_pcie_get_resources(msm_pcie_dev.pdev);
if (rc)
return rc;
@@ -632,7 +681,6 @@
static int __init msm_pcie_init(void)
{
PCIE_DBG("\n");
- pcibios_min_io = 0x10000000;
pcibios_min_mem = 0x10000000;
return platform_driver_probe(&msm_pcie_driver, msm_pcie_probe);
}
@@ -649,22 +697,30 @@
msm_pcie_fixup_early);
/*
- * actual physical (BAR) address of the device resources starts from 0x10xxxxxx;
- * the system axi address for the device resources starts from 0x08xxxxxx;
- * correct the device resource structure here; address translation unit handles
- * the required translations
+ * actual physical (BAR) address of the device resources starts from
+ * MSM_PCIE_DEV_BAR_ADDR; the system axi address for the device resources starts
+ * from msm_pcie_dev.axi_bar_start; correct the device resource structure here;
+ * address translation unit handles the required translations
*/
static void __devinit msm_pcie_fixup_final(struct pci_dev *dev)
{
int i;
+ struct resource *res;
PCIE_DBG("vendor 0x%x 0x%x\n", dev->vendor, dev->device);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- if (dev->resource[i].start & 0xFF000000) {
- dev->resource[i].start &= 0x00FFFFFF;
- dev->resource[i].start |= 0x08000000;
- dev->resource[i].end &= 0x00FFFFFF;
- dev->resource[i].end |= 0x08000000;
+ res = &dev->resource[i];
+ if (res->start & MSM_PCIE_DEV_BAR_ADDR) {
+ res->start -= MSM_PCIE_DEV_BAR_ADDR;
+ res->start += msm_pcie_dev.axi_bar_start;
+ res->end -= MSM_PCIE_DEV_BAR_ADDR;
+ res->end += msm_pcie_dev.axi_bar_start;
+
+ /* If Root Port, request for the changed resource */
+ if ((dev->vendor == PCIE_VENDOR_ID_RCP) &&
+ (dev->device == PCIE_DEVICE_ID_RCP)) {
+ insert_resource(&iomem_resource, res);
+ }
}
}
}
diff --git a/arch/arm/mach-msm/pcie.h b/arch/arm/mach-msm/pcie.h
index 4866ec5..fba6b11 100644
--- a/arch/arm/mach-msm/pcie.h
+++ b/arch/arm/mach-msm/pcie.h
@@ -45,10 +45,9 @@
/* resource info structure */
struct msm_pcie_res_info_t {
- char *name;
- uint32_t start;
- uint32_t end;
- void __iomem *base;
+ char *name;
+ struct resource *resource;
+ void __iomem *base;
};
/* msm pcie device structure */
@@ -64,6 +63,11 @@
void __iomem *elbi;
void __iomem *pcie20;
void __iomem *axi_conf;
+
+ uint32_t axi_bar_start;
+ uint32_t axi_bar_end;
+
+ struct resource dev_mem_res;
};
extern uint32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index f5f76f7..fdbc387 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -767,14 +767,14 @@
}
D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
- wake_lock_init(&smd_pkt_devp->pa_wake_lock, WAKE_LOCK_SUSPEND,
- smd_pkt_dev_name[smd_pkt_devp->i]);
- INIT_WORK(&smd_pkt_devp->packet_arrival_work, packet_arrival_worker);
-
file->private_data = smd_pkt_devp;
mutex_lock(&smd_pkt_devp->ch_lock);
if (smd_pkt_devp->ch == 0) {
+ wake_lock_init(&smd_pkt_devp->pa_wake_lock, WAKE_LOCK_SUSPEND,
+ smd_pkt_dev_name[smd_pkt_devp->i]);
+ INIT_WORK(&smd_pkt_devp->packet_arrival_work,
+ packet_arrival_worker);
init_completion(&smd_pkt_devp->ch_allocated);
smd_pkt_devp->driver.probe = smd_pkt_dummy_probe;
scnprintf(smd_pkt_devp->pdriver_name, PDRIVER_NAME_MAX_SIZE,
@@ -881,10 +881,11 @@
smd_pkt_devp->driver.probe = NULL;
}
out:
+ if (!smd_pkt_devp->ch)
+ wake_lock_destroy(&smd_pkt_devp->pa_wake_lock);
+
mutex_unlock(&smd_pkt_devp->ch_lock);
- if (r < 0)
- wake_lock_destroy(&smd_pkt_devp->pa_wake_lock);
return r;
}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index d8539f5..ecff3ea 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -373,6 +373,11 @@
KGSL_IOMMU_CTX_SHIFT) +
KGSL_IOMMU_CTX_TLBIASID);
*cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
cmds += adreno_add_read_cmds(device, cmds,
reg_map_desc[i]->gpuaddr +
(KGSL_IOMMU_CONTEXT_USER <<
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a2dd649..b341485 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -17,6 +17,7 @@
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
+#include <linux/highmem.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
@@ -489,9 +490,11 @@
struct kgsl_pagetable *pagetable,
size_t size, unsigned int protflags)
{
- int order, ret = 0;
+ int i, order, ret = 0;
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
- int i;
+ struct page **pages = NULL;
+ pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+ void *ptr;
/*
* Add guard page to the end of the allocation when the
@@ -515,26 +518,53 @@
goto done;
}
+ /*
+ * Allocate space to store the list of pages to send to vmap.
+ * This is an array of pointers so we can track 1024 pages per page of
+ * allocation which means we can handle up to a 8MB buffer request with
+ * two pages; well within the acceptable limits for using kmalloc.
+ */
+
+ pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);
+
+ if (pages == NULL) {
+ KGSL_CORE_ERR("kmalloc (%d) failed\n",
+ sglen * sizeof(struct page *));
+ ret = -ENOMEM;
+ goto done;
+ }
+
kmemleak_not_leak(memdesc->sg);
memdesc->sglen = sglen;
sg_init_table(memdesc->sg, sglen);
for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
- __GFP_HIGHMEM);
- if (!page) {
+
+ /*
+ * Don't use GFP_ZERO here because it is faster to memset the
+ * range ourselves (see below)
+ */
+
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (pages[i] == NULL) {
ret = -ENOMEM;
memdesc->sglen = i;
goto done;
}
- flush_dcache_page(page);
- sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+
+ sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
}
/* ADd the guard page to the end of the sglist */
if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+ /*
+ * It doesn't matter if we use GFP_ZERO here, this never
+ * gets mapped, and we only allocate it once in the life
+ * of the system
+ */
+
if (kgsl_guard_page == NULL)
kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
__GFP_HIGHMEM);
@@ -547,6 +577,44 @@
memdesc->sglen--;
}
+ /*
+ * All memory that goes to the user has to be zeroed out before it gets
+ * exposed to userspace. This means that the memory has to be mapped in
+ * the kernel, zeroed (memset) and then unmapped. This also means that
+ * the dcache has to be flushed to ensure coherency between the kernel
+ * and user pages. We used to pass __GFP_ZERO to alloc_page which mapped
+ * zeroed and unmapped each individual page, and then we had to turn
+ * around and call flush_dcache_page() on that page to clear the caches.
+ * This was killing us for performance. Instead, we found it is much
+ * faster to allocate the pages without GFP_ZERO, map the entire range,
+ * memset it, flush the range and then unmap - this results in a factor
+ * of 4 improvement for speed for large buffers. There is a small
+ * increase in speed for small buffers, but only on the order of a few
+ * microseconds at best. The only downside is that there needs to be
+ * enough temporary space in vmalloc to accomodate the map. This
+ * shouldn't be a problem, but if it happens, fall back to a much slower
+ * path
+ */
+
+ ptr = vmap(pages, i, VM_IOREMAP, page_prot);
+
+ if (ptr != NULL) {
+ memset(ptr, 0, memdesc->size);
+ dmac_flush_range(ptr, ptr + memdesc->size);
+ vunmap(ptr);
+ } else {
+ int j;
+
+ /* Very, very, very slow path */
+
+ for (j = 0; j < i; j++) {
+ ptr = kmap_atomic(pages[j]);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ }
+ }
+
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
KGSL_CACHE_OP_FLUSH);
@@ -564,6 +632,8 @@
kgsl_driver.stats.histogram[order]++;
done:
+ kfree(pages);
+
if (ret)
kgsl_sharedmem_free(memdesc);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 23317d6..1c70527 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -100,6 +100,7 @@
#define MXT_PROCI_PALM_T41 41
#define MXT_PROCI_TOUCHSUPPRESSION_T42 42
#define MXT_PROCI_STYLUS_T47 47
+#define MXT_PROCI_ADAPTIVETHRESHOLD_T55 55
#define MXT_PROCI_SHIELDLESS_T56 56
#define MXT_PROCG_NOISESUPPRESSION_T48 48
#define MXT_SPT_COMMSCONFIG_T18 18
@@ -228,7 +229,7 @@
#define MXT_BACKUP_VALUE 0x55
#define MXT_BACKUP_TIME 25 /* msec */
#define MXT224_RESET_TIME 65 /* msec */
-#define MXT224E_RESET_TIME 22 /* msec */
+#define MXT224E_RESET_TIME 150 /* msec */
#define MXT1386_RESET_TIME 250 /* msec */
#define MXT_RESET_TIME 250 /* msec */
#define MXT_RESET_NOCHGREAD 400 /* msec */
@@ -386,6 +387,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
default:
return false;
@@ -419,6 +421,7 @@
case MXT_SPT_USERDATA_T38:
case MXT_SPT_DIGITIZER_T43:
case MXT_SPT_CTECONFIG_T46:
+ case MXT_PROCI_ADAPTIVETHRESHOLD_T55:
return true;
default:
return false;
@@ -732,6 +735,36 @@
return __mxt_read_reg(data->client, reg + offset, 1, val);
}
+static int mxt_get_object_address(struct device *dev, u8 type)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ u8 obj_num, obj_buf[MXT_OBJECT_SIZE];
+ u16 reg;
+ int i, error;
+
+ error = mxt_read_reg(data->client, MXT_OBJECT_NUM, &obj_num);
+
+ if (error) {
+ dev_err(dev, "reading number of objects failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < obj_num; i++) {
+ reg = MXT_OBJECT_START + MXT_OBJECT_SIZE * i;
+ error = mxt_read_object_table(data->client,
+ reg, obj_buf);
+ if (error)
+ return error;
+
+ if (obj_buf[0] == type)
+ return obj_buf[2] << 8 | obj_buf[1];
+ }
+ /* If control reaches here, i = obj_num and object not found */
+ dev_err(dev, "Requested object %d not found.\n", type);
+ return -EINVAL;
+
+}
+
static int mxt_write_object(struct mxt_data *data,
u8 type, u8 offset, u8 val)
{
@@ -1543,6 +1576,7 @@
switch (data->info.family_id) {
case MXT224_ID:
+ case MXT224E_ID:
max_frame_size = MXT_SINGLE_FW_MAX_FRAME_SIZE;
break;
case MXT1386_ID:
@@ -1681,11 +1715,12 @@
const char *buf, size_t count)
{
struct mxt_data *data = dev_get_drvdata(dev);
- int error;
+ int error, address;
const char *fw_name;
u8 bootldr_id;
u8 cfg_version[MXT_CFG_VERSION_LEN] = {0};
+
/* If fw_name is set, then the existing firmware has an upgrade */
if (!data->fw_name) {
/*
@@ -1735,6 +1770,16 @@
data->cfg_version_idx = 0;
data->update_cfg = false;
+ /* T38 object address might have changed, read it from
+ touch controller */
+ address = mxt_get_object_address(dev, MXT_SPT_USERDATA_T38);
+ if (address < 0) {
+ dev_err(dev, "T38 required for touch operation\n");
+ return -EINVAL;
+ }
+
+ data->t38_start_addr = address;
+
error = __mxt_write_reg(data->client, data->t38_start_addr,
sizeof(cfg_version), cfg_version);
if (error)
diff --git a/drivers/input/touchscreen/cy8c_ts.c b/drivers/input/touchscreen/cy8c_ts.c
index f708582..88f7d1b 100644
--- a/drivers/input/touchscreen/cy8c_ts.c
+++ b/drivers/input/touchscreen/cy8c_ts.c
@@ -3,7 +3,7 @@
* drivers/input/touchscreen/cy8c_ts.c
*
* Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
- * Copyright (c) 2010, 2011 Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -387,6 +387,7 @@
input_set_drvdata(input_device, ts);
__set_bit(EV_ABS, input_device->evbit);
+ __set_bit(INPUT_PROP_DIRECT, input_device->propbit);
if (ts->device_id == CY8CTMA340) {
/* set up virtual key */
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index ebb4afe..15254fb 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -76,3 +76,14 @@
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
+
+config MMC_BLOCK_TEST
+ tristate "MMC block test"
+ depends on MMC_BLOCK && IOSCHED_TEST
+ default m
+ help
+ MMC block test can be used with test iosched to test the MMC block
+ device.
+ Currently used to test eMMC 4.5 features (packed commands, sanitize,
+ BKOPs).
+
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+obj-$(CONFIG_MMC_BLOCK_TEST) += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1331aa4..a496df0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -125,17 +125,6 @@
static DEFINE_MUTEX(open_lock);
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
-};
-
enum {
MMC_PACKED_N_IDX = -1,
MMC_PACKED_N_ZERO,
@@ -1431,6 +1420,64 @@
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+ int i;
+ int max_num_of_packed_reqs = 0;
+
+ if ((!card) || (!card->wr_pack_stats.packing_events))
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+
+ pr_info("%s: write packing statistics:\n",
+ mmc_hostname(card->host));
+
+ for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+ if (card->wr_pack_stats.packing_events[i] != 0)
+ pr_info("%s: Packed %d reqs - %d times\n",
+ mmc_hostname(card->host), i,
+ card->wr_pack_stats.packing_events[i]);
+ }
+
+ pr_info("%s: stopped packing due to the following reasons:\n",
+ mmc_hostname(card->host));
+
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+ pr_info("%s: %d times: exceedmax num of segments\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+ pr_info("%s: %d times: exceeding the max num of sectors\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+ pr_info("%s: %d times: wrong data direction\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+ pr_info("%s: %d times: flush or discard\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+ pr_info("%s: %d times: empty queue\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+ pr_info("%s: %d times: rel write\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+ if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+ pr_info("%s: %d times: Threshold\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+ spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(print_mmc_packing_stats);
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@@ -1628,7 +1675,18 @@
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ /*
+ * This is intended for packed commands tests usage - in case these
+ * functions are not in use the respective pointers are NULL
+ */
+ if (mq->err_check_fn)
+ mqrq->mmc_active.err_check = mq->err_check_fn;
+ else
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ if (mq->packed_test_fn)
+ mq->packed_test_fn(mq->queue, mqrq);
mmc_queue_bounce_pre(mqrq);
}
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
new file mode 100644
index 0000000..0ace608
--- /dev/null
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -0,0 +1,1533 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* MMC block test */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/delay.h>
+#include <linux/test-iosched.h>
+#include "queue.h"
+
+#define MODULE_NAME "mmc_block_test"
+#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
+#define TEST_MAX_BIOS_PER_REQ 120
+#define CMD23_PACKED_BIT (1 << 30)
+#define LARGE_PRIME_1 1103515367
+#define LARGE_PRIME_2 35757
+#define PACKED_HDR_VER_MASK 0x000000FF
+#define PACKED_HDR_RW_MASK 0x0000FF00
+#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
+#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+enum is_random {
+ NON_RANDOM_TEST,
+ RANDOM_TEST,
+};
+
+enum mmc_block_test_testcases {
+ /* Start of send write packing test group */
+ SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_FLUSH,
+ TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_EMPTY_QUEUE,
+ TEST_STOP_DUE_TO_MAX_REQ_NUM,
+ TEST_STOP_DUE_TO_THRESHOLD,
+ SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
+
+ /* Start of err check test group */
+ ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
+ TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
+ TEST_RET_PARTIAL_MAX_FAIL_IDX,
+ TEST_RET_RETRY,
+ TEST_RET_CMD_ERR,
+ TEST_RET_DATA_ERR,
+ ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
+
+ /* Start of send invalid test group */
+ INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_WRONG_WRITE_CODE,
+ TEST_HDR_INVALID_RW_CODE,
+ TEST_HDR_DIFFERENT_ADDRESSES,
+ TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
+ TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
+ TEST_HDR_CMD23_PACKED_BIT_SET,
+ TEST_CMD23_MAX_PACKED_WRITES,
+ TEST_CMD23_ZERO_PACKED_WRITES,
+ TEST_CMD23_PACKED_BIT_UNSET,
+ TEST_CMD23_REL_WR_BIT_SET,
+ TEST_CMD23_BITS_16TO29_SET,
+ TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+ INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+};
+
+enum mmc_block_test_group {
+ TEST_NO_GROUP,
+ TEST_GENERAL_GROUP,
+ TEST_SEND_WRITE_PACKING_GROUP,
+ TEST_ERR_CHECK_GROUP,
+ TEST_SEND_INVALID_GROUP,
+};
+
+struct mmc_block_test_debug {
+ struct dentry *send_write_packing_test;
+ struct dentry *err_check_test;
+ struct dentry *send_invalid_packed_test;
+ struct dentry *random_test_seed;
+};
+
+struct mmc_block_test_data {
+ /* The number of write requests that the test will issue */
+ int num_requests;
+ /* The expected write packing statistics for the current test */
+ struct mmc_wr_pack_stats exp_packed_stats;
+ /*
+ * A user-defined seed for random choices of number of bios written in
+ * a request, and of number of requests issued in a test
+ * This field is randomly updated after each use
+ */
+ unsigned int random_test_seed;
+ /* A retry counter used in err_check tests */
+ int err_check_counter;
+ /* Can be one of the values of enum test_group */
+ enum mmc_block_test_group test_group;
+ /*
+ * Indicates if the current testcase is running with random values of
+ * num_requests and num_bios (in each request)
+ */
+ int is_random;
+ /* Data structure for debugfs dentries */
+ struct mmc_block_test_debug debug;
+ /*
+ * Data structure containing individual test information, including
+ * self-defined specific data
+ */
+ struct test_info test_info;
+ /* mmc block device test */
+ struct blk_dev_test_type bdt;
+};
+
+static struct mmc_block_test_data *mbtd;
+
+/*
+ * A callback assigned to the packed_test_fn field.
+ * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
+ * Here we alter the packed header or CMD23 in order to send an invalid
+ * packed command to the card.
+ */
+static void test_invalid_packed_cmd(struct request_queue *q,
+ struct mmc_queue_req *mqrq)
+{
+ struct mmc_queue *mq = q->queuedata;
+ u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+ struct request *req = mqrq->req;
+ struct request *second_rq;
+ struct test_request *test_rq;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ int num_requests;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ test_rq = (struct test_request *)req->elv.priv[0];
+ if (!test_rq) {
+ test_pr_err("%s: NULL test_rq", __func__);
+ return;
+ }
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ switch (mbtd->test_info.testcase) {
+ case TEST_HDR_INVALID_VERSION:
+ test_pr_info("%s: set invalid header version", __func__);
+ /* Put 0 in header version field (1 byte, offset 0 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
+ break;
+ case TEST_HDR_WRONG_WRITE_CODE:
+ test_pr_info("%s: wrong write code", __func__);
+ /* Set R/W field with R value (1 byte, offset 1 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
+ break;
+ case TEST_HDR_INVALID_RW_CODE:
+ test_pr_info("%s: invalid r/w code", __func__);
+ /* Set R/W field with invalid value */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
+ break;
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ test_pr_info("%s: different addresses", __func__);
+ second_rq = list_entry(req->queuelist.next, struct request,
+ queuelist);
+ test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
+ __func__, (long)req->__sector,
+ (long)second_rq->__sector);
+ /*
+ * Put start sector of second write request in the first write
+ * request's cmd25 argument in the packed header
+ */
+ packed_cmd_hdr[3] = second_rq->__sector;
+ break;
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ test_pr_info("%s: request num smaller than actual" , __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is decremented by 1 */
+ num_requests = (num_requests - 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * smaller than the actual number (1 byte, offset 2 in header)
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ test_pr_info("%s: request num larger than actual" , __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is incremented by 1 */
+ num_requests = (num_requests + 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * larger than the actual number (1 byte, offset 2 in header).
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ test_pr_info("%s: header CMD23 packed bit set" , __func__);
+ /*
+ * Set packed bit (bit 30) in cmd23 argument of first and second
+ * write requests in packed write header.
+ * These are located at bytes 2 and 4 in packed write header
+ */
+ packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
+ packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num > max_packed_reqs",
+ __func__);
+ /*
+ * Set the individual packed cmd23 request num to
+ * max_packed_reqs + 1
+ */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
+ break;
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num = 0", __func__);
+ /* Set the individual packed cmd23 request num to zero */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED;
+ break;
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ test_pr_info("%s: CMD23 packed bit unset", __func__);
+ /*
+ * Set the individual packed cmd23 packed bit to 0,
+ * although there is a packed write request
+ */
+ brq->sbc.arg &= ~CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_REL_WR_BIT_SET:
+ test_pr_info("%s: CMD23 REL WR bit set", __func__);
+ /* Set the individual packed cmd23 reliable write bit */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
+ break;
+ case TEST_CMD23_BITS_16TO29_SET:
+ test_pr_info("%s: CMD23 bits [16-29] set", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ PACKED_HDR_BITS_16_TO_29_SET;
+ break;
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ test_pr_info("%s: CMD23 hdr not in block count", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ ((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ break;
+ }
+}
+
+/*
+ * A callback assigned to the err_check_fn field of the mmc_request by the
+ * MMC/card/block layer.
+ * Called upon request completion by the MMC/core layer.
+ * Here we emulate an error return value from the card.
+ */
+static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request_queue *req_q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int ret = 0;
+
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request_queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: %s: NULL mq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ if (!mq_rq) {
+ test_pr_err("%s: %s: NULL mq_rq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ switch (mbtd->test_info.testcase) {
+ case TEST_RET_ABORT:
+ test_pr_info("%s: return abort", __func__);
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ test_pr_info("%s: return partial followed by success",
+ __func__);
+ /*
+ * Since in this testcase num_requests is always >= 2,
+ * we can be sure that packed_fail_idx is always >= 1
+ */
+ mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+ test_pr_info("%s: packed_fail_idx = %d"
+ , __func__, mq_rq->packed_fail_idx);
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ if (!mbtd->err_check_counter) {
+ test_pr_info("%s: return partial followed by abort",
+ __func__);
+ mbtd->err_check_counter++;
+ /*
+ * Since in this testcase num_requests is always >= 3,
+ * we have that packed_fail_idx is always >= 1
+ */
+ mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
+ test_pr_info("%s: packed_fail_idx = %d"
+ , __func__, mq_rq->packed_fail_idx);
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mbtd->err_check_counter = 0;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ test_pr_info("%s: return partial multiple until success",
+ __func__);
+ if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
+ mq->err_check_fn = NULL;
+ mbtd->err_check_counter = 0;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mq_rq->packed_fail_idx = 1;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ test_pr_info("%s: return partial max fail_idx", __func__);
+ mq_rq->packed_fail_idx = max_packed_reqs - 1;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_RETRY:
+ test_pr_info("%s: return retry", __func__);
+ ret = MMC_BLK_RETRY;
+ break;
+ case TEST_RET_CMD_ERR:
+ test_pr_info("%s: return cmd err", __func__);
+ ret = MMC_BLK_CMD_ERR;
+ break;
+ case TEST_RET_DATA_ERR:
+ test_pr_info("%s: return data err", __func__);
+ ret = MMC_BLK_DATA_ERR;
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ }
+
+ return ret;
+}
+
+/*
+ * This is a specific implementation for the get_test_case_str_fn function
+ * pointer in the test_info data structure. Given a valid test_data instance,
+ * the function returns a string resembling the test name, based on the testcase
+ */
+static char *get_test_case_str(struct test_data *td)
+{
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return NULL;
+ }
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ return "Test stop due to flush";
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ return "Test stop due to flush after max-1 reqs";
+ case TEST_STOP_DUE_TO_READ:
+ return "Test stop due to read";
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ return "Test stop due to read after max-1 reqs";
+ case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+ return "Test stop due to empty queue";
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ return "Test stop due to max req num";
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ return "Test stop due to exceeding threshold";
+ case TEST_RET_ABORT:
+ return "Test err_check return abort";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ return "Test err_check return partial followed by success";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ return "Test err_check return partial followed by abort";
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ return "Test err_check return partial multiple until success";
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ return "Test err_check return partial max fail index";
+ case TEST_RET_RETRY:
+ return "Test err_check return retry";
+ case TEST_RET_CMD_ERR:
+ return "Test err_check return cmd error";
+ case TEST_RET_DATA_ERR:
+ return "Test err_check return data error";
+ case TEST_HDR_INVALID_VERSION:
+ return "Test invalid - wrong header version";
+ case TEST_HDR_WRONG_WRITE_CODE:
+ return "Test invalid - wrong write code";
+ case TEST_HDR_INVALID_RW_CODE:
+ return "Test invalid - wrong R/W code";
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ return "Test invalid - header different addresses";
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ return "Test invalid - header req num smaller than actual";
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ return "Test invalid - header req num larger than actual";
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ return "Test invalid - header cmd23 packed bit set";
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ return "Test invalid - cmd23 max packed writes";
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ return "Test invalid - cmd23 zero packed writes";
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ return "Test invalid - cmd23 packed bit unset";
+ case TEST_CMD23_REL_WR_BIT_SET:
+ return "Test invalid - cmd23 rel wr bit set";
+ case TEST_CMD23_BITS_16TO29_SET:
+ return "Test invalid - cmd23 bits [16-29] set";
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ return "Test invalid - cmd23 header block not in count";
+ default:
+ return "Unknown testcase";
+ }
+
+ return NULL;
+}
+
+/*
+ * Compare individual testcase's statistics to the expected statistics:
+ * Compare stop reason and number of packing events
+ */
+static int check_wr_packing_statistics(struct test_data *td)
+{
+ struct mmc_wr_pack_stats *mmc_packed_stats;
+ struct mmc_queue *mq = td->req_q->queuedata;
+ int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ int i;
+ struct mmc_card *card = mq->card;
+ struct mmc_wr_pack_stats expected_stats;
+ int *stop_reason;
+ int ret = 0;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ expected_stats = mbtd->exp_packed_stats;
+
+ mmc_packed_stats = mmc_blk_get_packed_statistics(card);
+ if (!mmc_packed_stats) {
+ test_pr_err("%s: NULL mmc_packed_stats", __func__);
+ return -EINVAL;
+ }
+
+ if (!mmc_packed_stats->packing_events) {
+ test_pr_err("%s: NULL packing_events", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&mmc_packed_stats->lock);
+
+ if (!mmc_packed_stats->enabled) {
+ test_pr_err("%s write packing statistics are not enabled",
+ __func__);
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ stop_reason = mmc_packed_stats->pack_stop_reason;
+
+ for (i = 1 ; i <= max_packed_reqs ; ++i) {
+ if (mmc_packed_stats->packing_events[i] !=
+ expected_stats.packing_events[i]) {
+ test_pr_err(
+ "%s: Wrong pack stats in index %d, got %d, expected %d",
+ __func__, i, mmc_packed_stats->packing_events[i],
+ expected_stats.packing_events[i]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SEGMENTS],
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SECTORS],
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
+ __func__, stop_reason[WRONG_DATA_DIR],
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
+ __func__, stop_reason[FLUSH_OR_DISCARD],
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
+ __func__, stop_reason[EMPTY_QUEUE],
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
+ expected_stats.pack_stop_reason[REL_WRITE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
+ __func__, stop_reason[REL_WRITE],
+ expected_stats.pack_stop_reason[REL_WRITE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+exit_err:
+ spin_unlock(&mmc_packed_stats->lock);
+ if (ret && mmc_packed_stats->enabled)
+ print_mmc_packing_stats(card);
+ return ret;
+cancel_round:
+ spin_unlock(&mmc_packed_stats->lock);
+ test_iosched_set_ignore_round(true);
+ return 0;
+}
+
+/*
+ * Pseudo-randomly choose a seed based on the last seed, and update it in
+ * seed_number. then return seed_number (mod max_val), or min_val.
+ */
+static unsigned int pseudo_random_seed(unsigned int *seed_number,
+ unsigned int min_val,
+ unsigned int max_val)
+{
+ int ret = 0;
+
+ if (!seed_number)
+ return 0;
+
+ *seed_number = ((unsigned int)(((unsigned long)*seed_number *
+ (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
+ ret = (unsigned int)((*seed_number) % max_val);
+
+ return (ret > min_val ? ret : min_val);
+}
+
+/*
+ * Given a pseudo-random seed, find a pseudo-random num_of_bios.
+ * Make sure the sectors covered by num_of_bios do not exceed TEST_MAX_SECTOR_RANGE
+ */
+static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
+ unsigned int *num_of_bios)
+{
+ do {
+ *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
+ TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
+}
+
+/* Add a single read request to the given td's request queue */
+static int prepare_request_add_read(struct test_data *td)
+{
+ int ret;
+ int start_sec;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
+ td->wr_rd_next_req_id);
+
+ ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
+ TEST_PATTERN_5A, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a read request", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Add a single flush request to the given td's request queue */
+static int prepare_request_add_flush(struct test_data *td)
+{
+ int ret;
+
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
+ td->unique_next_req_id);
+ ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
+ 0, 0, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a flush request", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Add num_requests amount of write requests to the given td's request queue.
+ * If random test mode is chosen we pseudo-randomly choose the number of bios
+ * for each write request, otherwise add between 1 to 5 bio per request.
+ */
+static int prepare_request_add_write_reqs(struct test_data *td,
+ int num_requests, int is_err_expected,
+ int is_random)
+{
+ int i;
+ unsigned int start_sec;
+ int num_bios;
+ int ret = 0;
+ unsigned int *bio_seed = &mbtd->random_test_seed;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return ret;
+ }
+
+ test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+ num_requests, td->wr_rd_next_req_id);
+
+ for (i = 1 ; i <= num_requests ; i++) {
+ start_sec = td->start_sector + 4096 * td->num_of_write_bios;
+ if (is_random)
+ pseudo_rnd_num_of_bios(bio_seed, &num_bios);
+ else
+ /*
+ * For the non-random case, give num_bios a value
+ * between 1 and 5, to keep a small number of BIOs
+ */
+ num_bios = (i%5)+1;
+
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_sec, num_bios, TEST_PATTERN_5A, NULL);
+
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+ return 0;
+}
+
/*
 * Prepare the write, read and flush requests for a generic packed commands
 * testcase
 *
 * Adds num_requests write requests, then (per testcase) a trailing flush or
 * read, and records the packing statistics we expect the driver to report.
 * Returns 0 on success or a negative errno on invalid input / add failure.
 */
static int prepare_packed_requests(struct test_data *td, int is_err_expected,
				int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	struct request_queue *req_q;

	if (!td) {
		pr_err("%s: NULL td", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		pr_err("%s: NULL request queue", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	/* Lazily seed the pseudo-random generator if no seed was set yet */
	if (mbtd->random_test_seed <= 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	/* Start a fresh packing-statistics window for this test run */
	mmc_blk_init_packed_statistics(mq->card);

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
		is_random);
	if (ret)
		return ret;

	/* Avoid memory corruption in upcoming stats set */
	if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
		num_requests--;

	/* Zero the expected stats before recording what this run should see */
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));
	if (num_requests <= max_packed_reqs)
		mbtd->exp_packed_stats.packing_events[num_requests] = 1;

	/* Record the pack-stop reason(s) each testcase is expected to hit */
	switch (td->test_info.testcase) {
	case TEST_STOP_DUE_TO_FLUSH:
	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
		ret = prepare_request_add_flush(td);
		if (ret)
			return ret;

		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		break;
	case TEST_STOP_DUE_TO_READ:
	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
		ret = prepare_request_add_read(td);
		if (ret)
			return ret;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		break;
	case TEST_STOP_DUE_TO_THRESHOLD:
		/* threshold split: one full pack plus one leftover request */
		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
		break;
	default:
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
	}
	mbtd->num_requests = num_requests;

	return 0;
}
+
+/*
+ * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
+ * In this testcase we have mixed error expectations from different
+ * write requests, hence the special prepare function.
+ */
+static int prepare_partial_followed_by_abort(struct test_data *td,
+ int num_requests)
+{
+ int i, start_address;
+ int is_err_expected = 0;
+ int ret = 0;
+ struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ mmc_blk_init_packed_statistics(mq->card);
+
+ for (i = 1 ; i <= num_requests ; i++) {
+ if (i > (num_requests / 2))
+ is_err_expected = 1;
+
+ start_address = td->start_sector + 4096*td->num_of_write_bios;
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_address, (i%5)+1, TEST_PATTERN_5A, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+
+ memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+
+ mbtd->num_requests = num_requests;
+
+ return ret;
+}
+
+/*
+ * Get number of write requests for current testcase. If random test mode was
+ * chosen, pseudo-randomly choose the number of requests, otherwise set to
+ * two less than the packing threshold.
+ */
+static int get_num_requests(struct test_data *td)
+{
+ int *seed = &mbtd->random_test_seed;
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ int num_requests;
+ int min_num_requests = 2;
+ int is_random = mbtd->is_random;
+
+ req_q = test_iosched_get_req_queue();
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ num_requests = max_num_requests - 2;
+
+ if (is_random) {
+ if (td->test_info.testcase ==
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+ min_num_requests = 3;
+
+ num_requests = pseudo_random_seed(seed, min_num_requests,
+ max_num_requests - 1);
+ }
+
+ return num_requests;
+}
+
/*
 * An implementation for the prepare_test_fn pointer in the test_info
 * data structure. According to the testcase we add the right number of requests
 * and decide if an error is expected or not.
 */
static int prepare_test(struct test_data *td)
{
	/*
	 * NOTE(review): test_iosched_get_req_queue() is dereferenced here
	 * without a NULL check (unlike in get_num_requests()) — confirm the
	 * queue cannot be NULL when prepare_test_fn is invoked.
	 */
	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
	int max_num_requests;
	int num_requests = 0;
	int ret = 0;
	int is_random = mbtd->is_random;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_num_requests = mq->card->ext_csd.max_packed_writes;

	/* Seed the pseudo-random generator once if the user did not set one */
	if (is_random && mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	num_requests = get_num_requests(td);

	/* Hook the group-specific test callbacks into the mmc queue */
	if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
		mq->packed_test_fn =
				test_invalid_packed_cmd;

	if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
		mq->err_check_fn = test_err_check;

	switch (td->test_info.testcase) {
	/* Testcases where the packed command is expected to succeed */
	case TEST_STOP_DUE_TO_FLUSH:
	case TEST_STOP_DUE_TO_READ:
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
	case TEST_CMD23_PACKED_BIT_UNSET:
		ret = prepare_packed_requests(td, 0, num_requests, is_random);
		break;
	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
		/* one below the threshold, the flush/read stops the pack */
		ret = prepare_packed_requests(td, 0, max_num_requests - 1,
					      is_random);
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		ret = prepare_partial_followed_by_abort(td, num_requests);
		break;
	case TEST_STOP_DUE_TO_MAX_REQ_NUM:
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		/* exactly at the packing threshold */
		ret = prepare_packed_requests(td, 0, max_num_requests,
					      is_random);
		break;
	case TEST_STOP_DUE_TO_THRESHOLD:
		/* one request over the packing threshold on purpose */
		ret = prepare_packed_requests(td, 0, max_num_requests + 1,
					      is_random);
		break;
	/* Testcases where the packed command is expected to fail */
	case TEST_RET_ABORT:
	case TEST_RET_RETRY:
	case TEST_RET_CMD_ERR:
	case TEST_RET_DATA_ERR:
	case TEST_HDR_INVALID_VERSION:
	case TEST_HDR_WRONG_WRITE_CODE:
	case TEST_HDR_INVALID_RW_CODE:
	case TEST_HDR_DIFFERENT_ADDRESSES:
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
	case TEST_CMD23_MAX_PACKED_WRITES:
	case TEST_CMD23_ZERO_PACKED_WRITES:
	case TEST_CMD23_REL_WR_BIT_SET:
	case TEST_CMD23_BITS_16TO29_SET:
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		ret = prepare_packed_requests(td, 1, num_requests, is_random);
		break;
	default:
		test_pr_info("%s: Invalid test case...", __func__);
		return -EINVAL;
	}

	return ret;
}
+
+/*
+ * An implementation for the post_test_fn in the test_info data structure.
+ * In our case we just reset the function pointers in the mmc_queue in order for
+ * the FS to be able to dispatch it's requests correctly after the test is
+ * finished.
+ */
+static int post_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+
+ if (!td)
+ return -EINVAL;
+
+ mq = td->req_q->queuedata;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ mq->packed_test_fn = NULL;
+ mq->err_check_fn = NULL;
+
+ return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * if the card supports the packed command feature.
+ */
+static int validate_packed_commands_settings(void)
+{
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ struct mmc_host *host;
+
+ req_q = test_iosched_get_req_queue();
+ if (!req_q) {
+ test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+ test_iosched_set_test_result(TEST_FAILED);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ host = mq->card->host;
+
+ if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
+ test_pr_err("%s: Packed Write capability disabled, exit test",
+ __func__);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+ if (max_num_requests == 0) {
+ test_pr_err(
+ "%s: no write packing support, ext_csd.max_packed_writes=%d",
+ __func__, mq->card->ext_csd.max_packed_writes);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+ test_pr_info("%s: max number of packed requests supported is %d ",
+ __func__, max_num_requests);
+
+ switch (mbtd->test_group) {
+ case TEST_SEND_WRITE_PACKING_GROUP:
+ case TEST_ERR_CHECK_GROUP:
+ case TEST_SEND_INVALID_GROUP:
+ /* disable the packing control */
+ host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ message_repeat = 1;
+ return 0;
+}
+
/* send_packing TEST */
/*
 * debugfs write handler: run the send_write_packing test group.
 * The value written by the user is the number of test cycles to run;
 * each cycle runs every testcase in the group once randomly and once
 * deterministically.
 *
 * NOTE(review): 'buf' is a __user pointer handed straight to sscanf()
 * without copy_from_user() — confirm this is acceptable for this debugfs
 * test harness. A failed sscanf leaves number at -1, which the
 * 'number <= 0' clamp below converts to a single cycle.
 */
static ssize_t send_write_packing_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;

	test_pr_info("%s: -- send_write_packing TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;


	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;

	/* Bail out early if packed writes are unsupported or misconfigured */
	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	/* Wire our callbacks into the generic test-iosched framework */
	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = SEND_WRITE_PACKING_MIN_TESTCASE ;
		      j <= SEND_WRITE_PACKING_MAX_TESTCASE ; j++) {

			/* run each testcase once random, once deterministic */
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
+
+static ssize_t send_write_packing_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_write_packing_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Pack due to FLUSH message\n"
+ "- Pack due to FLUSH after threshold writes\n"
+ "- Pack due to READ message\n"
+ "- Pack due to READ after threshold writes\n"
+ "- Pack due to empty queue\n"
+ "- Pack due to threshold writes\n"
+ "- Pack due to one over threshold writes\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
/* debugfs file operations for the send_write_packing test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
+
/* err_check TEST */
/*
 * debugfs write handler: run the err_check test group.
 * The value written by the user is the number of test cycles; each cycle
 * runs every ERR_CHECK testcase once randomly and once deterministically.
 *
 * NOTE(review): 'buf' is a __user pointer handed straight to sscanf()
 * without copy_from_user() — confirm this is acceptable here.
 */
static ssize_t err_check_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;

	test_pr_info("%s: -- err_check TEST --", __func__);

	sscanf(buf, "%d", &number);

	/* A parse failure leaves number at -1; clamp to one cycle */
	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_ERR_CHECK_GROUP;

	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	/* Wire our callbacks into the generic test-iosched framework */
	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = ERR_CHECK_MIN_TESTCASE;
					j <= ERR_CHECK_MAX_TESTCASE ; j++) {
			/* run each testcase once random, once deterministic */
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
+
+static ssize_t err_check_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nerr_check_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Return ABORT\n"
+ "- Return PARTIAL followed by success\n"
+ "- Return PARTIAL followed by abort\n"
+ "- Return PARTIAL multiple times until success\n"
+ "- Return PARTIAL with fail index = threshold\n"
+ "- Return RETRY\n"
+ "- Return CMD_ERR\n"
+ "- Return DATA_ERR\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
/* debugfs file operations for the err_check test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
+
/* send_invalid_packed TEST */
/*
 * debugfs write handler: run the send_invalid_packed test group.
 * Unlike the other groups, individual failures do not abort the run;
 * they are counted and the overall result is marked TEST_FAILED at the
 * end if any testcase failed.
 *
 * NOTE(review): 'buf' is a __user pointer handed straight to sscanf()
 * without copy_from_user() — confirm this is acceptable here.
 */
static ssize_t send_invalid_packed_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;
	int num_of_failures = 0;

	test_pr_info("%s: -- send_invalid_packed TEST --", __func__);

	sscanf(buf, "%d", &number);

	/* A parse failure leaves number at -1; clamp to one cycle */
	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_SEND_INVALID_GROUP;

	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	/* Wire our callbacks into the generic test-iosched framework */
	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = INVALID_CMD_MIN_TESTCASE;
				j <= INVALID_CMD_MAX_TESTCASE ; j++) {

			/* count failures instead of stopping the loop */
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);

			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	if (num_of_failures > 0) {
		test_iosched_set_test_result(TEST_FAILED);
		test_pr_err(
			"There were %d failures during the test, TEST FAILED",
			num_of_failures);
	}
	return count;
}
+
+static ssize_t send_invalid_packed_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_invalid_packed_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Send an invalid header version\n"
+ "- Send the wrong write code\n"
+ "- Send an invalid R/W code\n"
+ "- Send wrong start address in header\n"
+ "- Send header with block_count smaller than actual\n"
+ "- Send header with block_count larger than actual\n"
+ "- Send header CMD23 packed bit set\n"
+ "- Send CMD23 with block count over threshold\n"
+ "- Send CMD23 with block_count equals zero\n"
+ "- Send CMD23 packed bit unset\n"
+ "- Send CMD23 reliable write bit set\n"
+ "- Send CMD23 bits [16-29] set\n"
+ "- Send CMD23 header block not in block_count\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
/* debugfs file operations for the send_invalid_packed test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
+
+static void mmc_block_test_debugfs_cleanup(void)
+{
+ debugfs_remove(mbtd->debug.random_test_seed);
+ debugfs_remove(mbtd->debug.send_write_packing_test);
+ debugfs_remove(mbtd->debug.err_check_test);
+ debugfs_remove(mbtd->debug.send_invalid_packed_test);
+}
+
/*
 * Create the debugfs entries for this test module under the test-iosched
 * utils and tests roots: a writable seed value plus one file per test group.
 *
 * Returns 0 on success, -EINVAL if the roots are missing, -ENOMEM if any
 * node creation fails (all previously created nodes are cleaned up).
 */
static int mmc_block_test_debugfs_init(void)
{
	struct dentry *utils_root, *tests_root;

	utils_root = test_iosched_get_debugfs_utils_root();
	tests_root = test_iosched_get_debugfs_tests_root();

	if (!utils_root || !tests_root)
		return -EINVAL;

	/* user-settable PRNG seed; 0 means "derive from jiffies" */
	mbtd->debug.random_test_seed = debugfs_create_u32(
					"random_test_seed",
					S_IRUGO | S_IWUGO,
					utils_root,
					&mbtd->random_test_seed);

	if (!mbtd->debug.random_test_seed)
		goto err_nomem;

	mbtd->debug.send_write_packing_test =
		debugfs_create_file("send_write_packing_test",
				    S_IRUGO | S_IWUGO,
				    tests_root,
				    NULL,
				    &send_write_packing_test_ops);

	if (!mbtd->debug.send_write_packing_test)
		goto err_nomem;

	mbtd->debug.err_check_test =
		debugfs_create_file("err_check_test",
				    S_IRUGO | S_IWUGO,
				    tests_root,
				    NULL,
				    &err_check_test_ops);

	if (!mbtd->debug.err_check_test)
		goto err_nomem;

	mbtd->debug.send_invalid_packed_test =
		debugfs_create_file("send_invalid_packed_test",
				    S_IRUGO | S_IWUGO,
				    tests_root,
				    NULL,
				    &send_invalid_packed_test_ops);

	if (!mbtd->debug.send_invalid_packed_test)
		goto err_nomem;

	return 0;

err_nomem:
	/* tear down whatever was created before the failure */
	mmc_block_test_debugfs_cleanup();
	return -ENOMEM;
}
+
+static void mmc_block_test_probe(void)
+{
+ struct request_queue *q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+
+ if (!q) {
+ test_pr_err("%s: NULL request queue", __func__);
+ return;
+ }
+
+ mq = q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ mbtd->exp_packed_stats.packing_events =
+ kzalloc((max_packed_reqs + 1) *
+ sizeof(*mbtd->exp_packed_stats.packing_events),
+ GFP_KERNEL);
+
+ mmc_block_test_debugfs_init();
+}
+
+static void mmc_block_test_remove(void)
+{
+ mmc_block_test_debugfs_cleanup();
+}
+
+static int __init mmc_block_test_init(void)
+{
+ mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
+ if (!mbtd) {
+ test_pr_err("%s: failed to allocate mmc_block_test_data",
+ __func__);
+ return -ENODEV;
+ }
+
+ mbtd->bdt.init_fn = mmc_block_test_probe;
+ mbtd->bdt.exit_fn = mmc_block_test_remove;
+ INIT_LIST_HEAD(&mbtd->bdt.list);
+ test_iosched_register(&mbtd->bdt);
+
+ return 0;
+}
+
/*
 * Module exit: unregister from the test-iosched framework and release the
 * global test-data singleton allocated in mmc_block_test_init().
 */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
+
+module_init(mmc_block_test_init);
+module_exit(mmc_block_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMC block test");
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6c29e0e..ec3d6d2 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,17 @@
struct mmc_data data;
};
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+};
+
enum mmc_packed_cmd {
MMC_PACKED_NONE = 0,
MMC_PACKED_WRITE,
@@ -47,6 +58,8 @@
bool wr_packing_enabled;
int num_of_potential_packed_wr_reqs;
int num_wr_reqs_to_start_packing;
+ int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
+ void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -60,4 +73,6 @@
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
#endif
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 85389d0..304dc6b 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -132,11 +132,14 @@
unsigned int rconn_mohm;
struct mutex last_ocv_uv_mutex;
int last_ocv_uv;
- int last_cc_uah; /* used for Iavg calc for UUC */
+ int last_cc_uah;
struct timeval t;
int last_uuc_uah;
int enable_fcc_learning;
int shutdown_soc;
+ int timer_uuc_expired;
+ struct delayed_work uuc_timer_work;
+ int uuc_uah_iavg_prev;
};
static int shutdown_soc_invalid;
@@ -1153,49 +1156,128 @@
return uuc;
}
-/* soc_rbatt when uuc_reported should be equal to uuc_now */
-#define SOC_RBATT_CHG 80
-#define SOC_RBATT_DISCHG 10
-static int calculate_unusable_charge_uah(struct pm8921_bms_chip *chip,
- int rbatt, int fcc_uah, int cc_uah,
- int soc_rbatt, int batt_temp, int chargecycles)
+#define SOC_RBATT_CHG 70
+#define SOC_RBATT_DISCHG 20
+
+static int uuc_iavg_div = 150;
+module_param(uuc_iavg_div, int, 0644);
+
+static int uuc_min_step_size = 120;
+module_param(uuc_min_step_size, int, 0644);
+
+static int uuc_multiplier = 1000;
+module_param(uuc_multiplier, int, 0644);
+
+#define UUC_TIMER_MS 120000
+
+static void uuc_timer_work(struct work_struct *work)
{
- struct timeval now;
- int delta_time_s;
+ struct pm8921_bms_chip *chip = container_of(work,
+ struct pm8921_bms_chip, uuc_timer_work.work);
+
+ pr_debug("UUC Timer expired\n");
+ /* indicates the system is done with the high load during bootup */
+ chip->timer_uuc_expired = 1;
+}
+
+static void calculate_iavg_ua(struct pm8921_bms_chip *chip, int cc_uah,
+ int *iavg_ua, int *delta_time_us)
+{
int delta_cc_uah;
- int iavg_ua, iavg_ma;
- int uuc_uah_itest, uuc_uah_iavg, uuc_now, uuc_reported;
- s64 stepsize = 0;
- int firsttime = 0;
+ struct timeval now;
delta_cc_uah = cc_uah - chip->last_cc_uah;
do_gettimeofday(&now);
if (chip->t.tv_sec != 0) {
- delta_time_s = (now.tv_sec - chip->t.tv_sec);
+ *delta_time_us = (now.tv_sec - chip->t.tv_sec) * USEC_PER_SEC
+ + now.tv_usec - chip->t.tv_usec;
} else {
- /* uuc calculation for the first time */
- delta_time_s = 0;
- firsttime = 1;
+ /* calculation for the first time */
+ *delta_time_us = 0;
}
- if (delta_time_s != 0)
- iavg_ua = div_s64((s64)delta_cc_uah * 3600, delta_time_s);
+ if (*delta_time_us != 0)
+ *iavg_ua = div_s64((s64)delta_cc_uah * 3600 * 1000000,
+ *delta_time_us);
else
- iavg_ua = 0;
+ *iavg_ua = 0;
- iavg_ma = iavg_ua/1000;
+ pr_debug("t.tv_sec = %d, now.tv_sec = %d delta_us = %d iavg_ua = %d\n",
+ (int)chip->t.tv_sec, (int)now.tv_sec,
+ *delta_time_us, (int)*iavg_ua);
+ /* remember cc_uah */
+ chip->last_cc_uah = cc_uah;
- pr_debug("t.tv_sec = %d, now.tv_sec = %d\n", (int)chip->t.tv_sec,
- (int)now.tv_sec);
+ /* remember this time */
+ chip->t = now;
+}
- pr_debug("delta_time_s = %d iavg_ma = %d\n", delta_time_s, iavg_ma);
+#define UUC_IAVG_THRESHOLD_UAH 50000
+static int scale_unusable_charge_uah(struct pm8921_bms_chip *chip,
+ bool charging, int uuc_uah_iavg, int uuc_uah_itest,
+ int uuc_uah_iavg_prev)
+{
+ int stepsize = 0;
+ int delta_uuc = 0;
+ int uuc_reported = 0;
- if (iavg_ma == 0) {
- pr_debug("Iavg = 0 returning last uuc = %d\n",
- chip->last_uuc_uah);
- uuc_reported = chip->last_uuc_uah;
- goto out;
+ if (charging) {
+ stepsize = max(uuc_min_step_size,
+ uuc_multiplier * (SOC_RBATT_CHG - last_soc));
+ /*
+ * set the delta only if uuc is decreasing. If it has increased
+ * simply report the last uuc since we don't want to report a
+ * higher uuc as charging progresses
+ */
+ if (chip->last_uuc_uah > uuc_uah_iavg)
+ delta_uuc = (chip->last_uuc_uah - uuc_uah_iavg)
+ / stepsize;
+ uuc_reported = chip->last_uuc_uah - delta_uuc;
+ } else {
+ stepsize = max(uuc_min_step_size,
+ uuc_multiplier * (last_soc - SOC_RBATT_DISCHG));
+ if (uuc_uah_itest > uuc_uah_iavg) {
+ if ((uuc_uah_iavg > uuc_uah_iavg_prev
+ + UUC_IAVG_THRESHOLD_UAH)
+ && chip->timer_uuc_expired)
+ /*
+ * there is a big jump in iavg current way past
+ * the bootup increase uuc to this high iavg
+ * based uuc in steps
+ */
+ delta_uuc = (uuc_uah_iavg - uuc_uah_iavg_prev)
+ / uuc_iavg_div;
+ else
+ /* increase uuc towards itest based uuc */
+ delta_uuc = (uuc_uah_itest - uuc_uah_iavg)
+ / stepsize;
+ } else {
+ /*
+ * the iavg based uuc was higher than itest based
+ * uuc. This means that iavg > itest. Itest represents
+ * the max current drawn from the device at anytime.
+ * If we find iavg > itest, ignore iavg and simply step
+ * up the uuc based on itest
+ */
+ delta_uuc = uuc_uah_itest / stepsize;
+ }
+ uuc_reported = min(uuc_uah_itest,
+ chip->last_uuc_uah + delta_uuc);
}
+ pr_debug("uuc_prev = %d stepsize = %d d_uuc = %d uuc_reported = %d\n",
+ chip->last_uuc_uah, (int)stepsize, delta_uuc,
+ uuc_reported);
+ return uuc_reported;
+}
+
+static int calculate_unusable_charge_uah(struct pm8921_bms_chip *chip,
+ int rbatt, int fcc_uah, int cc_uah,
+ int soc_rbatt, int batt_temp, int chargecycles,
+ int iavg_ua)
+{
+ int uuc_uah_itest, uuc_uah_iavg, uuc_reported;
+ static int firsttime = 1;
+ int iavg_ma = iavg_ua / 1000;
/* calculate unusable charge with itest */
uuc_uah_itest = calculate_uuc_uah_at_given_current(chip,
@@ -1212,6 +1294,8 @@
pr_debug("iavg = %d uuc_iavg = %d\n", iavg_ma, uuc_uah_iavg);
if (firsttime) {
+ chip->uuc_uah_iavg_prev = uuc_uah_iavg;
+
if (cc_uah < chip->last_cc_uah)
chip->last_uuc_uah = uuc_uah_itest;
else
@@ -1219,45 +1303,21 @@
pr_debug("firsttime uuc_prev = %d\n", chip->last_uuc_uah);
}
- uuc_now = min(uuc_uah_itest, uuc_uah_iavg);
+ uuc_reported = scale_unusable_charge_uah(chip,
+ cc_uah < chip->last_cc_uah,
+ uuc_uah_iavg, uuc_uah_itest,
+ chip->uuc_uah_iavg_prev);
- uuc_reported = -EINVAL;
- if (cc_uah < chip->last_cc_uah) {
- /* charging */
- if (uuc_now < chip->last_uuc_uah) {
- stepsize = max(1, (SOC_RBATT_CHG - soc_rbatt));
- /* uuc_reported = uuc_prev + deltauuc / stepsize */
- uuc_reported = div_s64 (stepsize * chip->last_uuc_uah
- + (uuc_now - chip->last_uuc_uah),
- stepsize);
- uuc_reported = max(0, uuc_reported);
- }
- } else {
- if (uuc_now > chip->last_uuc_uah) {
- stepsize = max(1, (soc_rbatt - SOC_RBATT_DISCHG));
- /* uuc_reported = uuc_prev + deltauuc / stepsize */
- uuc_reported = div_s64 (stepsize * chip->last_uuc_uah
- + (uuc_now - chip->last_uuc_uah),
- stepsize);
- uuc_reported = max(0, uuc_reported);
- }
- }
- if (uuc_reported == -EINVAL)
- uuc_reported = chip->last_uuc_uah;
+ /* remember the last uuc_uah_iavg */
+ chip->uuc_uah_iavg_prev = uuc_uah_iavg;
- pr_debug("uuc_now = %d uuc_prev = %d stepsize = %d uuc_reported = %d\n",
- uuc_now, chip->last_uuc_uah, (int)stepsize,
- uuc_reported);
-
-out:
/* remember the reported uuc */
chip->last_uuc_uah = uuc_reported;
- /* remember cc_uah */
- chip->last_cc_uah = cc_uah;
-
- /* remember this time */
- chip->t = now;
+ if (firsttime == 1) {
+ /* uuc calculation for the first time is done */
+ firsttime = 0;
+ }
return uuc_reported;
}
@@ -1283,7 +1343,9 @@
int *unusable_charge_uah,
int *remaining_charge_uah,
int *cc_uah,
- int *rbatt)
+ int *rbatt,
+ int *iavg_ua,
+ int *delta_time_us)
{
int soc_rbatt;
@@ -1309,10 +1371,11 @@
soc_rbatt = 0;
*rbatt = get_rbatt(chip, soc_rbatt, batt_temp);
+ calculate_iavg_ua(chip, *cc_uah, iavg_ua, delta_time_us);
+
*unusable_charge_uah = calculate_unusable_charge_uah(chip, *rbatt,
*fcc_uah, *cc_uah, soc_rbatt,
- batt_temp,
- chargecycles);
+ batt_temp, chargecycles, *iavg_ua);
pr_debug("UUC = %uuAh\n", *unusable_charge_uah);
}
@@ -1326,13 +1389,17 @@
int cc_uah;
int real_fcc_uah;
int rbatt;
+ int iavg_ua;
+ int delta_time_us;
calculate_soc_params(chip, raw, batt_temp, chargecycles,
&fcc_uah,
&unusable_charge_uah,
&remaining_charge_uah,
&cc_uah,
- &rbatt);
+ &rbatt,
+ &iavg_ua,
+ &delta_time_us);
real_fcc_uah = remaining_charge_uah - cc_uah;
*ret_fcc_uah = fcc_uah;
@@ -1522,13 +1589,17 @@
int cc_uah;
int rbatt;
int shutdown_adjusted_soc;
+ int iavg_ua;
+ int delta_time_us;
calculate_soc_params(chip, raw, batt_temp, chargecycles,
&fcc_uah,
&unusable_charge_uah,
&remaining_charge_uah,
&cc_uah,
- &rbatt);
+ &rbatt,
+ &iavg_ua,
+ &delta_time_us);
/* calculate remaining usable charge */
remaining_usable_charge_uah = remaining_charge_uah
@@ -1743,6 +1814,8 @@
int remaining_charge_uah;
int cc_uah;
int rbatt;
+ int iavg_ua;
+ int delta_time_us;
if (!the_chip) {
pr_err("called before initialization\n");
@@ -1768,7 +1841,9 @@
&unusable_charge_uah,
&remaining_charge_uah,
&cc_uah,
- &rbatt);
+ &rbatt,
+ &iavg_ua,
+ &delta_time_us);
mutex_unlock(&the_chip->last_ocv_uv_mutex);
return rbatt;
@@ -2704,6 +2779,10 @@
pm8921_bms_enable_irq(chip, PM8921_BMS_GOOD_OCV);
pm8921_bms_enable_irq(chip, PM8921_BMS_OCV_FOR_R);
+ INIT_DELAYED_WORK(&chip->uuc_timer_work, uuc_timer_work);
+ schedule_delayed_work(&chip->uuc_timer_work,
+ msecs_to_jiffies(UUC_TIMER_MS));
+
get_battery_uvolts(chip, &vbatt);
pr_info("OK battery_capacity_at_boot=%d volt = %d ocv = %d\n",
pm8921_bms_get_percent_charge(),
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index dc40c8e..f84e3ac 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -78,6 +78,7 @@
#define CHG_TTRIM 0x35C
#define CHG_COMP_OVR 0x20A
#define IUSB_FINE_RES 0x2B6
+#define OVP_USB_UVD 0x2B7
/* check EOC every 10 seconds */
#define EOC_CHECK_PERIOD_MS 10000
@@ -202,6 +203,7 @@
* @update_time: how frequently the userland needs to be updated
* @max_voltage_mv: the max volts the batt should be charged up to
* @min_voltage_mv: the min battery voltage before turning the FETon
+ * @uvd_voltage_mv: (PM8917 only) the falling UVD threshold voltage
* @cool_temp_dc: the cool temp threshold in deciCelcius
* @warm_temp_dc: the warm temp threshold in deciCelcius
* @resume_voltage_delta: the voltage delta from vdd max at which the
@@ -221,6 +223,7 @@
unsigned int update_time;
unsigned int max_voltage_mv;
unsigned int min_voltage_mv;
+ unsigned int uvd_voltage_mv;
int cool_temp_dc;
int warm_temp_dc;
unsigned int temp_check_period;
@@ -266,6 +269,7 @@
int rconn_mohm;
enum pm8921_chg_led_src_config led_src_config;
bool host_mode;
+ u8 active_path;
};
/* user space parameter to limit usb current */
@@ -587,6 +591,24 @@
return voltage_mv;
}
+#define PM8917_USB_UVD_MIN_MV 3850
+#define PM8917_USB_UVD_MAX_MV 4350
+#define PM8917_USB_UVD_STEP_MV 100
+#define PM8917_USB_UVD_MASK 0x7
+static int pm_chg_uvd_threshold_set(struct pm8921_chg_chip *chip, int thresh_mv)
+{
+ u8 temp;
+
+ if (thresh_mv < PM8917_USB_UVD_MIN_MV
+ || thresh_mv > PM8917_USB_UVD_MAX_MV) {
+ pr_err("bad mV=%d asked to set\n", thresh_mv);
+ return -EINVAL;
+ }
+ temp = (thresh_mv - PM8917_USB_UVD_MIN_MV) / PM8917_USB_UVD_STEP_MV;
+ return pm_chg_masked_write(chip, OVP_USB_UVD,
+ PM8917_USB_UVD_MASK, temp);
+}
+
#define PM8921_CHG_IBATMAX_MIN 325
#define PM8921_CHG_IBATMAX_MAX 2000
#define PM8921_CHG_I_MIN_MA 225
@@ -1740,6 +1762,16 @@
}
EXPORT_SYMBOL(pm8921_disable_input_current_limit);
+int pm8917_set_under_voltage_detection_threshold(int mv)
+{
+ if (!the_chip) {
+ pr_err("called before init\n");
+ return -EINVAL;
+ }
+ return pm_chg_uvd_threshold_set(the_chip, mv);
+}
+EXPORT_SYMBOL(pm8917_set_under_voltage_detection_threshold);
+
int pm8921_set_max_battery_charge_current(int ma)
{
if (!the_chip) {
@@ -1998,6 +2030,11 @@
return;
}
+ schedule_delayed_work(&chip->unplug_check_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (UNPLUG_CHECK_WAIT_PERIOD_MS)));
+ pm8921_chg_enable_irq(chip, CHG_GONE_IRQ);
+
power_supply_set_online(chip->ext_psy, dc_present);
power_supply_set_charge_type(chip->ext_psy,
POWER_SUPPLY_CHARGE_TYPE_FAST);
@@ -2012,51 +2049,51 @@
power_supply_changed(&chip->batt_psy);
}
-static void turn_off_usb_ovp_fet(struct pm8921_chg_chip *chip)
+static void turn_off_ovp_fet(struct pm8921_chg_chip *chip, u16 ovptestreg)
{
u8 temp;
int rc;
- rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0x30);
+ rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, 0x30);
if (rc) {
- pr_err("Failed to write 0x30 to USB_OVP_TEST rc = %d\n", rc);
+ pr_err("Failed to write 0x30 to OVP_TEST rc = %d\n", rc);
return;
}
- rc = pm8xxx_readb(chip->dev->parent, USB_OVP_TEST, &temp);
+ rc = pm8xxx_readb(chip->dev->parent, ovptestreg, &temp);
if (rc) {
- pr_err("Failed to read from USB_OVP_TEST rc = %d\n", rc);
+ pr_err("Failed to read from OVP_TEST rc = %d\n", rc);
return;
}
/* set ovp fet disable bit and the write bit */
temp |= 0x81;
- rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, temp);
+ rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, temp);
if (rc) {
- pr_err("Failed to write 0x%x USB_OVP_TEST rc=%d\n", temp, rc);
+ pr_err("Failed to write 0x%x OVP_TEST rc=%d\n", temp, rc);
return;
}
}
-static void turn_on_usb_ovp_fet(struct pm8921_chg_chip *chip)
+static void turn_on_ovp_fet(struct pm8921_chg_chip *chip, u16 ovptestreg)
{
u8 temp;
int rc;
- rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0x30);
+ rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, 0x30);
if (rc) {
- pr_err("Failed to write 0x30 to USB_OVP_TEST rc = %d\n", rc);
+ pr_err("Failed to write 0x30 to OVP_TEST rc = %d\n", rc);
return;
}
- rc = pm8xxx_readb(chip->dev->parent, USB_OVP_TEST, &temp);
+ rc = pm8xxx_readb(chip->dev->parent, ovptestreg, &temp);
if (rc) {
- pr_err("Failed to read from USB_OVP_TEST rc = %d\n", rc);
+ pr_err("Failed to read from OVP_TEST rc = %d\n", rc);
return;
}
/* unset ovp fet disable bit and set the write bit */
temp &= 0xFE;
temp |= 0x80;
- rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, temp);
+ rc = pm8xxx_writeb(chip->dev->parent, ovptestreg, temp);
if (rc) {
- pr_err("Failed to write 0x%x to USB_OVP_TEST rc = %d\n",
+ pr_err("Failed to write 0x%x to OVP_TEST rc = %d\n",
temp, rc);
return;
}
@@ -2065,38 +2102,66 @@
static int param_open_ovp_counter = 10;
module_param(param_open_ovp_counter, int, 0644);
+#define USB_ACTIVE_BIT BIT(5)
+#define DC_ACTIVE_BIT BIT(6)
+static int is_active_chg_plugged_in(struct pm8921_chg_chip *chip,
+ u8 active_chg_mask)
+{
+ if (active_chg_mask & USB_ACTIVE_BIT)
+ return pm_chg_get_rt_status(chip, USBIN_VALID_IRQ);
+ else if (active_chg_mask & DC_ACTIVE_BIT)
+ return pm_chg_get_rt_status(chip, DCIN_VALID_IRQ);
+ else
+ return 0;
+}
+
#define WRITE_BANK_4 0xC0
-#define USB_OVP_DEBOUNCE_TIME 0x06
+#define OVP_DEBOUNCE_TIME 0x06
static void unplug_ovp_fet_open(struct pm8921_chg_chip *chip)
{
- int chg_gone = 0, usb_chg_plugged_in = 0;
+ int chg_gone = 0, active_chg_plugged_in = 0;
int count = 0;
+ u8 active_mask = 0;
+ u16 ovpreg, ovptestreg;
+
+ if (is_usb_chg_plugged_in(chip) &&
+ (chip->active_path & USB_ACTIVE_BIT)) {
+ ovpreg = USB_OVP_CONTROL;
+ ovptestreg = USB_OVP_TEST;
+ active_mask = USB_ACTIVE_BIT;
+ } else if (is_dc_chg_plugged_in(chip) &&
+ (chip->active_path & DC_ACTIVE_BIT)) {
+ ovpreg = DC_OVP_CONTROL;
+ ovptestreg = DC_OVP_TEST;
+ active_mask = DC_ACTIVE_BIT;
+ } else {
+ return;
+ }
while (count++ < param_open_ovp_counter) {
- pm_chg_masked_write(chip, USB_OVP_CONTROL,
- USB_OVP_DEBOUNCE_TIME, 0x0);
+ pm_chg_masked_write(chip, ovpreg, OVP_DEBOUNCE_TIME, 0x0);
usleep(10);
- usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
+ active_chg_plugged_in
+ = is_active_chg_plugged_in(chip, active_mask);
chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
- pr_debug("OVP FET count = %d chg_gone=%d, usb_valid = %d\n",
- count, chg_gone, usb_chg_plugged_in);
+ pr_debug("OVP FET count = %d chg_gone=%d, active_valid = %d\n",
+ count, chg_gone, active_chg_plugged_in);
/* note usb_chg_plugged_in=0 => chg_gone=1 */
- if (chg_gone == 1 && usb_chg_plugged_in == 1) {
+ if (chg_gone == 1 && active_chg_plugged_in == 1) {
pr_debug("since chg_gone = 1 dis ovp_fet for 20msec\n");
- turn_off_usb_ovp_fet(chip);
+ turn_off_ovp_fet(chip, ovptestreg);
msleep(20);
- turn_on_usb_ovp_fet(chip);
+ turn_on_ovp_fet(chip, ovptestreg);
} else {
break;
}
}
- pm_chg_masked_write(chip, USB_OVP_CONTROL,
- USB_OVP_DEBOUNCE_TIME, 0x2);
- pr_debug("Exit count=%d chg_gone=%d, usb_valid=%d\n",
- count, chg_gone, usb_chg_plugged_in);
+ pm_chg_masked_write(chip, ovpreg, OVP_DEBOUNCE_TIME, 0x2);
+ pr_debug("Exit count=%d chg_gone=%d, active_valid=%d\n",
+ count, chg_gone, active_chg_plugged_in);
return;
}
@@ -2120,6 +2185,9 @@
i = find_usb_ma_value(*value);
if (i > 0)
i--;
+ while (!the_chip->iusb_fine_res && i > 0
+ && (usb_ma_table[i].value & PM8917_IUSB_FINE_RES))
+ i--;
*value = usb_ma_table[i].usb_ma;
}
}
@@ -2318,7 +2386,8 @@
static void attempt_reverse_boost_fix(struct pm8921_chg_chip *chip,
int count, int usb_ma)
{
- __pm8921_charger_vbus_draw(500);
+ if (usb_ma)
+ __pm8921_charger_vbus_draw(500);
pr_debug("count = %d iusb=500mA\n", count);
disable_input_voltage_regulation(chip);
pr_debug("count = %d disable_input_regulation\n", count);
@@ -2332,66 +2401,85 @@
pr_debug("count = %d restoring input regulation and usb_ma = %d\n",
count, usb_ma);
enable_input_voltage_regulation(chip);
- __pm8921_charger_vbus_draw(usb_ma);
+ if (usb_ma)
+ __pm8921_charger_vbus_draw(usb_ma);
}
#define VIN_ACTIVE_BIT BIT(0)
-#define UNPLUG_WRKARND_RESTORE_WAIT_PERIOD_US 200
-#define VIN_MIN_INCREASE_MV 100
+#define UNPLUG_WRKARND_RESTORE_WAIT_PERIOD_US 200
+#define VIN_MIN_INCREASE_MV 100
static void unplug_check_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct pm8921_chg_chip *chip = container_of(dwork,
struct pm8921_chg_chip, unplug_check_work);
- u8 reg_loop;
- int ibat, usb_chg_plugged_in, usb_ma;
+ u8 reg_loop, active_path;
+ int rc, ibat, active_chg_plugged_in, usb_ma;
int chg_gone = 0;
reg_loop = 0;
- usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
- if (!usb_chg_plugged_in) {
- pr_debug("Stopping Unplug Check Worker since USB is removed"
- "reg_loop = %d, fsm = %d ibat = %d\n",
- pm_chg_get_regulation_loop(chip),
- pm_chg_get_fsm_state(chip),
- get_prop_batt_current(chip)
- );
+
+ rc = pm8xxx_readb(chip->dev->parent, PBL_ACCESS1, &active_path);
+ if (rc) {
+ pr_err("Failed to read PBL_ACCESS1 rc=%d\n", rc);
+ return;
+ }
+ chip->active_path = active_path;
+
+ active_chg_plugged_in = is_active_chg_plugged_in(chip, active_path);
+ pr_debug("active_path = 0x%x, active_chg_plugged_in = %d\n",
+ active_path, active_chg_plugged_in);
+ if (active_path & USB_ACTIVE_BIT) {
+ pr_debug("USB charger active\n");
+
+ pm_chg_iusbmax_get(chip, &usb_ma);
+ if (usb_ma == 500 && !usb_target_ma) {
+ pr_debug("Stopping Unplug Check Worker USB == 500mA\n");
+ disable_input_voltage_regulation(chip);
+ return;
+ }
+
+ if (usb_ma <= 100) {
+ pr_debug(
+ "Unenumerated or suspended usb_ma = %d skip\n",
+ usb_ma);
+ goto check_again_later;
+ }
+ } else if (active_path & DC_ACTIVE_BIT) {
+ pr_debug("DC charger active\n");
+ } else {
+ /* No charger active */
+ if (!(is_usb_chg_plugged_in(chip)
+ && !(is_dc_chg_plugged_in(chip)))) {
+ pr_debug(
+ "Stop: chg removed reg_loop = %d, fsm = %d ibat = %d\n",
+ pm_chg_get_regulation_loop(chip),
+ pm_chg_get_fsm_state(chip),
+ get_prop_batt_current(chip)
+ );
+ }
return;
}
- pm_chg_iusbmax_get(chip, &usb_ma);
- if (usb_ma == 500 && !usb_target_ma) {
- pr_debug("Stopping Unplug Check Worker since USB == 500mA\n");
- disable_input_voltage_regulation(chip);
- return;
- }
-
- if (usb_ma <= 100) {
- pr_debug(
- "Unenumerated yet or suspended usb_ma = %d skipping\n",
- usb_ma);
- goto check_again_later;
- }
- if (pm8921_chg_is_enabled(chip, CHG_GONE_IRQ))
- pr_debug("chg gone irq is enabled\n");
-
- reg_loop = pm_chg_get_regulation_loop(chip);
- pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
-
- if ((reg_loop & VIN_ACTIVE_BIT) && (usb_ma > USB_WALL_THRESHOLD_MA)) {
- decrease_usb_ma_value(&usb_ma);
- usb_target_ma = usb_ma;
- /* end AICL here */
- __pm8921_charger_vbus_draw(usb_ma);
- pr_debug("usb_now=%d, usb_target = %d\n",
- usb_ma, usb_target_ma);
+ if (active_path & USB_ACTIVE_BIT) {
+ reg_loop = pm_chg_get_regulation_loop(chip);
+ pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
+ if ((reg_loop & VIN_ACTIVE_BIT) &&
+ (usb_ma > USB_WALL_THRESHOLD_MA)) {
+ decrease_usb_ma_value(&usb_ma);
+ usb_target_ma = usb_ma;
+ /* end AICL here */
+ __pm8921_charger_vbus_draw(usb_ma);
+ pr_debug("usb_now=%d, usb_target = %d\n",
+ usb_ma, usb_target_ma);
+ }
}
reg_loop = pm_chg_get_regulation_loop(chip);
pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
+ ibat = get_prop_batt_current(chip);
if (reg_loop & VIN_ACTIVE_BIT) {
- ibat = get_prop_batt_current(chip);
pr_debug("ibat = %d fsm = %d reg_loop = 0x%x\n",
ibat, pm_chg_get_fsm_state(chip), reg_loop);
@@ -2399,25 +2487,36 @@
int count = 0;
while (count++ < param_vin_disable_counter
- && usb_chg_plugged_in == 1) {
- attempt_reverse_boost_fix(chip, count, usb_ma);
- usb_chg_plugged_in
- = is_usb_chg_plugged_in(chip);
+ && active_chg_plugged_in == 1) {
+ if (active_path & USB_ACTIVE_BIT)
+ attempt_reverse_boost_fix(chip,
+ count, usb_ma);
+ else
+ attempt_reverse_boost_fix(chip,
+ count, 0);
+ /* after reverse boost fix check if the active
+ * charger was detected as removed */
+ active_chg_plugged_in
+ = is_active_chg_plugged_in(chip,
+ active_path);
+ pr_debug("active_chg_plugged_in = %d\n",
+ active_chg_plugged_in);
}
}
}
- usb_chg_plugged_in = is_usb_chg_plugged_in(chip);
+ active_chg_plugged_in = is_active_chg_plugged_in(chip, active_path);
+ pr_debug("active_path = 0x%x, active_chg = %d\n",
+ active_path, active_chg_plugged_in);
chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
- if (chg_gone == 1 && usb_chg_plugged_in == 1) {
- /* run the worker directly */
- pr_debug(" ver5 step: chg_gone=%d, usb_valid = %d\n",
- chg_gone, usb_chg_plugged_in);
+ if (chg_gone == 1 && active_chg_plugged_in == 1) {
+ pr_debug("chg_gone=%d, active_chg_plugged_in = %d\n",
+ chg_gone, active_chg_plugged_in);
unplug_ovp_fet_open(chip);
}
- if (!(reg_loop & VIN_ACTIVE_BIT)) {
+ if (!(reg_loop & VIN_ACTIVE_BIT) && (active_path & USB_ACTIVE_BIT)) {
/* only increase iusb_max if vin loop not active */
if (usb_ma < usb_target_ma) {
increase_usb_ma_value(&usb_ma);
@@ -3385,6 +3484,8 @@
#define ENUM_TIMER_STOP_BIT BIT(1)
#define BOOT_DONE_BIT BIT(6)
+#define BOOT_TIMER_EN_BIT BIT(1)
+#define BOOT_DONE_MASK (BOOT_DONE_BIT | BOOT_TIMER_EN_BIT)
#define CHG_BATFET_ON_BIT BIT(3)
#define CHG_VCP_EN BIT(0)
#define CHG_BAT_TEMP_DIS_BIT BIT(2)
@@ -3400,7 +3501,7 @@
detect_battery_removal(chip);
rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
- BOOT_DONE_BIT, BOOT_DONE_BIT);
+ BOOT_DONE_MASK, BOOT_DONE_MASK);
if (rc) {
pr_err("Failed to set BOOT_DONE_BIT rc=%d\n", rc);
return rc;
@@ -3584,8 +3685,17 @@
pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xAC);
/* Enable isub_fine resolution AICL for PM8917 */
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917)
+ if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8917) {
chip->iusb_fine_res = true;
+ if (chip->uvd_voltage_mv)
+ rc = pm_chg_uvd_threshold_set(chip,
+ chip->uvd_voltage_mv);
+ if (rc) {
+ pr_err("Failed to set UVD threshold %d rc=%d\n",
+ chip->uvd_voltage_mv, rc);
+ return rc;
+ }
+ }
pm8xxx_writeb(chip->dev->parent, CHG_BUCK_CTRL_TEST3, 0xD9);
@@ -3871,6 +3981,7 @@
chip->update_time = pdata->update_time;
chip->max_voltage_mv = pdata->max_voltage;
chip->min_voltage_mv = pdata->min_voltage;
+ chip->uvd_voltage_mv = pdata->uvd_thresh_voltage;
chip->resume_voltage_delta = pdata->resume_voltage_delta;
chip->term_current = pdata->term_current;
chip->vbat_channel = pdata->charger_cdata.vbat_channel;
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index c271ca4..cb09de3 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -358,6 +358,7 @@
void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
+static void smux_flush_workqueues(void);
/**
* Convert TTY Error Flags to string for logging purposes.
@@ -513,7 +514,6 @@
}
ch->local_state = SMUX_LCH_LOCAL_CLOSED;
- ch->local_mode = SMUX_LCH_MODE_NORMAL;
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->tx_flow_control = 0;
@@ -526,12 +526,6 @@
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
-
- /* Flush TX/RX workqueues */
- SMUX_DBG("%s: flushing tx wq\n", __func__);
- flush_workqueue(smux_tx_wq);
- SMUX_DBG("%s: flushing rx wq\n", __func__);
- flush_workqueue(smux_rx_wq);
}
int smux_assert_lch_id(uint32_t lcid)
@@ -2232,12 +2226,13 @@
/**
* Power down the UART.
+ *
+ * Must be called with mutex_lha0 locked.
*/
-static void smux_uart_power_off(void)
+static void smux_uart_power_off_atomic(void)
{
struct uart_state *state;
- mutex_lock(&smux.mutex_lha0);
if (!smux.tty || !smux.tty->driver_data) {
pr_err("%s: unable to find UART port for tty %p\n",
__func__, smux.tty);
@@ -2246,6 +2241,15 @@
}
state = smux.tty->driver_data;
msm_hs_request_clock_off(state->uart_port);
+}
+
+/**
+ * Power down the UART.
+ */
+static void smux_uart_power_off(void)
+{
+ mutex_lock(&smux.mutex_lha0);
+ smux_uart_power_off_atomic();
mutex_unlock(&smux.mutex_lha0);
}
@@ -2327,6 +2331,9 @@
struct smux_pkt_t *pkt;
unsigned long flags;
+ if (smux.in_reset)
+ return;
+
spin_lock_irqsave(&smux.rx_lock_lha1, flags);
spin_lock(&smux.tx_lock_lha2);
@@ -2446,6 +2453,12 @@
SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
used = 0;
do {
+ if (smux.in_reset) {
+ SMUX_DBG("%s: abort RX due to reset\n", __func__);
+ smux.rx_state = SMUX_RX_IDLE;
+ break;
+ }
+
SMUX_DBG("%s: state %d; %d of %d\n",
__func__, smux.rx_state, used, len);
initial_rx_state = smux.rx_state;
@@ -2494,7 +2507,7 @@
/* get next retry packet */
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+ if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
/* port has been closed - remove all retries */
while (!list_empty(&ch->rx_retry_queue)) {
retry = list_first_entry(&ch->rx_retry_queue,
@@ -2797,6 +2810,26 @@
return updated;
}
+/**
+ * Flush all SMUX workqueues.
+ *
+ * This sets the reset bit to abort any processing loops and then
+ * flushes the workqueues to ensure that no new pending work is
+ * running. Do not call with any locks used by workers held as
+ * this will result in a deadlock.
+ */
+static void smux_flush_workqueues(void)
+{
+ smux.in_reset = 1;
+
+ SMUX_DBG("%s: flushing tx wq\n", __func__);
+ flush_workqueue(smux_tx_wq);
+ SMUX_DBG("%s: flushing rx wq\n", __func__);
+ flush_workqueue(smux_rx_wq);
+ SMUX_DBG("%s: flushing notify wq\n", __func__);
+ flush_workqueue(smux_notify_wq);
+}
+
/**********************************************************************/
/* Kernel API */
/**********************************************************************/
@@ -2922,6 +2955,7 @@
ch->local_state,
SMUX_LCH_LOCAL_OPENING);
+ ch->rx_flow_control_auto = 0;
ch->local_state = SMUX_LCH_LOCAL_OPENING;
ch->priv = priv;
@@ -2948,6 +2982,7 @@
out:
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ smux_rx_flow_control_updated(ch);
if (tx_ready)
list_channel(ch);
return ret;
@@ -3341,6 +3376,7 @@
SMUX_DBG("%s: ssr - after shutdown\n", __func__);
/* Cleanup channels */
+ smux_flush_workqueues();
mutex_lock(&smux.mutex_lha0);
smux_lch_purge();
if (smux.tty)
@@ -3357,8 +3393,11 @@
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
if (power_off_uart)
- smux_uart_power_off();
+ smux_uart_power_off_atomic();
+ smux.tx_activity_flag = 0;
+ smux.rx_activity_flag = 0;
+ smux.rx_state = SMUX_RX_IDLE;
smux.in_reset = 0;
mutex_unlock(&smux.mutex_lha0);
@@ -3440,6 +3479,8 @@
int i;
SMUX_DBG("%s: ldisc unload\n", __func__);
+ smux_flush_workqueues();
+
mutex_lock(&smux.mutex_lha0);
if (smux.ld_open_count <= 0) {
pr_err("%s: invalid ld count %d\n", __func__,
@@ -3447,7 +3488,6 @@
mutex_unlock(&smux.mutex_lha0);
return;
}
- smux.in_reset = 1;
--smux.ld_open_count;
/* Cleanup channels */
@@ -3466,11 +3506,15 @@
power_up_uart = 1;
smux.power_state = SMUX_PWR_OFF;
smux.powerdown_enabled = 0;
+ smux.tx_activity_flag = 0;
+ smux.rx_activity_flag = 0;
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
if (power_up_uart)
smux_uart_power_on_atomic();
+ smux.rx_state = SMUX_RX_IDLE;
+
/* Disconnect from TTY */
smux.tty = NULL;
mutex_unlock(&smux.mutex_lha0);
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index e488a63..4c255a4 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -21,6 +21,7 @@
#include <linux/completion.h>
#include <linux/termios.h>
#include <linux/smux.h>
+#include <mach/subsystem_restart.h>
#include "smux_private.h"
#define DEBUG_BUFMAX 4096
@@ -207,6 +208,9 @@
struct list_head write_events;
};
+static int get_rx_buffer_mock(void *priv, void **pkt_priv,
+ void **buffer, int size);
+
/**
* Initialize mock callback data. Only call once.
*
@@ -673,6 +677,198 @@
}
/**
+ * Verify Basic Subsystem Restart Support
+ *
+ * Run a basic loopback test followed by a subsystem restart and then another
+ * loopback test.
+ */
+static int smux_ut_remote_ssr_basic(char *buf, int max)
+{
+ const struct test_vector test_data[] = {
+ {"hello\0world\n", sizeof("hello\0world\n")},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ while (!failed) {
+ /* enable remote mode */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ subsystem_restart("external_modem");
+ msleep(5000);
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ break;
+ }
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+ return i;
+}
+
+/**
+ * Verify Subsystem Restart Support During Port Open
+ */
+static int smux_ut_remote_ssr_open(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int ret;
+ int i = 0;
+ int failed = 0;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ while (!failed) {
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ /* open port */
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* restart modem */
+ subsystem_restart("external_modem");
+
+ /* verify SSR events */
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, 5*HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+
+ mock_cb_data_reset(&cb_data);
+
+ return i;
+}
+
+/**
+ * Verify get_rx_buffer callback retry doesn't livelock SSR
+ * until all RX Buffer Retries have timed out.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_remote_ssr_rx_buff_retry(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ pr_err("%s", buf);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ while (!failed) {
+ /* open port for loopback */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK,
+ 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+ smux_mock_cb, get_rx_buffer_mock);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* Queue up an RX buffer retry */
+ get_rx_buffer_mock_fail = 1;
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ while (!cb_data.get_rx_buff_retry_count) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+ mock_cb_data_reset(&cb_data);
+
+ /* trigger SSR */
+ subsystem_restart("external_modem");
+
+ /* verify SSR completed */
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, 5*HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+/**
* Fill test pattern into provided buffer including an optional
* redzone 16 bytes before and 16 bytes after the buffer.
*
@@ -1793,6 +1989,12 @@
smux_ut_local_get_rx_buff_retry);
debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
smux_ut_local_get_rx_buff_retry_auto);
+ debug_create("ut_remote_ssr_basic", 0444, dent,
+ smux_ut_remote_ssr_basic);
+ debug_create("ut_remote_ssr_open", 0444, dent,
+ smux_ut_remote_ssr_open);
+ debug_create("ut_remote_ssr_rx_buff_retry", 0444, dent,
+ smux_ut_remote_ssr_rx_buff_retry);
return 0;
}
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index bb6bb2c..c1e1e13 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -2766,28 +2766,46 @@
queue_work(system_nrt_wq, &motg->sm_work);
}
-static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+static void msm_pmic_id_status_w(struct work_struct *w)
{
- struct msm_otg *motg = data;
+ struct msm_otg *motg = container_of(w, struct msm_otg,
+ pmic_id_status_work.work);
+ int work = 0;
+ unsigned long flags;
- if (aca_id_turned_on)
- return IRQ_HANDLED;
-
+ local_irq_save(flags);
if (irq_read_line(motg->pdata->pmic_id_irq)) {
- pr_debug("PMIC: ID set\n");
- set_bit(ID, &motg->inputs);
+ if (!test_and_set_bit(ID, &motg->inputs)) {
+ pr_debug("PMIC: ID set\n");
+ work = 1;
+ }
} else {
- pr_debug("PMIC: ID clear\n");
- clear_bit(ID, &motg->inputs);
- set_bit(A_BUS_REQ, &motg->inputs);
+ if (test_and_clear_bit(ID, &motg->inputs)) {
+ pr_debug("PMIC: ID clear\n");
+ set_bit(A_BUS_REQ, &motg->inputs);
+ work = 1;
+ }
}
- if (motg->phy.state != OTG_STATE_UNDEFINED) {
+ if (work && (motg->phy.state != OTG_STATE_UNDEFINED)) {
if (atomic_read(&motg->pm_suspended))
motg->sm_work_pending = true;
else
queue_work(system_nrt_wq, &motg->sm_work);
}
+ local_irq_restore(flags);
+
+}
+
+#define MSM_PMIC_ID_STATUS_DELAY 5 /* 5msec */
+static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+{
+ struct msm_otg *motg = data;
+
+ if (!aca_id_turned_on)
+ /* schedule delayed work for 5 msec for ID line state to settle */
+ queue_delayed_work(system_nrt_wq, &motg->pmic_id_status_work,
+ msecs_to_jiffies(MSM_PMIC_ID_STATUS_DELAY));
return IRQ_HANDLED;
}
@@ -3402,6 +3420,7 @@
msm_otg_init_timer(motg);
INIT_WORK(&motg->sm_work, msm_otg_sm_work);
INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
+ INIT_DELAYED_WORK(&motg->pmic_id_status_work, msm_pmic_id_status_w);
setup_timer(&motg->id_timer, msm_otg_id_timer_func,
(unsigned long) motg);
ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
@@ -3546,6 +3565,7 @@
pm8921_charger_unregister_vbus_sn(0);
msm_otg_debugfs_cleanup();
cancel_delayed_work_sync(&motg->chg_work);
+ cancel_delayed_work_sync(&motg->pmic_id_status_work);
cancel_work_sync(&motg->sm_work);
pm_runtime_resume(&pdev->dev);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 3c082e4..0c6aa86 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -310,6 +310,7 @@
u32 header_in_start;
u32 min_dpb_num;
u32 y_cb_cr_size;
+ u32 yuv_size;
u32 dynamic_prop_change;
u32 dynmic_prop_change_req;
u32 flush_pending;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
index 8a33512..db8a777 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -64,6 +64,7 @@
#define DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE (0xCB8)
#define DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE (0x2355)
#define DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE (0x3500)
+#define DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER (4)
#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index d7ebd54..949e5c0 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -1087,8 +1087,39 @@
void ddl_set_vidc_timeout(struct ddl_client_context *ddl)
{
u32 vidc_time_out = 0;
+ s32 multiplier = 1;
+ u32 temp = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
+ struct vcd_frame_data *ip_bitstream = &(ddl->input_frame.vcd_frm);
+
if (ddl->codec_data.decoder.idr_only_decoding)
vidc_time_out = 2 * DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ else {
+ vidc_time_out = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ multiplier = decoder->yuv_size - (ip_bitstream->data_len +
+ (ip_bitstream->data_len / 2));
+ if (multiplier <= 0) {
+ multiplier = decoder->yuv_size - ip_bitstream->data_len;
+ if (multiplier <= 0) {
+ if (ip_bitstream->data_len)
+ multiplier =
+ DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+ }
+ }
+ if (multiplier == DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER)
+ vidc_time_out = vidc_time_out *
+ DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+ else if (multiplier > 1) {
+ temp = (decoder->yuv_size * 1000) / multiplier;
+ temp = (temp * vidc_time_out) / 1000;
+ if (temp > (u32)(vidc_time_out *
+ DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER))
+ vidc_time_out = vidc_time_out *
+ DDL_VIDC_1080P_MAX_TIMEOUT_MULTIPLIER;
+ else
+ vidc_time_out = temp;
+ }
+ }
DDL_MSG_HIGH("%s Video core time out value = 0x%x",
__func__, vidc_time_out);
vidc_sm_set_video_core_timeout_value(
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 58d1f23..6571245 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -189,6 +189,9 @@
&decoder->frame_size.height);
progressive = seq_hdr_info->dec_progressive;
}
+ decoder->yuv_size = decoder->frame_size.width *
+ decoder->frame_size.height;
+ decoder->yuv_size += decoder->yuv_size / 2;
decoder->min_dpb_num = seq_hdr_info->min_num_dpb;
vidc_sm_get_min_yc_dpb_sizes(
&ddl->shared_mem[ddl->command_channel],
@@ -1266,6 +1269,9 @@
decoder->frame_size =
output_vcd_frm->dec_op_prop.frm_size;
decoder->client_frame_size = decoder->frame_size;
+ decoder->yuv_size = decoder->frame_size.width *
+ decoder->frame_size.height;
+ decoder->yuv_size += decoder->yuv_size / 2;
decoder->y_cb_cr_size =
ddl_get_yuv_buffer_size(&decoder->frame_size,
&decoder->buf_format,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 14e1331..a6001eb 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -1077,6 +1077,7 @@
decoder->flush_pending = false;
} else
dec_param.dpb_flush = false;
+ ddl_set_vidc_timeout(ddl);
vidc_sm_set_frame_tag(&ddl->shared_mem[ddl->command_channel],
bit_stream->ip_frm_tag);
if (ddl_context->pix_cache_enable) {
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 2186903..fca8700 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -67,6 +67,7 @@
* @min_voltage: the voltage (mV) where charging method switches from
* trickle to fast. This is also the minimum voltage the
* system operates at
+ * @uvd_thresh_voltage: the USB falling UVD threshold (mV) (PM8917 only)
* @resume_voltage_delta: the (mV) drop to wait for before resume charging
* after the battery has been fully charged
* @term_current: the charger current (mA) at which EOC happens
@@ -121,6 +122,7 @@
unsigned int update_time;
unsigned int max_voltage;
unsigned int min_voltage;
+ unsigned int uvd_thresh_voltage;
unsigned int resume_voltage_delta;
unsigned int term_current;
int cool_temp;
@@ -301,6 +303,10 @@
{
return -ENXIO;
}
+static inline int pm8917_set_under_voltage_detection_threshold(int mv)
+{
+ return -ENXIO;
+}
static inline int pm8921_disable_input_current_limit(bool disable)
{
return -ENXIO;
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index ba71293..1b7706b 100644
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -16,6 +16,9 @@
#include <linux/slimbus/slimbus.h>
+#define MICBIAS_EXT_BYP_CAP 0x00
+#define MICBIAS_NO_EXT_BYP_CAP 0x01
+
#define SITAR_LDOH_1P95_V 0x0
#define SITAR_LDOH_2P35_V 0x1
#define SITAR_LDOH_2P75_V 0x2
@@ -99,10 +102,19 @@
u32 cfilt1_mv; /* in mv */
u32 cfilt2_mv; /* in mv */
u32 cfilt3_mv; /* in mv */
+ /* Different WCD9xxx series codecs may not
+ * have 4 mic biases. If a codec has fewer
+ * mic biases, some of these properties will
+ * not be used.
+ */
u8 bias1_cfilt_sel;
u8 bias2_cfilt_sel;
u8 bias3_cfilt_sel;
u8 bias4_cfilt_sel;
+ u8 bias1_cap_mode;
+ u8 bias2_cap_mode;
+ u8 bias3_cap_mode;
+ u8 bias4_cap_mode;
};
struct wcd9xxx_ocp_setting {
diff --git a/include/linux/mfd/wcd9xxx/wcd9304_registers.h b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
index 53ae67b..f7c483c 100644
--- a/include/linux/mfd/wcd9xxx/wcd9304_registers.h
+++ b/include/linux/mfd/wcd9xxx/wcd9304_registers.h
@@ -590,6 +590,36 @@
#define SITAR_A_CDC_IIR1_COEF_B4_CTL__POR (0x00000000)
#define SITAR_A_CDC_IIR1_COEF_B5_CTL (0x34E)
#define SITAR_A_CDC_IIR1_COEF_B5_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B1_CTL (0x350)
+#define SITAR_A_CDC_IIR2_GAIN_B1_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B2_CTL (0x351)
+#define SITAR_A_CDC_IIR2_GAIN_B2_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B3_CTL (0x352)
+#define SITAR_A_CDC_IIR2_GAIN_B3_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B4_CTL (0x353)
+#define SITAR_A_CDC_IIR2_GAIN_B4_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B5_CTL (0x354)
+#define SITAR_A_CDC_IIR2_GAIN_B5_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B6_CTL (0x355)
+#define SITAR_A_CDC_IIR2_GAIN_B6_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B7_CTL (0x356)
+#define SITAR_A_CDC_IIR2_GAIN_B7_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_GAIN_B8_CTL (0x357)
+#define SITAR_A_CDC_IIR2_GAIN_B8_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_CTL (0x358)
+#define SITAR_A_CDC_IIR2_CTL__POR (0x00000040)
+#define SITAR_A_CDC_IIR2_GAIN_TIMER_CTL (0x359)
+#define SITAR_A_CDC_IIR2_GAIN_TIMER_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B1_CTL (0x35A)
+#define SITAR_A_CDC_IIR2_COEF_B1_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B2_CTL (0x35B)
+#define SITAR_A_CDC_IIR2_COEF_B2_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B3_CTL (0x35C)
+#define SITAR_A_CDC_IIR2_COEF_B3_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B4_CTL (0x35D)
+#define SITAR_A_CDC_IIR2_COEF_B4_CTL__POR (0x00000000)
+#define SITAR_A_CDC_IIR2_COEF_B5_CTL (0x35E)
+#define SITAR_A_CDC_IIR2_COEF_B5_CTL__POR (0x00000000)
#define SITAR_A_CDC_TOP_GAIN_UPDATE (0x360)
#define SITAR_A_CDC_TOP_GAIN_UPDATE__POR (0x00000000)
#define SITAR_A_CDC_TOP_RDAC_DOUT_CTL (0x361)
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 68c1ffc..c0a23a3 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -322,6 +322,7 @@
int async_int;
unsigned cur_power;
struct delayed_work chg_work;
+ struct delayed_work pmic_id_status_work;
enum usb_chg_state chg_state;
enum usb_chg_type chg_type;
u8 dcd_retries;
diff --git a/include/sound/q6afe.h b/include/sound/q6afe.h
index f93af1f..a7264e8 100644
--- a/include/sound/q6afe.h
+++ b/include/sound/q6afe.h
@@ -95,8 +95,7 @@
int afe_unregister_get_events(u16 port_id);
int afe_rt_proxy_port_write(u32 buf_addr_p, int bytes);
int afe_rt_proxy_port_read(u32 buf_addr_p, int bytes);
-int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
- u32 rate);
+int afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate);
int afe_port_stop_nowait(int port_id);
int afe_apply_gain(u16 port_id, u16 gain);
int afe_q6_interface_prepare(void);
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index 84e3150..d38dbd5 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -158,6 +158,7 @@
void *priv;
uint32_t io_mode;
uint64_t time_stamp;
+ atomic_t cmd_response;
};
void q6asm_audio_client_free(struct audio_client *ac);
diff --git a/sound/soc/codecs/wcd9304-tables.c b/sound/soc/codecs/wcd9304-tables.c
index 252cb0e..f0d76e8 100644
--- a/sound/soc/codecs/wcd9304-tables.c
+++ b/sound/soc/codecs/wcd9304-tables.c
@@ -288,6 +288,22 @@
[SITAR_A_CDC_IIR1_COEF_B3_CTL] = SITAR_A_CDC_IIR1_COEF_B3_CTL__POR,
[SITAR_A_CDC_IIR1_COEF_B4_CTL] = SITAR_A_CDC_IIR1_COEF_B4_CTL__POR,
[SITAR_A_CDC_IIR1_COEF_B5_CTL] = SITAR_A_CDC_IIR1_COEF_B5_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B1_CTL] = SITAR_A_CDC_IIR2_GAIN_B1_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B2_CTL] = SITAR_A_CDC_IIR2_GAIN_B2_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B3_CTL] = SITAR_A_CDC_IIR2_GAIN_B3_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B4_CTL] = SITAR_A_CDC_IIR2_GAIN_B4_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B5_CTL] = SITAR_A_CDC_IIR2_GAIN_B5_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B6_CTL] = SITAR_A_CDC_IIR2_GAIN_B6_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B7_CTL] = SITAR_A_CDC_IIR2_GAIN_B7_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_B8_CTL] = SITAR_A_CDC_IIR2_GAIN_B8_CTL__POR,
+ [SITAR_A_CDC_IIR2_CTL] = SITAR_A_CDC_IIR2_CTL__POR,
+ [SITAR_A_CDC_IIR2_GAIN_TIMER_CTL] =
+ SITAR_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
+ [SITAR_A_CDC_IIR2_COEF_B1_CTL] = SITAR_A_CDC_IIR2_COEF_B1_CTL__POR,
+ [SITAR_A_CDC_IIR2_COEF_B2_CTL] = SITAR_A_CDC_IIR2_COEF_B2_CTL__POR,
+ [SITAR_A_CDC_IIR2_COEF_B3_CTL] = SITAR_A_CDC_IIR2_COEF_B3_CTL__POR,
+ [SITAR_A_CDC_IIR2_COEF_B4_CTL] = SITAR_A_CDC_IIR2_COEF_B4_CTL__POR,
+ [SITAR_A_CDC_IIR2_COEF_B5_CTL] = SITAR_A_CDC_IIR2_COEF_B5_CTL__POR,
[SITAR_A_CDC_TOP_GAIN_UPDATE] = SITAR_A_CDC_TOP_GAIN_UPDATE__POR,
[SITAR_A_CDC_TOP_RDAC_DOUT_CTL] = SITAR_A_CDC_TOP_RDAC_DOUT_CTL__POR,
[SITAR_A_CDC_DEBUG_B1_CTL] = SITAR_A_CDC_DEBUG_B1_CTL__POR,
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index d9a8ae0..70d9fa9 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -378,9 +378,9 @@
int coeff_idx)
{
/* Address does not automatically update if reading */
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- 0x1F, band_idx * BAND_MAX + coeff_idx);
+ (band_idx * BAND_MAX + coeff_idx) & 0x1F);
/* Mask bits top 2 bits since they are reserved */
return ((snd_soc_read(codec,
@@ -439,27 +439,27 @@
{
/* Mask top 3 bits, 6-8 are reserved */
/* Update address manually each time */
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- 0x1F, band_idx * BAND_MAX + coeff_idx);
+ (band_idx * BAND_MAX + coeff_idx) & 0x1F);
/* Mask top 2 bits, 7-8 are reserved */
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
- 0x3F, (value >> 24) & 0x3F);
+ (value >> 24) & 0x3F);
/* Isolate 8bits at a time */
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B3_CTL + 16 * iir_idx),
- 0xFF, (value >> 16) & 0xFF);
+ (value >> 16) & 0xFF);
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B4_CTL + 16 * iir_idx),
- 0xFF, (value >> 8) & 0xFF);
+ (value >> 8) & 0xFF);
- snd_soc_update_bits(codec,
+ snd_soc_write(codec,
(SITAR_A_CDC_IIR1_COEF_B5_CTL + 16 * iir_idx),
- 0xFF, value & 0xFF);
+ value & 0xFF);
}
static int sitar_put_iir_band_audio_mixer(
@@ -562,9 +562,6 @@
SOC_SINGLE_TLV("ADC2 Volume", SITAR_A_TX_1_2_EN, 1, 3, 0, analog_gain),
SOC_SINGLE_TLV("ADC3 Volume", SITAR_A_TX_3_EN, 5, 3, 0, analog_gain),
- SOC_SINGLE("MICBIAS1 CAPLESS Switch", SITAR_A_MICB_1_CTL, 4, 1, 1),
- SOC_SINGLE("MICBIAS2 CAPLESS Switch", SITAR_A_MICB_2_CTL, 4, 1, 1),
-
SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, sitar_get_anc_slot,
sitar_put_anc_slot),
@@ -689,7 +686,7 @@
"ZERO", "EAR_HPH_L", "EAR_LINE_1",
};
-static const char *iir1_inp1_text[] = {
+static const char const *iir_inp1_text[] = {
"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "ZERO", "ZERO", "ZERO",
"ZERO", "ZERO", "ZERO", "RX1", "RX2", "RX3", "RX4", "RX5",
};
@@ -761,7 +758,10 @@
SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
static const struct soc_enum iir1_inp1_mux_enum =
- SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ1_B1_CTL, 0, 16, iir1_inp1_text);
+ SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ1_B1_CTL, 0, 16, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+ SOC_ENUM_SINGLE(SITAR_A_CDC_CONN_EQ2_B1_CTL, 0, 16, iir_inp1_text);
static const struct snd_kcontrol_new rx_mix1_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
@@ -823,6 +823,9 @@
static const struct snd_kcontrol_new iir1_inp1_mux =
SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+static const struct snd_kcontrol_new iir2_inp1_mux =
+ SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
static const struct snd_kcontrol_new anc1_mux =
SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
@@ -1960,6 +1963,8 @@
/* Sidetone */
SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
SND_SOC_DAPM_PGA("IIR1", SITAR_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+ SND_SOC_DAPM_PGA("IIR2", SITAR_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
};
@@ -2058,31 +2063,37 @@
{"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
{"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
{"RX1 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP1", "IIR2", "IIR2"},
{"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
{"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
{"RX1 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP2", "IIR2", "IIR2"},
{"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
{"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
{"RX2 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP1", "IIR2", "IIR2"},
{"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
{"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
{"RX2 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP2", "IIR2", "IIR2"},
{"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
{"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
{"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
{"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
{"RX3 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP1", "IIR2", "IIR2"},
{"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
{"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
{"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
{"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
{"RX3 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP2", "IIR2", "IIR2"},
/* TX */
@@ -2138,6 +2149,26 @@
/* IIR */
{"IIR1", NULL, "IIR1 INP1 MUX"},
{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
+ {"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
+ {"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
+ {"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
+ {"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
+
+ {"IIR2", NULL, "IIR2 INP1 MUX"},
+ {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR2 INP1 MUX", "RX1", "SLIM RX1"},
+ {"IIR2 INP1 MUX", "RX2", "SLIM RX2"},
+ {"IIR2 INP1 MUX", "RX3", "SLIM RX3"},
+ {"IIR2 INP1 MUX", "RX4", "SLIM RX4"},
+ {"IIR2 INP1 MUX", "RX5", "SLIM RX5"},
+
{"MIC BIAS1 Internal1", NULL, "LDO_H"},
{"MIC BIAS1 External", NULL, "LDO_H"},
{"MIC BIAS2 Internal1", NULL, "LDO_H"},
@@ -4644,6 +4675,12 @@
snd_soc_update_bits(codec, SITAR_A_MICB_2_CTL, 0x60,
(pdata->micbias.bias2_cfilt_sel << 5));
+ /* Set micbias capless mode */
+ snd_soc_update_bits(codec, SITAR_A_MICB_1_CTL, 0x10,
+ (pdata->micbias.bias1_cap_mode << 4));
+ snd_soc_update_bits(codec, SITAR_A_MICB_2_CTL, 0x10,
+ (pdata->micbias.bias2_cap_mode << 4));
+
for (i = 0; i < 6; j++, i += 2) {
if (flag & (0x01 << i)) {
value = (leg_mode & (0x01 << i)) ? 0x10 : 0x00;
diff --git a/sound/soc/msm/msm-dai-q6-hdmi.c b/sound/soc/msm/msm-dai-q6-hdmi.c
index dfb090e..c082ed7 100644
--- a/sound/soc/msm/msm-dai-q6-hdmi.c
+++ b/sound/soc/msm/msm-dai-q6-hdmi.c
@@ -158,54 +158,19 @@
int rc = 0;
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- /* PORT START should be set if prepare called in active state */
- rc = afe_q6_interface_prepare();
+ rc = afe_port_start(dai->id, &dai_data->port_config,
+ dai_data->rate);
if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to open AFE APR\n");
+ dev_err(dai->dev, "fail to open AFE port %x\n",
+ dai->id);
+ else
+ set_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
}
+
return rc;
}
-static int msm_dai_q6_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- /* Start/stop port without waiting for Q6 AFE response. Need to have
- * native q6 AFE driver propagates AFE response in order to handle
- * port start/stop command error properly if error does arise.
- */
- pr_debug("%s:port:%d cmd:%d dai_data->status_mask = %ld",
- __func__, dai->id, cmd, *dai_data->status_mask);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- afe_port_start_nowait(dai->id, &dai_data->port_config,
- dai_data->rate);
-
- set_bit(STATUS_PORT_STARTED, dai_data->status_mask);
- }
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- afe_port_stop_nowait(dai->id);
- clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
- }
- break;
-
- default:
- dev_err(dai->dev, "invalid Trigger command = %d\n", cmd);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
{
struct msm_dai_q6_hdmi_dai_data *dai_data;
@@ -253,7 +218,6 @@
static struct snd_soc_dai_ops msm_dai_q6_hdmi_ops = {
.prepare = msm_dai_q6_hdmi_prepare,
- .trigger = msm_dai_q6_hdmi_trigger,
.hw_params = msm_dai_q6_hdmi_hw_params,
.shutdown = msm_dai_q6_hdmi_shutdown,
};
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index 147316e..fb74c0a 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -407,55 +407,21 @@
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
&mi2s_dai_data->rx_dai.mi2s_dai_data :
&mi2s_dai_data->tx_dai.mi2s_dai_data);
+ u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ MI2S_RX : MI2S_TX);
int rc = 0;
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
/* PORT START should be set if prepare called in active state */
- rc = afe_q6_interface_prepare();
+ rc = afe_port_start(port_id, &dai_data->port_config,
+ dai_data->rate);
+
if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to open AFE APR\n");
- }
- return rc;
-}
-
-static int msm_dai_q6_mi2s_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
- dev_get_drvdata(dai->dev);
- struct msm_dai_q6_dai_data *dai_data =
- (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &mi2s_dai_data->rx_dai.mi2s_dai_data :
- &mi2s_dai_data->tx_dai.mi2s_dai_data);
- u16 port_id = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- MI2S_RX : MI2S_TX);
- int rc = 0;
-
- dev_dbg(dai->dev, "%s: cmd:%d dai_data->status_mask = %ld",
- __func__, cmd, *dai_data->status_mask);
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- afe_port_start_nowait(port_id,
- &dai_data->port_config, dai_data->rate);
+ dev_err(dai->dev, "fail to open AFE port %x\n",
+ dai->id);
+ else
set_bit(STATUS_PORT_STARTED,
dai_data->status_mask);
- }
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- afe_port_stop_nowait(port_id);
- clear_bit(STATUS_PORT_STARTED,
- dai_data->status_mask);
- }
- break;
-
- default:
- rc = -EINVAL;
}
return rc;
@@ -906,21 +872,20 @@
/*
* For AUX PCM Interface the below sequence of clk
- * settings and afe_open is a strict requirement.
- *
- * Also using afe_open instead of afe_port_start_nowait
- * to make sure the port is open before deasserting the
- * clock line. This is required because pcm register is
- * not written before clock deassert. Hence the hw does
- * not get updated with new setting if the below clock
- * assert/deasset and afe_open sequence is not followed.
+ * settings and opening of afe port is a strict requirement.
+ * afe_port_start is called to make sure the port
+ * is open before deasserting the clock line. This is
+ * required because pcm register is not written before
+ * clock deassert. Hence the hw does not get updated with
+ * new setting if the below clock assert/deassert and afe_port_start
+ * sequence is not followed.
*/
clk_reset(pcm_clk, CLK_RESET_ASSERT);
- afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
+ afe_port_start(PCM_RX, &dai_data->port_config, dai_data->rate);
- afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+ afe_port_start(PCM_TX, &dai_data->port_config, dai_data->rate);
if (dai_data->rate == 8000) {
pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
} else if (dai_data->rate == 16000) {
@@ -988,21 +953,22 @@
/*
* For AUX PCM Interface the below sequence of clk
- * settings and afe_open is a strict requirement.
- *
- * Also using afe_open instead of afe_port_start_nowait
- * to make sure the port is open before deasserting the
- * clock line. This is required because pcm register is
- * not written before clock deassert. Hence the hw does
- * not get updated with new setting if the below clock
- * assert/deasset and afe_open sequence is not followed.
+ * settings and opening of afe port is a strict requirement.
+ * afe_port_start is called to make sure the port
+ * is open before deasserting the clock line. This is
+ * required because pcm register is not written before
+ * clock deassert. Hence the hw does not get updated with
+ * new setting if the below clock assert/deassert and afe_port_start
+ * sequence is not followed.
*/
clk_reset(sec_pcm_clk, CLK_RESET_ASSERT);
- afe_open(SECONDARY_PCM_RX, &dai_data->port_config, dai_data->rate);
+ afe_port_start(SECONDARY_PCM_RX, &dai_data->port_config,
+ dai_data->rate);
- afe_open(SECONDARY_PCM_TX, &dai_data->port_config, dai_data->rate);
+ afe_port_start(SECONDARY_PCM_TX, &dai_data->port_config,
+ dai_data->rate);
if (dai_data->rate == 8000) {
pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
} else if (dai_data->rate == 16000) {
@@ -1034,11 +1000,24 @@
int rc = 0;
if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- /* PORT START should be set if prepare called in active state */
- rc = afe_q6_interface_prepare();
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ rc = afe_start_pseudo_port(dai->id);
+ default:
+ rc = afe_port_start(dai->id, &dai_data->port_config,
+ dai_data->rate);
+ }
+
if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to open AFE APR\n");
+ dev_err(dai->dev, "fail to open AFE port %x\n",
+ dai->id);
+ else
+ set_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
}
+
return rc;
}
@@ -1071,63 +1050,6 @@
}
-static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- int rc = 0;
-
- /* Start/stop port without waiting for Q6 AFE response. Need to have
- * native q6 AFE driver propagates AFE response in order to handle
- * port start/stop command error properly if error does arise.
- */
- pr_debug("%s:port:%d cmd:%d dai_data->status_mask = %ld",
- __func__, dai->id, cmd, *dai_data->status_mask);
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- afe_pseudo_port_start_nowait(dai->id);
- break;
- default:
- afe_port_start_nowait(dai->id,
- &dai_data->port_config, dai_data->rate);
- break;
- }
- set_bit(STATUS_PORT_STARTED,
- dai_data->status_mask);
- }
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- afe_pseudo_port_stop_nowait(dai->id);
- break;
- default:
- afe_port_stop_nowait(dai->id);
- break;
- }
- clear_bit(STATUS_PORT_STARTED,
- dai_data->status_mask);
- }
- break;
-
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
static int msm_dai_q6_dai_auxpcm_probe(struct snd_soc_dai *dai)
{
struct msm_dai_q6_dai_data *dai_data;
@@ -1535,7 +1457,6 @@
static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
.startup = msm_dai_q6_mi2s_startup,
.prepare = msm_dai_q6_mi2s_prepare,
- .trigger = msm_dai_q6_mi2s_trigger,
.hw_params = msm_dai_q6_mi2s_hw_params,
.shutdown = msm_dai_q6_mi2s_shutdown,
.set_fmt = msm_dai_q6_mi2s_set_fmt,
@@ -1543,7 +1464,6 @@
static struct snd_soc_dai_ops msm_dai_q6_ops = {
.prepare = msm_dai_q6_prepare,
- .trigger = msm_dai_q6_trigger,
.hw_params = msm_dai_q6_hw_params,
.shutdown = msm_dai_q6_shutdown,
.set_fmt = msm_dai_q6_set_fmt,
diff --git a/sound/soc/msm/msm-pcm-voip.c b/sound/soc/msm/msm-pcm-voip.c
index 570d71c..b18117c 100644
--- a/sound/soc/msm/msm-pcm-voip.c
+++ b/sound/soc/msm/msm-pcm-voip.c
@@ -106,10 +106,9 @@
wait_queue_head_t in_wait;
struct mutex lock;
- struct mutex in_lock;
- struct mutex out_lock;
spinlock_t dsp_lock;
+ spinlock_t dsp_ul_lock;
uint32_t mode;
uint32_t rate_type;
@@ -268,7 +267,7 @@
return;
/* Copy up-link packet into out_queue. */
- spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
+ spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
/* discarding UL packets till start is received */
if (!list_empty(&prtd->free_out_queue) && prtd->capture_start) {
@@ -321,10 +320,10 @@
pr_debug("ul_pkt: pkt_len =%d, frame.len=%d\n", pkt_len,
buf_node->frame.len);
prtd->pcm_capture_irq_pos += prtd->pcm_capture_count;
- spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
snd_pcm_period_elapsed(prtd->capture_substream);
} else {
- spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
pr_err("UL data dropped\n");
}
@@ -516,6 +515,7 @@
struct voip_buf_node *buf_node = NULL;
struct snd_pcm_runtime *runtime = substream->runtime;
struct voip_drv_info *prtd = runtime->private_data;
+ unsigned long dsp_flags;
int count = frames_to_bytes(runtime, frames);
pr_debug("%s: count = %d, frames=%d\n", __func__, count, (int)frames);
@@ -525,8 +525,8 @@
prtd->state == VOIP_STOPPED),
1 * HZ);
if (ret > 0) {
- mutex_lock(&prtd->in_lock);
if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+ spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
buf_node =
list_first_entry(&prtd->free_in_queue,
struct voip_buf_node, list);
@@ -539,13 +539,13 @@
ret = copy_from_user(&buf_node->frame,
buf, count);
list_add_tail(&buf_node->list, &prtd->in_queue);
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
} else {
pr_err("%s: Write cnt %d is > VOIP_MAX_VOC_PKT_SIZE\n",
__func__, count);
ret = -ENOMEM;
}
- mutex_unlock(&prtd->in_lock);
} else if (ret == 0) {
pr_err("%s: No free DL buffs\n", __func__);
ret = -ETIMEDOUT;
@@ -564,6 +564,7 @@
struct voip_buf_node *buf_node = NULL;
struct snd_pcm_runtime *runtime = substream->runtime;
struct voip_drv_info *prtd = runtime->private_data;
+ unsigned long dsp_flags;
count = frames_to_bytes(runtime, frames);
@@ -575,9 +576,9 @@
1 * HZ);
if (ret > 0) {
- mutex_lock(&prtd->out_lock);
if (count <= VOIP_MAX_VOC_PKT_SIZE) {
+ spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
buf_node = list_first_entry(&prtd->out_queue,
struct voip_buf_node, list);
list_del(&buf_node->list);
@@ -596,13 +597,14 @@
}
list_add_tail(&buf_node->list,
&prtd->free_out_queue);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
+
} else {
pr_err("%s: Read count %d > VOIP_MAX_VOC_PKT_SIZE\n",
__func__, count);
ret = -ENOMEM;
}
- mutex_unlock(&prtd->out_lock);
} else if (ret == 0) {
pr_err("%s: No UL data available\n", __func__);
@@ -636,6 +638,7 @@
struct snd_pcm_substream *p_substream, *c_substream;
struct snd_pcm_runtime *runtime;
struct voip_drv_info *prtd;
+ unsigned long dsp_flags;
if (substream == NULL) {
pr_err("substream is NULL\n");
@@ -674,7 +677,7 @@
goto capt;
}
if (p_dma_buf->area != NULL) {
- mutex_lock(&prtd->in_lock);
+ spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_for_each_safe(ptr, next, &prtd->in_queue) {
buf_node = list_entry(ptr,
struct voip_buf_node, list);
@@ -685,11 +688,11 @@
struct voip_buf_node, list);
list_del(&buf_node->list);
}
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
dma_free_coherent(p_substream->pcm->card->dev,
runtime->hw.buffer_bytes_max, p_dma_buf->area,
p_dma_buf->addr);
p_dma_buf->area = NULL;
- mutex_unlock(&prtd->in_lock);
}
/* release out_queue and free_out_queue */
capt: c_substream = prtd->capture_substream;
@@ -703,7 +706,7 @@
goto done;
}
if (c_dma_buf->area != NULL) {
- mutex_lock(&prtd->out_lock);
+ spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
list_for_each_safe(ptr, next, &prtd->out_queue) {
buf_node = list_entry(ptr,
struct voip_buf_node, list);
@@ -714,11 +717,11 @@
struct voip_buf_node, list);
list_del(&buf_node->list);
}
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
dma_free_coherent(c_substream->pcm->card->dev,
runtime->hw.buffer_bytes_max, c_dma_buf->area,
c_dma_buf->addr);
c_dma_buf->area = NULL;
- mutex_unlock(&prtd->out_lock);
}
done:
prtd->capture_substream = NULL;
@@ -888,19 +891,15 @@
for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
buf_node = (void *)dma_buf->area + offset;
- mutex_lock(&voip_info.in_lock);
list_add_tail(&buf_node->list,
&voip_info.free_in_queue);
- mutex_unlock(&voip_info.in_lock);
offset = offset + sizeof(struct voip_buf_node);
}
} else {
for (i = 0; i < VOIP_MAX_Q_LEN; i++) {
buf_node = (void *) dma_buf->area + offset;
- mutex_lock(&voip_info.out_lock);
list_add_tail(&buf_node->list,
&voip_info.free_out_queue);
- mutex_unlock(&voip_info.out_lock);
offset = offset + sizeof(struct voip_buf_node);
}
}
@@ -1142,10 +1141,9 @@
memset(&voip_info, 0, sizeof(voip_info));
voip_info.mode = MODE_PCM;
mutex_init(&voip_info.lock);
- mutex_init(&voip_info.in_lock);
- mutex_init(&voip_info.out_lock);
spin_lock_init(&voip_info.dsp_lock);
+ spin_lock_init(&voip_info.dsp_ul_lock);
init_waitqueue_head(&voip_info.out_wait);
init_waitqueue_head(&voip_info.in_wait);
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 7b16adb..2f6772d 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -376,11 +376,10 @@
if ((afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
(cal_block.cal_size > afe_cal_addr[path].cal_size)) {
if (afe_cal_addr[path].cal_paddr != 0)
- afe_cmd_memory_unmap_nowait(
+ afe_cmd_memory_unmap(
afe_cal_addr[path].cal_paddr);
- afe_cmd_memory_map_nowait(cal_block.cal_paddr,
- cal_block.cal_size);
+ afe_cmd_memory_map(cal_block.cal_paddr, cal_block.cal_size);
afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
afe_cal_addr[path].cal_size = cal_block.cal_size;
}
@@ -400,12 +399,21 @@
"cal size = %d, cal addr = 0x%x\n", __func__,
port_id, path, cal_block.cal_size, cal_block.cal_paddr);
+ atomic_set(&this_afe.state, 1);
result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
if (result < 0) {
pr_err("%s: AFE cal for port %d failed\n",
__func__, port_id);
}
+ result = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!result) {
+ pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
+ goto done;
+ }
+
pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
done:
return;
@@ -421,8 +429,11 @@
afe_send_cal_block(RX_CAL, port_id);
}
-int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
- u32 rate) /* This function is no blocking */
+/* This function sends multi-channel HDMI configuration command and AFE
+ * calibration which is only supported by QDSP6 on 8960 and onward.
+ */
+int afe_port_start(u16 port_id, union afe_port_config *afe_config,
+ u32 rate)
{
struct afe_port_start_command start;
struct afe_audioif_config_command config;
@@ -442,11 +453,9 @@
(port_id == RT_PROXY_DAI_001_TX))
port_id = VIRTUAL_ID_TO_PORTID(port_id);
- if (this_afe.apr == NULL) {
- pr_err("%s: AFE APR is not registered\n", __func__);
- ret = -ENODEV;
+ ret = afe_q6_interface_prepare();
+ if (IS_ERR_VALUE(ret))
return ret;
- }
if (port_id == HDMI_RX) {
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
@@ -513,6 +522,8 @@
config.port_id = port_id;
config.port = *afe_config;
+ atomic_set(&this_afe.state, 1);
+ atomic_set(&this_afe.status, 0);
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
if (ret < 0) {
pr_err("%s: AFE enable for port %d failed\n", __func__,
@@ -521,6 +532,21 @@
goto fail_cmd;
}
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ if (atomic_read(&this_afe.status) != 0) {
+ pr_err("%s: config cmd failed\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
/* send AFE cal */
afe_send_cal(port_id);
@@ -535,6 +561,7 @@
start.gain = 0x2000;
start.sample_rate = rate;
+ atomic_set(&this_afe.state, 1);
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
if (IS_ERR_VALUE(ret)) {
@@ -544,6 +571,15 @@
goto fail_cmd;
}
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout PORT START\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
if (this_afe.task != current)
this_afe.task = current;
@@ -555,6 +591,7 @@
return ret;
}
+/* This function should be used by 8660 exclusively */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
struct afe_port_start_command start;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 9136f93..2993e37 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -445,6 +445,7 @@
spin_lock_init(&ac->port[lcnt].dsp_lock);
}
atomic_set(&ac->cmd_state, 0);
+ atomic_set(&ac->cmd_response, 0);
pr_debug("%s: session[%d]\n", __func__, ac->session);
@@ -863,6 +864,10 @@
case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
if (atomic_read(&ac->cmd_state)) {
atomic_set(&ac->cmd_state, 0);
+ if (payload[1] == ADSP_EUNSUPPORTED)
+ atomic_set(&ac->cmd_response, 1);
+ else
+ atomic_set(&ac->cmd_response, 0);
wake_up(&ac->cmd_wait);
}
if (ac->cb)
@@ -1438,6 +1443,10 @@
rc);
goto fail_cmd;
}
+ if (atomic_read(&ac->cmd_response)) {
+ pr_err("%s: format = %x not supported\n", __func__, format);
+ goto fail_cmd;
+ }
return 0;
fail_cmd:
return -EINVAL;