Merge "msm: kgsl: Add VBIF error detection" into msm-3.0
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
new file mode 100644
index 0000000..8ebd3ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
@@ -0,0 +1,30 @@
+Resource Power Manager (RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc. The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements. RPM accepts resource
+requests from multiple RPM masters. It arbitrates and aggregates the
+requests, and configures the shared resources. The RPM masters are
+the application processor, the modem processor, as well as hardware
+accelerators. The RPM driver communicates with the hardware engine using
+SMD.
+
+The devicetree representation of the RPM block should be:
+
+Required properties
+
+- compatible: "qcom,rpm-smd"
+- rpm-channel-name: The string corresponding to the channel name of the
+ peripheral subsystem
+- rpm-channel-type: The internal SMD edge for this subsystem found in
+ <mach/msm_smd.h>
+
+Example:
+
+	qcom,rpm-smd {
+		compatible = "qcom,rpm-smd";
+		rpm-channel-name = "rpm_requests";
+		rpm-channel-type = <15>; /* SMD_APPS_RPM */
+	};
+
diff --git a/arch/arm/boot/dts/msmcopper-regulator.dtsi b/arch/arm/boot/dts/msmcopper-regulator.dtsi
index 48d5720..bb26e00 100644
--- a/arch/arm/boot/dts/msmcopper-regulator.dtsi
+++ b/arch/arm/boot/dts/msmcopper-regulator.dtsi
@@ -313,6 +313,7 @@
regulator-max-microvolt = <1150000>;
qcom,enable-time = <500>;
qcom,pull-down-enable = <1>;
+ regulator-always-on;
status = "okay";
};
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index 66f5fa1..2230e0e 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -134,7 +134,7 @@
compatible = "qcom,spi-qup-v2";
reg = <0xf9924000 0x1000>;
interrupts = <0 96 0>;
- spi-max-frequency = <24000000>;
+ spi-max-frequency = <25000000>;
};
slim@fe12f000 {
@@ -278,7 +278,7 @@
reg = <0xfe200000 0x00100>,
<0xfd485100 0x00010>;
- qcom,firmware-name = "lpass";
+ qcom,firmware-name = "adsp";
};
qcom,pronto@fb21b000 {
@@ -294,4 +294,10 @@
qcom,ocmem@fdd00000 {
compatible = "qcom,msm_ocmem";
};
+
+ qcom,rpm-smd {
+ compatible = "qcom,rpm-smd";
+ rpm-channel-name = "rpm_requests";
+ rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ };
};
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index c644cf9..de469da 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -202,3 +202,4 @@
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRC_CCITT=y
CONFIG_LIBCRC32C=y
+CONFIG_MSM_TZ_LOG=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 11b1f73..c93598d 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -227,7 +227,7 @@
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=y
CONFIG_BT_HCISMD=y
-CONFIG_CFG80211=y
+CONFIG_CFG80211=m
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
@@ -331,6 +331,7 @@
CONFIG_MT9M114=y
CONFIG_IMX074_ACT=y
CONFIG_MSM_CAMERA_FLASH_SC628A=y
+CONFIG_MSM_CAMERA_FLASH_TPS61310=y
CONFIG_OV2720=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 886a691..06211d5 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -234,7 +234,7 @@
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=y
CONFIG_BT_HCISMD=y
-CONFIG_CFG80211=y
+CONFIG_CFG80211=m
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
@@ -340,6 +340,7 @@
CONFIG_MT9M114=y
CONFIG_IMX074_ACT=y
CONFIG_MSM_CAMERA_FLASH_SC628A=y
+CONFIG_MSM_CAMERA_FLASH_TPS61310=y
CONFIG_OV2720=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c9d0000..423e71e 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -257,8 +257,8 @@
select MSM_PIL
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_RPM
select MSM_PM8X60 if PM
+ select MSM_RPM_SMD
config ARCH_FSM9XXX
bool "FSM9XXX"
@@ -387,6 +387,10 @@
bool "Resource Power Manager"
select MSM_MPM
+config MSM_RPM_SMD
+ depends on MSM_SMD
+	bool "Support for using SMD as the transport layer for communications with RPM"
+
config MSM_MPM
bool "Modem Power Manager"
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 2295679..6a90d6d 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -364,3 +364,5 @@
obj-$(CONFIG_MSM_HSIC_SYSMON) += hsic_sysmon.o
obj-$(CONFIG_MSM_HSIC_SYSMON_TEST) += hsic_sysmon_test.o
+
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index a65c5b0..6986a29 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -370,6 +370,7 @@
},
[L2] = {
.hfpll_base = MSM_HFPLL_BASE + 0x400,
+ .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
.aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
.l2cpmr_iaddr = L2CPMR_IADDR,
.vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index 36953ef..114b271 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -188,6 +188,7 @@
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A,
};
static struct msm_gpiomux_config apq8064_cam_2d_configs[] = {
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 3431cd0..e873498 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -342,6 +342,12 @@
.pull = GPIOMUX_PULL_KEEPER,
};
+static struct gpiomux_setting mbhc_hs_detect = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
static struct gpiomux_setting cdc_mclk = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_8MA,
@@ -730,6 +736,12 @@
static struct msm_gpiomux_config apq8064_audio_codec_configs[] __initdata = {
{
+ .gpio = 38,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &mbhc_hs_detect,
+ },
+ },
+ {
.gpio = 39,
.settings = {
[GPIOMUX_SUSPENDED] = &cdc_mclk,
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 3d2dbaa..40222b8 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -549,7 +549,7 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 1),
/* ID name always_on pd en_t supply reg_ID */
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index c123138..7ab3894 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -95,14 +95,15 @@
#ifdef CONFIG_MSM_IOMMU
#define MSM_ION_MM_SIZE 0x3800000
#define MSM_ION_SF_SIZE 0
+#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
#define MSM_ION_HEAP_NUM 7
#else
#define MSM_ION_MM_SIZE MSM_PMEM_ADSP_SIZE
#define MSM_ION_SF_SIZE MSM_PMEM_SIZE
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_HEAP_NUM 8
#endif
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
-#define MSM_ION_QSECOM_SIZE 0x300000 /* (3MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
#else
@@ -864,9 +865,11 @@
static void __init apq8064_ehci_host_init(void)
{
- if (machine_is_apq8064_liquid()) {
- msm_ehci_host_pdata3.dock_connect_irq =
- PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+ if (machine_is_apq8064_liquid() || machine_is_mpq8064_cdp() ||
+ machine_is_mpq8064_hrd() || machine_is_mpq8064_dtv()) {
+ if (machine_is_apq8064_liquid())
+ msm_ehci_host_pdata3.dock_connect_irq =
+ PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
apq8064_device_ehci_host3.dev.platform_data =
&msm_ehci_host_pdata3;
@@ -1129,23 +1132,31 @@
#define ISA1200_HAP_LEN_GPIO PM8921_GPIO_PM_TO_SYS(20)
#define ISA1200_HAP_CLK PM8921_GPIO_PM_TO_SYS(44)
-static int isa1200_power(int on)
+static int isa1200_clk_enable(bool on)
{
int rc = 0;
- gpio_set_value_cansleep(ISA1200_HAP_CLK, !!on);
+ gpio_set_value_cansleep(ISA1200_HAP_CLK, on);
- if (on)
+ if (on) {
rc = pm8xxx_aux_clk_control(CLK_MP3_2, XO_DIV_1, true);
- else
+ if (rc) {
+ pr_err("%s: unable to write aux clock register(%d)\n",
+ __func__, rc);
+ goto err_gpio_dis;
+ }
+ } else {
rc = pm8xxx_aux_clk_control(CLK_MP3_2, XO_DIV_NONE, true);
-
- if (rc) {
- pr_err("%s: unable to write aux clock register(%d)\n",
- __func__, rc);
+ if (rc)
+ pr_err("%s: unable to write aux clock register(%d)\n",
+ __func__, rc);
}
return rc;
+
+err_gpio_dis:
+ gpio_set_value_cansleep(ISA1200_HAP_CLK, !on);
+ return rc;
}
static int isa1200_dev_setup(bool enable)
@@ -1187,7 +1198,7 @@
static struct isa1200_platform_data isa1200_1_pdata = {
.name = "vibrator",
.dev_setup = isa1200_dev_setup,
- .power_on = isa1200_power,
+ .clk_enable = isa1200_clk_enable,
.hap_en_gpio = ISA1200_HAP_EN_GPIO,
.hap_len_gpio = ISA1200_HAP_LEN_GPIO,
.max_timeout = 15000,
@@ -2279,6 +2290,33 @@
.id = -1,
};
+static int rf4ce_gpio_init(void)
+{
+ if (!machine_is_mpq8064_cdp())
+ return -EINVAL;
+
+ /* CC2533 SRDY Input */
+ if (!gpio_request(SX150X_GPIO(4, 6), "rf4ce_srdy")) {
+ gpio_direction_input(SX150X_GPIO(4, 6));
+ gpio_export(SX150X_GPIO(4, 6), true);
+ }
+
+ /* CC2533 MRDY Output */
+ if (!gpio_request(SX150X_GPIO(4, 5), "rf4ce_mrdy")) {
+ gpio_direction_output(SX150X_GPIO(4, 5), 1);
+ gpio_export(SX150X_GPIO(4, 5), true);
+ }
+
+ /* CC2533 Reset Output */
+ if (!gpio_request(SX150X_GPIO(4, 7), "rf4ce_reset")) {
+ gpio_direction_output(SX150X_GPIO(4, 7), 0);
+ gpio_export(SX150X_GPIO(4, 7), true);
+ }
+
+ return 0;
+}
+late_initcall(rf4ce_gpio_init);
+
static struct platform_device *mpq_devices[] __initdata = {
&msm_device_sps_apq8064,
&mpq8064_device_qup_i2c_gsbi5,
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index cc5b13c..c9d720c 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -198,12 +198,9 @@
#ifdef CONFIG_MSM_CAMERA_FLASH
static struct msm_camera_sensor_flash_src msm_flash_src = {
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
- ._fsrc.ext_driver_src.led_en = GPIO_CAM_GP_LED_EN1,
- ._fsrc.ext_driver_src.led_flash_en = GPIO_CAM_GP_LED_EN2,
-#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
- defined(CONFIG_GPIO_SX150X_MODULE))
- ._fsrc.ext_driver_src.expander_info = cam_expander_info,
-#endif
+ ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
+ ._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_TPS61310,
};
#endif
@@ -536,7 +533,8 @@
};
static struct msm_camera_sensor_flash_data flash_s5k3l1yx = {
- .flash_type = MSM_CAMERA_FLASH_NONE,
+ .flash_type = MSM_CAMERA_FLASH_LED,
+ .flash_src = &msm_flash_src
};
static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
@@ -585,6 +583,15 @@
struct msm_camera_sensor_info *s_info;
s_info = &msm_camera_sensor_s5k3l1yx_data;
s_info->sensor_platform_info->mount_angle = 0;
+ msm_flash_src._fsrc.ext_driver_src.led_en =
+ GPIO_CAM_GP_LED_EN1;
+ msm_flash_src._fsrc.ext_driver_src.led_flash_en =
+ GPIO_CAM_GP_LED_EN2;
+#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
+ defined(CONFIG_GPIO_SX150X_MODULE))
+ msm_flash_src._fsrc.ext_driver_src.expander_info =
+ cam_expander_info;
+#endif
}
platform_device_register(&msm_camera_server);
@@ -615,11 +622,9 @@
I2C_BOARD_INFO("s5k3l1yx", 0x20),
.platform_data = &msm_camera_sensor_s5k3l1yx_data,
},
-#ifdef CONFIG_MSM_CAMERA_FLASH_SC628A
{
- I2C_BOARD_INFO("sc628a", 0x6E),
+ I2C_BOARD_INFO("tps61310", 0x66),
},
-#endif
};
struct msm_camera_board_info msm8930_camera_board_info = {
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index fc89a11..2f24c95 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -84,6 +84,9 @@
REGULATOR_SUPPLY("VDDIO_CDC", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar-slim"),
+ REGULATOR_SUPPLY("VDDIO_CDC", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar1p1-slim"),
REGULATOR_SUPPLY("vddp", "0-0048"),
};
VREG_CONSUMERS(L12) = {
@@ -118,6 +121,8 @@
REGULATOR_SUPPLY("8038_l20", NULL),
REGULATOR_SUPPLY("VDDD_CDC_D", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar-slim"),
+ REGULATOR_SUPPLY("VDDD_CDC_D", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar1p1-slim"),
};
VREG_CONSUMERS(L21) = {
REGULATOR_SUPPLY("8038_l21", NULL),
@@ -159,6 +164,7 @@
VREG_CONSUMERS(S4) = {
REGULATOR_SUPPLY("8038_s4", NULL),
REGULATOR_SUPPLY("CDC_VDD_CP", "sitar-slim"),
+ REGULATOR_SUPPLY("CDC_VDD_CP", "sitar1p1-slim"),
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8038_s5", NULL),
@@ -446,11 +452,11 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L16, "8038_l16", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L16, "8038_l16", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 0),
- PM8XXX_NLDO1200(L19, "8038_l19", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L19, "8038_l19", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 1),
- PM8XXX_NLDO1200(L27, "8038_l27", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L27, "8038_l27", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 2),
};
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 1c4251c..0fff814 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -131,14 +131,15 @@
#ifdef CONFIG_MSM_IOMMU
#define MSM_ION_MM_SIZE 0x3800000 /* Need to be multiple of 64K */
#define MSM_ION_SF_SIZE 0x0
+#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
#define MSM_ION_HEAP_NUM 7
#else
#define MSM_ION_SF_SIZE MSM_PMEM_SIZE
#define MSM_ION_MM_SIZE MSM_PMEM_ADSP_SIZE
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_HEAP_NUM 8
#endif
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
-#define MSM_ION_QSECOM_SIZE 0x300000 /* (3MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
@@ -797,6 +798,70 @@
.platform_data = &sitar_platform_data,
},
};
+
+static struct wcd9xxx_pdata sitar1p1_platform_data = {
+ .slimbus_slave_device = {
+ .name = "sitar-slave",
+ .e_addr = {0, 0, 0x70, 0, 0x17, 2},
+ },
+ .irq = MSM_GPIO_TO_INT(62),
+ .irq_base = SITAR_INTERRUPT_BASE,
+ .num_irqs = NR_WCD9XXX_IRQS,
+ .reset_gpio = 42,
+ .micbias = {
+ .ldoh_v = SITAR_LDOH_2P85_V,
+ .cfilt1_mv = 1800,
+ .cfilt2_mv = 1800,
+ .bias1_cfilt_sel = SITAR_CFILT1_SEL,
+ .bias2_cfilt_sel = SITAR_CFILT2_SEL,
+ },
+ .regulator = {
+ {
+ .name = "CDC_VDD_CP",
+ .min_uV = 1950000,
+ .max_uV = 2200000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_CP_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_RX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_RX_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_TX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_TX_CUR_MAX,
+ },
+ {
+ .name = "VDDIO_CDC",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_VDDIO_CDC_CUR_MAX,
+ },
+ {
+ .name = "VDDD_CDC_D",
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_A_1P2V",
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX,
+ },
+ },
+};
+
+static struct slim_device msm_slim_sitar1p1 = {
+ .name = "sitar1p1-slim",
+ .e_addr = {0, 1, 0x70, 0, 0x17, 2},
+ .dev = {
+ .platform_data = &sitar1p1_platform_data,
+ },
+};
#endif
@@ -806,6 +871,10 @@
.bus_num = 1,
.slim_slave = &msm_slim_sitar,
},
+ {
+ .bus_num = 1,
+ .slim_slave = &msm_slim_sitar1p1,
+ },
#endif
/* add more slimbus slaves as needed */
};
@@ -2267,6 +2336,8 @@
platform_device_register(&msm8930_device_rpm_regulator);
msm_clock_init(&msm8930_clock_init_data);
msm8960_device_otg.dev.platform_data = &msm_otg_pdata;
+ android_usb_pdata.swfi_latency =
+ msm_rpmrs_levels[0].latency_us;
msm8930_init_gpiomux();
msm8960_device_qup_spi_gsbi1.dev.platform_data =
&msm8960_qup_spi_gsbi1_pdata;
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 371bb53..ad9b03d 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -197,6 +197,7 @@
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A,
};
#endif
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 3923ecf..edb6f03 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -497,11 +497,11 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 1),
- PM8XXX_NLDO1200(L27, "8921_l27", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L27, "8921_l27", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 2),
- PM8XXX_NLDO1200(L28, "8921_l28", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L28, "8921_l28", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 3),
PM8XXX_LDO(L29, "8921_l29", 0, 1, 2050000, 2100000, 200, "8921_s8",
0, 4),
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 06affd4..67697d2 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -754,6 +754,7 @@
.phy_type = SNPS_28NM_INTEGRATED_PHY,
.vbus_power = msm_hsusb_vbus_power,
.disable_reset_on_disconnect = true,
+ .enable_lpm_on_dev_suspend = true,
};
static struct msm_hsic_peripheral_platform_data msm_hsic_peripheral_pdata = {
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index d33f478..f5fe3d1 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -39,6 +39,7 @@
#endif
#include <mach/msm_memtypes.h>
#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
#include <mach/qpnp-int.h>
#include <mach/socinfo.h>
#include "clock.h"
@@ -390,6 +391,40 @@
.id = -1,
};
+#define SHARED_IMEM_TZ_BASE 0xFE805720
+static struct resource tzlog_resources[] = {
+ {
+ .start = SHARED_IMEM_TZ_BASE,
+ .end = SHARED_IMEM_TZ_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device apq_device_tz_log = {
+ .name = "tz_log",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(tzlog_resources),
+ .resource = tzlog_resources,
+};
+
+#ifdef CONFIG_HW_RANDOM_MSM
+/* PRNG device */
+#define MSM_PRNG_PHYS 0xF9BFF000
+static struct resource rng_resources = {
+ .flags = IORESOURCE_MEM,
+ .start = MSM_PRNG_PHYS,
+ .end = MSM_PRNG_PHYS + SZ_512 - 1,
+};
+
+struct platform_device msm8974_device_rng = {
+ .name = "msm_rng",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &rng_resources,
+};
+#endif
+
+
void __init msm_copper_add_devices(void)
{
#ifdef CONFIG_ION_MSM
@@ -399,6 +434,10 @@
platform_device_register(&android_usb_device);
platform_add_devices(msm_copper_stub_regulator_devices,
msm_copper_stub_regulator_devices_len);
+ platform_device_register(&apq_device_tz_log);
+#ifdef CONFIG_HW_RANDOM_MSM
+ platform_device_register(&msm8974_device_rng);
+#endif
}
/*
@@ -410,6 +449,7 @@
void __init msm_copper_add_drivers(void)
{
msm_smd_init();
+ msm_rpm_driver_init();
msm_spm_device_init();
regulator_stub_init();
}
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index f51eb5b..fcd6386 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -2651,14 +2651,14 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
static struct clk measure_clk = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk),
};
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 1ebf597..3ee59b1 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -42,6 +42,7 @@
/* Peripheral clock registers. */
#define ADM0_PBUS_CLK_CTL_REG REG(0x2208)
+#define SFAB_SATA_S_HCLK_CTL_REG REG(0x2480)
#define CE1_HCLK_CTL_REG REG(0x2720)
#define CE1_CORE_CLK_CTL_REG REG(0x2724)
#define PRNG_CLK_NS_REG REG(0x2E80)
@@ -108,10 +109,12 @@
#define TSIF_REF_CLK_MD_REG REG(0x270C)
#define TSIF_REF_CLK_NS_REG REG(0x2710)
#define TSSC_CLK_CTL_REG REG(0x2CA0)
+#define SATA_HCLK_CTL_REG REG(0x2C00)
#define SATA_CLK_SRC_NS_REG REG(0x2C08)
#define SATA_RXOOB_CLK_CTL_REG REG(0x2C0C)
#define SATA_PMALIVE_CLK_CTL_REG REG(0x2C10)
#define SATA_PHY_REF_CLK_CTL_REG REG(0x2C14)
+#define SATA_ACLK_CTL_REG REG(0x2C20)
#define SATA_PHY_CFG_CLK_CTL_REG REG(0x2C40)
#define USB_FSn_HCLK_CTL_REG(n) REG(0x2960+(0x20*((n)-1)))
#define USB_FSn_RESET_REG(n) REG(0x2974+(0x20*((n)-1)))
@@ -1926,6 +1929,69 @@
}
};
+#define F_SATA(f, s, d) \
+ { \
+ .freq_hz = f, \
+ .src_clk = &s##_clk.c, \
+ .ns_val = NS_DIVSRC(6, 3, d, 2, 0, s##_to_bb_mux), \
+ }
+
+static struct clk_freq_tbl clk_tbl_sata[] = {
+ F_SATA( 0, gnd, 1),
+ F_SATA( 48000000, pll8, 8),
+ F_SATA(100000000, pll3, 12),
+ F_END
+};
+
+static struct rcg_clk sata_src_clk = {
+ .b = {
+ .ctl_reg = SATA_CLK_SRC_NS_REG,
+ .halt_check = NOCHECK,
+ },
+ .ns_reg = SATA_CLK_SRC_NS_REG,
+ .root_en_mask = BIT(7),
+ .ns_mask = BM(6, 0),
+ .set_rate = set_rate_nop,
+ .freq_tbl = clk_tbl_sata,
+ .current_freq = &rcg_dummy_freq,
+ .c = {
+ .dbg_name = "sata_src_clk",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 50000000, NOMINAL, 100000000),
+ CLK_INIT(sata_src_clk.c),
+ },
+};
+
+static struct branch_clk sata_rxoob_clk = {
+ .b = {
+ .ctl_reg = SATA_RXOOB_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 26,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_rxoob_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_rxoob_clk.c),
+ },
+};
+
+static struct branch_clk sata_pmalive_clk = {
+ .b = {
+ .ctl_reg = SATA_PMALIVE_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 25,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_pmalive_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_pmalive_clk.c),
+ },
+};
+
static struct branch_clk sata_phy_ref_clk = {
.b = {
.ctl_reg = SATA_PHY_REF_CLK_CTL_REG,
@@ -1941,6 +2007,47 @@
},
};
+static struct branch_clk sata_a_clk = {
+ .b = {
+ .ctl_reg = SATA_ACLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEA_REG,
+ .halt_bit = 12,
+ },
+ .c = {
+ .dbg_name = "sata_a_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_a_clk.c),
+ },
+};
+
+static struct branch_clk sata_p_clk = {
+ .b = {
+ .ctl_reg = SATA_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 27,
+ },
+ .c = {
+ .dbg_name = "sata_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_p_clk.c),
+ },
+};
+
+static struct branch_clk sfab_sata_s_p_clk = {
+ .b = {
+ .ctl_reg = SFAB_SATA_S_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEB_REG,
+ .halt_bit = 14,
+ },
+ .c = {
+ .dbg_name = "sfab_sata_s_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sfab_sata_s_p_clk.c),
+ },
+};
static struct branch_clk pcie_p_clk = {
.b = {
.ctl_reg = PCIE_HCLK_CTL_REG,
@@ -4550,7 +4657,11 @@
{ TEST_PER_LS(0x56), &gsbi7_uart_clk.c },
{ TEST_PER_LS(0x58), &gsbi7_qup_clk.c },
{ TEST_PER_LS(0x59), &gsbi8_p_clk.c },
+ { TEST_PER_LS(0x59), &sfab_sata_s_p_clk.c },
{ TEST_PER_LS(0x5A), &gsbi8_uart_clk.c },
+ { TEST_PER_LS(0x5A), &sata_p_clk.c },
+ { TEST_PER_LS(0x5B), &sata_rxoob_clk.c },
+ { TEST_PER_LS(0x5C), &sata_pmalive_clk.c },
{ TEST_PER_LS(0x5C), &gsbi8_qup_clk.c },
{ TEST_PER_LS(0x5D), &gsbi9_p_clk.c },
{ TEST_PER_LS(0x5E), &gsbi9_uart_clk.c },
@@ -4606,6 +4717,7 @@
{ TEST_PER_HS(0x26), &q6sw_clk },
{ TEST_PER_HS(0x27), &q6fw_clk },
{ TEST_PER_HS(0x2A), &adm0_clk.c },
+ { TEST_PER_HS(0x31), &sata_a_clk.c },
{ TEST_PER_HS(0x2D), &pcie_phy_ref_clk.c },
{ TEST_PER_HS(0x32), &pcie_a_clk.c },
{ TEST_PER_HS(0x34), &ebi1_clk.c },
@@ -4882,7 +4994,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -4890,7 +5002,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
@@ -4967,6 +5079,12 @@
CLK_LOOKUP("sys_clk", usb_fs1_sys_clk.c, ""),
CLK_LOOKUP("ref_clk", sata_phy_ref_clk.c, ""),
CLK_LOOKUP("cfg_clk", sata_phy_cfg_clk.c, ""),
+ CLK_LOOKUP("src_clk", sata_src_clk.c, ""),
+ CLK_LOOKUP("core_rxoob_clk", sata_rxoob_clk.c, ""),
+ CLK_LOOKUP("core_pmalive_clk", sata_pmalive_clk.c, ""),
+ CLK_LOOKUP("bus_clk", sata_a_clk.c, ""),
+ CLK_LOOKUP("iface_clk", sata_p_clk.c, ""),
+ CLK_LOOKUP("slave_iface_clk", sfab_sata_s_p_clk.c, ""),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qce.0"),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qcrypto.0"),
CLK_LOOKUP("core_clk", ce3_core_clk.c, "qce.0"),
@@ -5982,9 +6100,14 @@
if (cpu_is_msm8960() || cpu_is_apq8064())
rmwreg(0x2, DSI2_BYTE_NS_REG, 0x7);
- /* Source the sata_phy_ref_clk from PXO */
- if (cpu_is_apq8064())
+ /*
+ * Source the sata_phy_ref_clk from PXO and set predivider of
+ * sata_pmalive_clk to 1.
+ */
+ if (cpu_is_apq8064()) {
rmwreg(0, SATA_PHY_REF_CLK_CTL_REG, 0x1);
+ rmwreg(0, SATA_PMALIVE_CLK_CTL_REG, 0x3);
+ }
/*
* TODO: Programming below PLLs and prng_clk is temporary and
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 8d2b37a..6972302 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3445,7 +3445,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -3453,7 +3453,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index a950e46..66d849a 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -1560,7 +1560,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -1568,7 +1568,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index 80403f8..c0245a3 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -4699,7 +4699,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -4707,7 +4707,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
@@ -4910,6 +4910,7 @@
CLK_LOOKUP("bus_clk", q6ss_ahb_lfabif_clk.c, "pil-q6v5-lpass"),
CLK_LOOKUP("mem_clk", gcc_boot_rom_ahb_clk.c, ""),
CLK_LOOKUP("bus_clk", gcc_mss_cfg_ahb_clk.c, ""),
+ CLK_DUMMY("core_clk", PRNG_CLK , "msm_rng.0", OFF),
/* TODO: Remove dummy clocks as soon as they become unnecessary */
CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index e1b3381..4f365fa 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -627,20 +627,16 @@
return HANDOFF_ENABLED_CLK;
}
-struct clk_ops clk_ops_gnd = {
-};
+struct clk_ops clk_ops_empty;
struct fixed_clk gnd_clk = {
.c = {
.dbg_name = "ground_clk",
- .ops = &clk_ops_gnd,
+ .ops = &clk_ops_empty,
CLK_INIT(gnd_clk.c),
},
};
-struct clk_ops clk_ops_measure = {
-};
-
static int branch_clk_enable(struct clk *clk)
{
unsigned long flags;
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index a419d69..ffc7057 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -77,7 +77,7 @@
*/
#define DEFINE_CLK_MEASURE(name) \
struct clk name = { \
- .ops = &clk_ops_measure, \
+ .ops = &clk_ops_empty, \
.dbg_name = #name, \
CLK_INIT(name), \
}; \
@@ -264,7 +264,7 @@
struct clk c;
};
-extern struct clk_ops clk_ops_measure;
+extern struct clk_ops clk_ops_empty;
static inline struct measure_clk *to_measure_clk(struct clk *clk)
{
diff --git a/arch/arm/mach-msm/clock-local2.h b/arch/arm/mach-msm/clock-local2.h
index c8d53cb..547e633 100644
--- a/arch/arm/mach-msm/clock-local2.h
+++ b/arch/arm/mach-msm/clock-local2.h
@@ -153,8 +153,6 @@
struct clk c;
};
-extern struct clk_ops clk_ops_measure;
-
static inline struct measure_clk *to_measure_clk(struct clk *clk)
{
return container_of(clk, struct measure_clk, c);
diff --git a/arch/arm/mach-msm/clock-pcom-lookup.c b/arch/arm/mach-msm/clock-pcom-lookup.c
index d842d45..f71d6d5 100644
--- a/arch/arm/mach-msm/clock-pcom-lookup.c
+++ b/arch/arm/mach-msm/clock-pcom-lookup.c
@@ -39,7 +39,7 @@
.id = PLL_0,
.mode_reg = PLLn_MODE(0),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll0_clk",
CLK_INIT(pll0_clk.c),
},
@@ -49,7 +49,7 @@
.id = PLL_1,
.mode_reg = PLLn_MODE(1),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll1_clk",
CLK_INIT(pll1_clk.c),
},
@@ -59,7 +59,7 @@
.id = PLL_2,
.mode_reg = PLLn_MODE(2),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll2_clk",
CLK_INIT(pll2_clk.c),
},
@@ -69,7 +69,7 @@
.id = PLL_4,
.mode_reg = PLL4_MODE,
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll4_clk",
CLK_INIT(pll4_clk.c),
},
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 3a232c5..ead4fcb 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -427,7 +427,7 @@
return HANDOFF_ENABLED_CLK;
}
-struct clk_ops clk_pll_ops = {
+struct clk_ops clk_ops_pll = {
.enable = pll_clk_enable,
.disable = pll_clk_disable,
.handoff = pll_clk_handoff,
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index 231668f..a8c642f 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -43,7 +43,7 @@
void *const __iomem *base;
};
-extern struct clk_ops clk_pll_ops;
+extern struct clk_ops clk_ops_pll;
static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
{
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 29a90e4..41980b3 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -477,13 +477,24 @@
*/
struct msm_dai_auxpcm_pdata apq_auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device apq_cpudai_auxpcm_rx = {
@@ -2301,11 +2312,17 @@
.flags = IORESOURCE_MEM,
},
{
- .name = "vcap",
+ .name = "vc_irq",
.start = VCAP_VC,
.end = VCAP_VC,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "vp_irq",
+ .start = VCAP_VP,
+ .end = VCAP_VP,
+ .flags = IORESOURCE_IRQ,
+ },
};
static unsigned vcap_gpios[] = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index f1553b6..e474e36 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1892,13 +1892,24 @@
*/
struct msm_dai_auxpcm_pdata auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device msm_cpudai_auxpcm_rx = {
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 8eb1580..76d79a6 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -135,6 +135,8 @@
},
};
+#define MSM_HSUSB_RESUME_GPIO 79
+
static struct resource resources_hsusb[] = {
{
.start = MSM9615_HSUSB_PHYS,
@@ -146,6 +148,12 @@
.end = USB1_HS_IRQ,
.flags = IORESOURCE_IRQ,
},
+ {
+ .start = MSM_HSUSB_RESUME_GPIO,
+ .end = MSM_HSUSB_RESUME_GPIO,
+ .name = "USB_RESUME",
+ .flags = IORESOURCE_IO,
+ },
};
static struct resource resources_usb_bam[] = {
@@ -448,13 +456,24 @@
*/
struct msm_dai_auxpcm_pdata auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device msm_cpudai_auxpcm_rx = {
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 9a03afd..2e0253b 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -568,26 +568,26 @@
.mids = {1, -1}
};
-static struct msm_iommu_ctx_dev mdp_vg1_ctx = {
- .name = "mdp_vg1",
+static struct msm_iommu_ctx_dev mdp_port0_cb0_ctx = {
+ .name = "mdp_port0_cb0",
.num = 0,
.mids = {0, 2, -1}
};
-static struct msm_iommu_ctx_dev mdp_rgb1_ctx = {
- .name = "mdp_rgb1",
+static struct msm_iommu_ctx_dev mdp_port0_cb1_ctx = {
+ .name = "mdp_port0_cb1",
.num = 1,
.mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1}
};
-static struct msm_iommu_ctx_dev mdp_vg2_ctx = {
- .name = "mdp_vg2",
+static struct msm_iommu_ctx_dev mdp_port1_cb0_ctx = {
+ .name = "mdp_port1_cb0",
.num = 0,
.mids = {0, 2, -1}
};
-static struct msm_iommu_ctx_dev mdp_rgb2_ctx = {
- .name = "mdp_rgb2",
+static struct msm_iommu_ctx_dev mdp_port1_cb1_ctx = {
+ .name = "mdp_port1_cb1",
.num = 1,
.mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1}
};
@@ -732,39 +732,39 @@
},
};
-static struct platform_device msm_device_mdp_vg1_ctx = {
+static struct platform_device msm_device_mdp_port0_cb0_ctx = {
.name = "msm_iommu_ctx",
.id = 4,
.dev = {
.parent = &msm_device_iommu_mdp0.dev,
- .platform_data = &mdp_vg1_ctx,
+ .platform_data = &mdp_port0_cb0_ctx,
},
};
-static struct platform_device msm_device_mdp_rgb1_ctx = {
+static struct platform_device msm_device_mdp_port0_cb1_ctx = {
.name = "msm_iommu_ctx",
.id = 5,
.dev = {
.parent = &msm_device_iommu_mdp0.dev,
- .platform_data = &mdp_rgb1_ctx,
+ .platform_data = &mdp_port0_cb1_ctx,
},
};
-static struct platform_device msm_device_mdp_vg2_ctx = {
+static struct platform_device msm_device_mdp_port1_cb0_ctx = {
.name = "msm_iommu_ctx",
.id = 6,
.dev = {
.parent = &msm_device_iommu_mdp1.dev,
- .platform_data = &mdp_vg2_ctx,
+ .platform_data = &mdp_port1_cb0_ctx,
},
};
-static struct platform_device msm_device_mdp_rgb2_ctx = {
+static struct platform_device msm_device_mdp_port1_cb1_ctx = {
.name = "msm_iommu_ctx",
.id = 7,
.dev = {
.parent = &msm_device_iommu_mdp1.dev,
- .platform_data = &mdp_rgb2_ctx,
+ .platform_data = &mdp_port1_cb1_ctx,
},
};
@@ -950,10 +950,10 @@
static struct platform_device *msm_iommu_common_ctx_devs[] = {
&msm_device_vpe_src_ctx,
&msm_device_vpe_dst_ctx,
- &msm_device_mdp_vg1_ctx,
- &msm_device_mdp_rgb1_ctx,
- &msm_device_mdp_vg2_ctx,
- &msm_device_mdp_rgb2_ctx,
+ &msm_device_mdp_port0_cb0_ctx,
+ &msm_device_mdp_port0_cb1_ctx,
+ &msm_device_mdp_port1_cb0_ctx,
+ &msm_device_mdp_port1_cb1_ctx,
&msm_device_rot_src_ctx,
&msm_device_rot_dst_ctx,
&msm_device_ijpeg_src_ctx,
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index d01a229..100d99a 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -396,3 +396,7 @@
extern struct platform_device apq8064_rtb_device;
extern struct platform_device msm8960_cache_dump_device;
+
+extern struct platform_device apq_device_tz_log;
+
+extern struct platform_device msm8974_device_rng;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 56210d5..17ac3ac 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -125,9 +125,15 @@
const struct pmic8058_leds_platform_data *driver_channel;
};
+enum msm_camera_ext_led_flash_id {
+ MAM_CAMERA_EXT_LED_FLASH_SC628A,
+ MAM_CAMERA_EXT_LED_FLASH_TPS61310,
+};
+
struct msm_camera_sensor_flash_external {
uint32_t led_en;
uint32_t led_flash_en;
+ enum msm_camera_ext_led_flash_id flash_id;
struct msm_cam_expander_info *expander_info;
};
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index dfb100c..1a3a022 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -56,14 +56,28 @@
unsigned int domain_alloc_flags;
};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+};
+
#if defined(CONFIG_MSM_IOMMU)
extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
-extern unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align);
+ unsigned long align,
+ unsigned long *iova);
extern void msm_free_iova_address(unsigned long iova,
unsigned int iommu_domain,
@@ -97,16 +111,19 @@
unsigned int partition_no,
unsigned long size);
+extern int msm_register_domain(struct msm_iova_layout *layout);
+
#else
static inline struct iommu_domain
*msm_get_iommu_domain(int subsys_id) { return NULL; }
-static inline unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align) { return 0; }
+ unsigned long align,
+ unsigned long *iova) { return -ENOMEM; }
static inline void msm_free_iova_address(unsigned long iova,
unsigned int iommu_domain,
@@ -153,6 +170,11 @@
{
return;
}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/rpm-smd.h b/arch/arm/mach-msm/include/mach/rpm-smd.h
new file mode 100644
index 0000000..ff58fed
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpm-smd.h
@@ -0,0 +1,254 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a Key value pair to a existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a Key value pair to a existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/** msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() -Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+
+}
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int count)
+{
+ return 0;
+}
+static inline int msm_rpm_add_kvp_data_noirq(
+ struct msm_rpm_request *handle, uint32_t key,
+ const uint8_t *data, int count)
+{
+ return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ return ;
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 34c16d1..271e252b 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -10,27 +10,34 @@
* GNU General Public License for more details.
*/
-#include <mach/msm_subsystem_map.h>
-#include <linux/memory_alloc.h>
+#include <linux/init.h>
#include <linux/iommu.h>
+#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/page.h>
-#include <linux/init.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
+#include <mach/msm_subsystem_map.h>
/* dummy 64K for overmapping */
char iommu_dummy[2*SZ_64K-4];
-struct msm_iommu_domain_state {
- struct msm_iommu_domain *domains;
- int ndomains;
+struct msm_iova_data {
+ struct rb_node node;
+ struct mem_pool *pools;
+ int npools;
+ struct iommu_domain *domain;
+ int domain_num;
};
-static struct msm_iommu_domain_state domain_state;
+static struct rb_root domain_root;
+DEFINE_MUTEX(domain_mutex);
+static atomic_t domain_nums = ATOMIC_INIT(-1);
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
@@ -127,9 +134,10 @@
if (size & (align - 1))
return -EINVAL;
- iova = msm_allocate_iova_address(domain_no, partition_no, size, align);
+ ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
+ &iova);
- if (!iova)
+ if (ret)
return -ENOMEM;
ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
@@ -152,73 +160,210 @@
msm_free_iova_address(iova, domain_no, partition_no, size);
}
+static struct msm_iova_data *find_domain(int domain_num)
+{
+ struct rb_root *root = &domain_root;
+ struct rb_node *p = root->rb_node;
+
+ mutex_lock(&domain_mutex);
+
+ while (p) {
+ struct msm_iova_data *node;
+
+ node = rb_entry(p, struct msm_iova_data, node);
+ if (domain_num < node->domain_num)
+ p = p->rb_left;
+ else if (domain_num > node->domain_num)
+ p = p->rb_right;
+ else {
+ mutex_unlock(&domain_mutex);
+ return node;
+ }
+ }
+ mutex_unlock(&domain_mutex);
+ return NULL;
+}
+
+static int add_domain(struct msm_iova_data *node)
+{
+ struct rb_root *root = &domain_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ mutex_lock(&domain_mutex);
+ while (*p) {
+ struct msm_iova_data *tmp;
+ parent = *p;
+
+ tmp = rb_entry(parent, struct msm_iova_data, node);
+
+ if (node->domain_num < tmp->domain_num)
+ p = &(*p)->rb_left;
+ else if (node->domain_num > tmp->domain_num)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&node->node, parent, p);
+ rb_insert_color(&node->node, root);
+ mutex_unlock(&domain_mutex);
+ return 0;
+}
+
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
- if (domain_num >= 0 && domain_num < domain_state.ndomains)
- return domain_state.domains[domain_num].domain;
+ struct msm_iova_data *data;
+
+ data = find_domain(domain_num);
+
+ if (data)
+ return data->domain;
else
return NULL;
}
-unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align)
+ unsigned long align,
+ unsigned long *iova)
{
+ struct msm_iova_data *data;
struct mem_pool *pool;
- unsigned long iova;
+ unsigned long va;
- if (iommu_domain >= domain_state.ndomains)
- return 0;
+ data = find_domain(iommu_domain);
- if (partition_no >= domain_state.domains[iommu_domain].npools)
- return 0;
+ if (!data)
+ return -EINVAL;
- pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
+ if (partition_no >= data->npools)
+ return -EINVAL;
+
+ pool = &data->pools[partition_no];
if (!pool->gpool)
- return 0;
+ return -EINVAL;
- iova = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
- if (iova)
+ va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
+ if (va) {
pool->free -= size;
+ /* Offset because genpool can't handle 0 addresses */
+ if (pool->paddr == 0)
+ va -= SZ_4K;
+ *iova = va;
+ return 0;
+ }
- return iova;
+ return -ENOMEM;
}
void msm_free_iova_address(unsigned long iova,
- unsigned int iommu_domain,
- unsigned int partition_no,
- unsigned long size)
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size)
{
+ struct msm_iova_data *data;
struct mem_pool *pool;
- if (iommu_domain >= domain_state.ndomains) {
+ data = find_domain(iommu_domain);
+
+ if (!data) {
WARN(1, "Invalid domain %d\n", iommu_domain);
return;
}
- if (partition_no >= domain_state.domains[iommu_domain].npools) {
+ if (partition_no >= data->npools) {
WARN(1, "Invalid partition %d for domain %d\n",
partition_no, iommu_domain);
return;
}
- pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
+ pool = &data->pools[partition_no];
if (!pool)
return;
pool->free += size;
+
+ /* Offset because genpool can't handle 0 addresses */
+ if (pool->paddr == 0)
+ iova += SZ_4K;
+
gen_pool_free(pool->gpool, iova, size);
}
+int msm_register_domain(struct msm_iova_layout *layout)
+{
+ int i;
+ struct msm_iova_data *data;
+ struct mem_pool *pools;
+
+ if (!layout)
+ return -EINVAL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
+ GFP_KERNEL);
+
+ if (!pools)
+ goto out;
+
+ for (i = 0; i < layout->npartitions; i++) {
+ if (layout->partitions[i].size == 0)
+ continue;
+
+ pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
+
+ if (!pools[i].gpool)
+ continue;
+
+ pools[i].paddr = layout->partitions[i].start;
+ pools[i].size = layout->partitions[i].size;
+
+ /*
+ * genalloc can't handle a pool starting at address 0.
+ * For now, solve this problem by offsetting the value
+ * put in by 4k.
+ * gen pool address = actual address + 4k
+ */
+ if (pools[i].paddr == 0)
+ layout->partitions[i].start += SZ_4K;
+
+ if (gen_pool_add(pools[i].gpool,
+ layout->partitions[i].start,
+ layout->partitions[i].size, -1)) {
+ gen_pool_destroy(pools[i].gpool);
+ pools[i].gpool = NULL;
+ continue;
+ }
+ }
+
+ data->pools = pools;
+ data->npools = layout->npartitions;
+ data->domain_num = atomic_inc_return(&domain_nums);
+ data->domain = iommu_domain_alloc(layout->domain_flags);
+
+ add_domain(data);
+
+ return data->domain_num;
+
+out:
+ kfree(data);
+
+ return -EINVAL;
+}
+
int msm_use_iommu()
{
/*
* If there are no domains, don't bother trying to use the iommu
*/
- return domain_state.ndomains && iommu_found();
+ return iommu_found();
}
static int __init iommu_domain_probe(struct platform_device *pdev)
@@ -229,64 +374,52 @@
if (!p)
return -ENODEV;
- domain_state.domains = p->domains;
- domain_state.ndomains = p->ndomains;
+ for (i = 0; i < p->ndomains; i++) {
+ struct msm_iova_layout l;
+ struct msm_iova_partition *part;
+ struct msm_iommu_domain *domains;
- for (i = 0; i < domain_state.ndomains; i++) {
- domain_state.domains[i].domain = iommu_domain_alloc(
- p->domain_alloc_flags);
- if (!domain_state.domains[i].domain)
+ domains = p->domains;
+ l.npartitions = domains[i].npools;
+ part = kmalloc(
+ sizeof(struct msm_iova_partition) * l.npartitions,
+ GFP_KERNEL);
+
+ if (!part) {
+ pr_info("%s: could not allocate space for domain %d",
+ __func__, i);
continue;
-
- for (j = 0; j < domain_state.domains[i].npools; j++) {
- struct mem_pool *pool = &domain_state.domains[i].
- iova_pools[j];
- mutex_init(&pool->pool_mutex);
- if (pool->size) {
- pool->gpool = gen_pool_create(PAGE_SHIFT, -1);
-
- if (!pool->gpool) {
- pr_err("%s: could not allocate pool\n",
- __func__);
- pr_err("%s: domain %d iova space %d\n",
- __func__, i, j);
- continue;
- }
-
- if (gen_pool_add(pool->gpool, pool->paddr,
- pool->size, -1)) {
- pr_err("%s: could not add memory\n",
- __func__);
- pr_err("%s: domain %d pool %d\n",
- __func__, i, j);
- gen_pool_destroy(pool->gpool);
- pool->gpool = NULL;
- continue;
- }
- } else {
- pool->gpool = NULL;
- }
}
+
+ for (j = 0; j < l.npartitions; j++) {
+ part[j].start = p->domains[i].iova_pools[j].paddr;
+ part[j].size = p->domains[i].iova_pools[j].size;
+ }
+
+ l.partitions = part;
+
+ msm_register_domain(&l);
+
+ kfree(part);
}
for (i = 0; i < p->nnames; i++) {
- int domain_idx;
struct device *ctx = msm_iommu_get_ctx(
p->domain_names[i].name);
+ struct iommu_domain *domain;
if (!ctx)
continue;
- domain_idx = p->domain_names[i].domain;
+ domain = msm_get_iommu_domain(p->domain_names[i].domain);
- if (!domain_state.domains[domain_idx].domain)
+ if (!domain)
continue;
- if (iommu_attach_device(domain_state.domains[domain_idx].domain,
- ctx)) {
- WARN(1, "%s: could not attach domain %d to context %s."
+ if (iommu_attach_device(domain, ctx)) {
+ WARN(1, "%s: could not attach domain %p to context %s."
" iommu programming will not occur.\n",
- __func__, domain_idx,
+ __func__, domain,
p->domain_names[i].name);
continue;
}
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 7208a48..9d0ce0d 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -578,6 +578,10 @@
"invalid proxy voting. ignoring\n"))
((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+ WARN(desc->ops->proxy_unvote && !desc->proxy_timeout,
+ "A proxy timeout of 0 ms was specified for %s. Specify one in "
+ "desc->proxy_timeout.\n", desc->name);
+
pil = kzalloc(sizeof(*pil), GFP_KERNEL);
if (!pil)
return ERR_PTR(-ENOMEM);
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 8446e42..131a74b 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -116,14 +116,9 @@
int err;
struct q6v4_data *drv = dev_get_drvdata(dev);
- err = regulator_set_voltage(drv->vreg, 1050000, 1050000);
+ err = regulator_set_voltage(drv->vreg, 375000, 375000);
if (err) {
- dev_err(dev, "Failed to set regulator's voltage.\n");
- return err;
- }
- err = regulator_set_optimum_mode(drv->vreg, 100000);
- if (err < 0) {
- dev_err(dev, "Failed to set regulator's mode.\n");
+ dev_err(dev, "Failed to set regulator's voltage step.\n");
return err;
}
err = regulator_enable(drv->vreg);
@@ -131,6 +126,18 @@
dev_err(dev, "Failed to enable regulator.\n");
return err;
}
+
+ /*
+ * Q6 hardware requires a two step voltage ramp-up.
+ * Delay between the steps.
+ */
+ udelay(100);
+
+ err = regulator_set_voltage(drv->vreg, 1050000, 1050000);
+ if (err) {
+ dev_err(dev, "Failed to set regulator's voltage.\n");
+ return err;
+ }
drv->vreg_enabled = true;
return 0;
}
@@ -411,6 +418,12 @@
if (IS_ERR(drv->vreg))
return PTR_ERR(drv->vreg);
+ ret = regulator_set_optimum_mode(drv->vreg, 100000);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
+ return ret;
+ }
+
drv->xo = devm_clk_get(&pdev->dev, "xo");
if (IS_ERR(drv->xo))
return PTR_ERR(drv->xo);
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 8691ac7..311f8a7 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -23,28 +22,14 @@
#include "peripheral-loader.h"
#include "pil-q6v5.h"
-/* Register Offsets */
#define QDSP6SS_RST_EVB 0x010
-#define AXI_HALTREQ 0x0
-#define AXI_HALTACK 0x4
-#define AXI_IDLE 0x8
-
-#define HALT_ACK_TIMEOUT_US 100000
+#define PROXY_TIMEOUT_MS 10000
static int pil_lpass_shutdown(struct pil_desc *pil)
{
struct q6v5_data *drv = dev_get_drvdata(pil->dev);
- int ret;
- u32 status;
- writel_relaxed(1, drv->axi_halt_base + AXI_HALTREQ);
- ret = readl_poll_timeout(drv->axi_halt_base + AXI_HALTACK,
- status, status, 50, HALT_ACK_TIMEOUT_US);
- if (ret)
- dev_err(pil->dev, "Port halt timeout\n");
- else if (!readl_relaxed(drv->axi_halt_base + AXI_IDLE))
- dev_err(pil->dev, "Port halt failed\n");
- writel_relaxed(0, drv->axi_halt_base + AXI_HALTREQ);
+ pil_q6v5_halt_axi_port(pil, drv->axi_halt_base);
/*
* If the shutdown function is called before the reset function, clocks
@@ -98,7 +83,6 @@
{
struct q6v5_data *drv;
struct pil_desc *desc;
- struct resource *res;
desc = pil_q6v5_init(pdev);
if (IS_ERR(desc))
@@ -110,12 +94,7 @@
desc->ops = &pil_lpass_ops;
desc->owner = THIS_MODULE;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!drv->axi_halt_base)
- return -ENOMEM;
+ desc->proxy_timeout = PROXY_TIMEOUT_MS;
drv->pil = msm_pil_register(desc);
if (IS_ERR(drv->pil))
diff --git a/arch/arm/mach-msm/pil-q6v5.c b/arch/arm/mach-msm/pil-q6v5.c
index 3b9d542..6a96990 100644
--- a/arch/arm/mach-msm/pil-q6v5.c
+++ b/arch/arm/mach-msm/pil-q6v5.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -25,11 +26,18 @@
#include "peripheral-loader.h"
#include "pil-q6v5.h"
-/* Register Offsets */
+/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET 0x014
#define QDSP6SS_GFMUX_CTL 0x020
#define QDSP6SS_PWR_CTL 0x030
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
/* QDSP6SS_RESET */
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_ETM_ISDB_ARES BIT(3)
@@ -68,6 +76,27 @@
}
EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+ int ret;
+ u32 status;
+
+ /* Assert halt request */
+ writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+ /* Wait for halt */
+ ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+ status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+ if (ret)
+ dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+ else if (!readl_relaxed(halt_base + AXI_IDLE))
+ dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
int pil_q6v5_init_image(struct pil_desc *pil, const u8 *metadata,
size_t size)
{
@@ -210,6 +239,11 @@
resource_size(res));
if (!drv->reg_base)
return ERR_PTR(-ENOMEM);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!drv->axi_halt_base)
+ return ERR_PTR(-ENOMEM);
desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
diff --git a/arch/arm/mach-msm/pil-q6v5.h b/arch/arm/mach-msm/pil-q6v5.h
index 5f283da..a9a8d07 100644
--- a/arch/arm/mach-msm/pil-q6v5.h
+++ b/arch/arm/mach-msm/pil-q6v5.h
@@ -35,6 +35,7 @@
int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
int pil_q6v5_init_image(struct pil_desc *pil, const u8 *metadata,
size_t size);
void pil_q6v5_shutdown(struct pil_desc *pil);
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 3826b12..91f1133 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -170,6 +170,8 @@
{
unsigned long timeout;
+ preset_lpj = loops_per_jiffy;
+
if (cold_boot_done == false) {
if (msm8625_release_secondary()) {
pr_err("Failed to release secondary core\n");
diff --git a/arch/arm/mach-msm/rpc_pmapp.c b/arch/arm/mach-msm/rpc_pmapp.c
index 811e63c..1d18553 100644
--- a/arch/arm/mach-msm/rpc_pmapp.c
+++ b/arch/arm/mach-msm/rpc_pmapp.c
@@ -548,7 +548,7 @@
int pmapp_disp_backlight_set_brightness(int value)
{
- if (value < 0 || value > 100)
+ if (value < 0 || value > 255)
return -EINVAL;
return pmapp_rpc_set_only(value, 0, 0, 0, 1,
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
new file mode 100644
index 0000000..df8d9b3
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+
+int msm_rpm_register_notifier(struct notifier_block *nb);
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
new file mode 100644
index 0000000..75f4d92
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -0,0 +1,826 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <mach/socinfo.h>
+#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
+#include "rpm-notifier.h"
+
+struct msm_rpm_driver_data {
+ const char *ch_name;
+ uint32_t ch_type;
+ smd_channel_t *ch_info;
+ struct work_struct work;
+ spinlock_t smd_lock_write;
+ spinlock_t smd_lock_read;
+ struct completion smd_open;
+};
+
+#define DEFAULT_BUFFER_SIZE 256
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
+#define INV_HDR "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 60
+
+static struct atomic_notifier_head msm_rpm_sleep_notifier;
+static bool standalone;
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+ MSM_RPM_MSG_REQUEST_TYPE = 0,
+ MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
+ 0x716572, /* 'req\0' */
+};
+
+/* The order of fields matters: it must match the layout expected by the RPM */
+struct rpm_request_header {
+ uint32_t service_type;
+ uint32_t request_len;
+};
+
+struct rpm_message_header {
+ uint32_t msg_id;
+ enum msm_rpm_set set;
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t data_len;
+};
+
+struct msm_rpm_kvp_data {
+ uint32_t key;
+ uint32_t nbytes; /* number of bytes */
+ uint8_t *value;
+ bool valid;
+};
+
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+static struct msm_rpm_driver_data msm_rpm_data;
+
+struct msm_rpm_request {
+ struct rpm_request_header req_hdr;
+ struct rpm_message_header msg_hdr;
+ struct msm_rpm_kvp_data *kvp;
+ uint32_t num_elements;
+ uint32_t write_idx;
+ uint8_t *buf;
+ uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgement
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+ struct list_head list;
+ uint32_t msg_id;
+ bool ack_recd;
+ int errno;
+ struct completion ack;
+};
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+struct msm_rpm_ack_msg {
+ uint32_t req;
+ uint32_t req_len;
+ uint32_t rsc_id;
+ uint32_t msg_len;
+ uint32_t id_ack;
+};
+
+static int irq_process;
+
+LIST_HEAD(msm_rpm_ack_list);
+
+static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
+ struct msm_rpm_kvp_data *kvp)
+{
+ struct msm_rpm_notifier_data notif;
+
+ notif.rsc_type = hdr->resource_type;
+ notif.rsc_id = hdr->resource_id;
+ notif.key = kvp->key;
+ notif.size = kvp->nbytes;
+ notif.value = kvp->value;
+ atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, ¬if);
+}
+
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+ int i;
+ int data_size, msg_size;
+
+ if (!handle)
+ return -EINVAL;
+
+ data_size = ALIGN(size, SZ_4);
+ msg_size = data_size + sizeof(struct rpm_request_header);
+
+ for (i = 0; i < handle->write_idx; i++) {
+ if (handle->kvp[i].key != key)
+ continue;
+ if (handle->kvp[i].nbytes != data_size) {
+ kfree(handle->kvp[i].value);
+ handle->kvp[i].value = NULL;
+ } else {
+ if (!memcmp(handle->kvp[i].value, data, data_size))
+ return 0;
+ }
+ break;
+ }
+
+ if (i >= handle->num_elements)
+ return -ENOMEM;
+
+ if (i == handle->write_idx)
+ handle->write_idx++;
+
+ if (!handle->kvp[i].value) {
+ handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+ if (!handle->kvp[i].value)
+ return -ENOMEM;
+ } else {
+		/* The key already exists but the stored size matches while
+		 * the contents differ; zero out the stale data before the
+		 * new value is copied in below.
+		 */
+ memset(handle->kvp[i].value, 0, data_size);
+ }
+
+ if (!handle->kvp[i].valid)
+ handle->msg_hdr.data_len += msg_size;
+ else
+ handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
+
+ handle->kvp[i].nbytes = data_size;
+ handle->kvp[i].key = key;
+ memcpy(handle->kvp[i].value, data, size);
+ handle->kvp[i].valid = true;
+
+ if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+ msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
+
+ return 0;
+
+}
+
+static struct msm_rpm_request *msm_rpm_create_request_common(
+ enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+ int num_elements, bool noirq)
+{
+ struct msm_rpm_request *cdata;
+
+ cdata = kzalloc(sizeof(struct msm_rpm_request),
+ GFP_FLAG(noirq));
+
+ if (!cdata) {
+ printk(KERN_INFO"%s():Cannot allocate memory for client data\n",
+ __func__);
+ goto cdata_alloc_fail;
+ }
+
+ cdata->msg_hdr.set = set;
+ cdata->msg_hdr.resource_type = rsc_type;
+ cdata->msg_hdr.resource_id = rsc_id;
+ cdata->msg_hdr.data_len = 0;
+
+ cdata->num_elements = num_elements;
+ cdata->write_idx = 0;
+
+ cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
+ GFP_FLAG(noirq));
+
+ if (!cdata->kvp) {
+ pr_warn("%s(): Cannot allocate memory for key value data\n",
+ __func__);
+ goto kvp_alloc_fail;
+ }
+
+ cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+ if (!cdata->buf)
+ goto buf_alloc_fail;
+
+ cdata->numbytes = DEFAULT_BUFFER_SIZE;
+ return cdata;
+
+buf_alloc_fail:
+ kfree(cdata->kvp);
+kvp_alloc_fail:
+ kfree(cdata);
+cdata_alloc_fail:
+ return NULL;
+
+}
+
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ int i;
+
+ if (!handle)
+ return;
+ for (i = 0; i < handle->write_idx; i++)
+ kfree(handle->kvp[i].value);
+ kfree(handle->kvp);
+ kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+ struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+ BUG_ON(!pdata);
+
+ if (!(pdata->ch_info))
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ queue_work(msm_rpm_smd_wq, &pdata->work);
+ break;
+ case SMD_EVENT_OPEN:
+ complete(&pdata->smd_open);
+ break;
+ case SMD_EVENT_CLOSE:
+ case SMD_EVENT_STATUS:
+ case SMD_EVENT_REOPEN_READY:
+ break;
+ default:
+ pr_info("Unknown SMD event\n");
+
+ }
+}
+
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id))
+ break;
+ elem = NULL;
+ }
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ return elem;
+}
+
+static int msm_rpm_get_next_msg_id(void)
+{
+ int id;
+
+ do {
+ id = atomic_inc_return(&msm_rpm_msg_id);
+ } while ((id == 0) || msm_rpm_get_entry_from_msg_id(id));
+
+ return id;
+}
+
+static int msm_rpm_add_wait_list(uint32_t msg_id)
+{
+ unsigned long flags;
+ struct msm_rpm_wait_data *data =
+ kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+ if (!data)
+ return -ENOMEM;
+
+ init_completion(&data->ack);
+ data->ack_recd = false;
+ data->msg_id = msg_id;
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_add(&data->list, &msm_rpm_wait_list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+ return 0;
+}
+
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_del(&elem->list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ kfree(elem);
+}
+
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id)) {
+ elem->errno = errno;
+ elem->ack_recd = true;
+ complete(&elem->ack);
+ break;
+ }
+ elem = NULL;
+ }
+ WARN_ON(!elem);
+
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+struct msm_rpm_kvp_packet {
+ uint32_t id;
+ uint32_t len;
+ uint32_t val;
+};
+
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+ return ((struct msm_rpm_ack_msg *)buf)->id_ack;
+}
+
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+ uint8_t *tmp;
+ uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
+
+ int rc = -ENODEV;
+
+ req_len -= sizeof(struct msm_rpm_ack_msg);
+ req_len += 2 * sizeof(uint32_t);
+ if (!req_len)
+ return 0;
+
+ tmp = buf + sizeof(struct msm_rpm_ack_msg);
+
+ BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
+
+ tmp += 2 * sizeof(uint32_t);
+
+ if (!(memcmp(tmp, INV_HDR, min(req_len, sizeof(INV_HDR))-1)))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static void msm_rpm_read_smd_data(char *buf)
+{
+ int pkt_sz;
+ int bytes_read = 0;
+
+ pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+ BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
+
+ if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+ return;
+
+ BUG_ON(pkt_sz == 0);
+
+ do {
+ int len;
+
+ len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+ pkt_sz -= len;
+ bytes_read += len;
+
+ } while (pkt_sz > 0);
+
+ BUG_ON(pkt_sz < 0);
+}
+
+static void msm_rpm_smd_work(struct work_struct *work)
+{
+ uint32_t msg_id;
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {0};
+ unsigned long flags;
+
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info) && !irq_process) {
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+ msm_rpm_read_smd_data(buf);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
+ }
+}
+
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+ int msg_type, bool noirq)
+{
+ uint8_t *tmpbuff;
+ int i, ret, msg_size;
+ unsigned long flags;
+
+ int req_hdr_sz, msg_hdr_sz;
+
+ if (!cdata->msg_hdr.data_len)
+ return 0;
+ req_hdr_sz = sizeof(cdata->req_hdr);
+ msg_hdr_sz = sizeof(cdata->msg_hdr);
+
+ cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
+
+ cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+ cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
+ msg_size = cdata->req_hdr.request_len + req_hdr_sz;
+
+	/* Grow the message buffer if the request no longer fits in it */
+ if (msg_size > cdata->numbytes) {
+ kfree(cdata->buf);
+ cdata->numbytes = msg_size;
+ cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+ }
+
+ if (!cdata->buf)
+ return 0;
+
+ tmpbuff = cdata->buf;
+
+ memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
+ tmpbuff += req_hdr_sz + msg_hdr_sz;
+
+ for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check: never write past the allocated buffer */
+ BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+ tmpbuff += cdata->kvp[i].nbytes;
+ }
+
+ if (standalone) {
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+ return ret;
+ }
+
+ msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+ ret = smd_write_avail(msm_rpm_data.ch_info);
+
+ if (ret < 0) {
+ pr_warn("%s(): SMD not initialized\n", __func__);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+ return 0;
+ }
+
+ while ((ret < msg_size)) {
+ if (!noirq) {
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+ flags);
+ cpu_relax();
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+ } else
+ udelay(5);
+ ret = smd_write_avail(msm_rpm_data.ch_info);
+ }
+
+ ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+
+ if (ret == msg_size) {
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+ } else if (ret < msg_size) {
+ struct msm_rpm_wait_data *rc;
+ ret = 0;
+ pr_info("Failed to write data msg_size:%d ret:%d\n",
+ msg_size, ret);
+ rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
+ if (rc)
+ msm_rpm_free_list_entry(rc);
+ }
+ return ret;
+}
+
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ int rc = 0;
+
+ if (!msg_id)
+ return -EINVAL;
+
+ if (standalone)
+ return 0;
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+ if (!elem)
+ return 0;
+
+ rc = wait_for_completion_timeout(&elem->ack, msecs_to_jiffies(1));
+ if (!rc) {
+ pr_warn("%s(): Timed out after 1 ms\n", __func__);
+ rc = -ETIMEDOUT;
+ } else {
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+ int rc = 0;
+ uint32_t id = 0;
+ int count = 0;
+
+ if (!msg_id)
+ return -EINVAL;
+
+ if (standalone)
+ return 0;
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+ irq_process = true;
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+ if (!elem)
+		/* No waiter registered for this msg_id; another thread may
+		 * already have consumed the ack. Bail out rather than read.
+		 */
+ goto wait_ack_cleanup;
+
+ while ((id != msg_id) && (count++ < 10)) {
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+ msm_rpm_read_smd_data(buf);
+ id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(id, errno);
+ } else
+ udelay(100);
+ }
+
+ if (count == 10) {
+ rc = -ETIMEDOUT;
+ pr_warn("%s(): Timed out after 1ms\n", __func__);
+ } else {
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+ }
+wait_ack_cleanup:
+ irq_process = false;
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+static bool msm_rpm_set_standalone(void)
+{
+ if (machine_is_copper()) {
+ pr_warn("%s(): Running in standalone mode, requests "
+ "will not be sent to RPM\n", __func__);
+ standalone = true;
+ }
+ return standalone;
+}
+
+static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret;
+
+ key = "rpm-channel-name";
+ ret = of_property_read_string(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_name);
+ if (ret)
+ goto fail;
+
+ key = "rpm-channel-type";
+ ret = of_property_read_u32(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_type);
+ if (ret)
+ goto fail;
+
+ init_completion(&msm_rpm_data.smd_open);
+ spin_lock_init(&msm_rpm_data.smd_lock_write);
+ spin_lock_init(&msm_rpm_data.smd_lock_read);
+ INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
+
+ if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type,
+ &msm_rpm_data.ch_info, &msm_rpm_data,
+ msm_rpm_notify)) {
+ pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
+ msm_rpm_data.ch_type);
+
+ msm_rpm_set_standalone();
+ BUG_ON(!standalone);
+ complete(&msm_rpm_data.smd_open);
+ }
+
+ ret = wait_for_completion_timeout(&msm_rpm_data.smd_open,
+ msecs_to_jiffies(5));
+
+ BUG_ON(!ret);
+
+ smd_disable_read_intr(msm_rpm_data.ch_info);
+
+ if (!standalone) {
+ msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
+ if (!msm_rpm_smd_wq)
+ return -EINVAL;
+ }
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ return 0;
+fail:
+ pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+ pdev->dev.of_node->full_name, key);
+ return -EINVAL;
+}
+
+static struct of_device_id msm_rpm_match_table[] = {
+ {.compatible = "qcom,rpm-smd"},
+ {},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+ .probe = msm_rpm_dev_probe,
+ .driver = {
+ .name = "rpm-smd",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_rpm_match_table,
+ },
+};
+
+int __init msm_rpm_driver_init(void)
+{
+ static bool registered;
+
+ if (registered)
+ return 0;
+ registered = true;
+
+ return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+late_initcall(msm_rpm_driver_init);
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index abcd336..b9cba8c 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -40,7 +40,7 @@
#ifdef CONFIG_ARCH_FSM9XXX
#define NUM_SMD_PKT_PORTS 4
#else
-#define NUM_SMD_PKT_PORTS 14
+#define NUM_SMD_PKT_PORTS 15
#endif
#define LOOPBACK_INX (NUM_SMD_PKT_PORTS - 1)
@@ -677,6 +677,7 @@
"apr_apps2",
"smdcntl8",
"smd_sns_adsp",
+ "smd_cxm_qmi",
"smd_pkt_loopback",
};
@@ -694,6 +695,7 @@
"apr_apps2",
"DATA40_CNTL",
"SENSOR",
+ "CXM_QMI_PORT_8064",
"LOOPBACK",
};
@@ -711,6 +713,7 @@
SMD_APPS_QDSP,
SMD_APPS_MODEM,
SMD_APPS_QDSP,
+ SMD_APPS_WCNSS,
SMD_APPS_MODEM,
};
#endif
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 4a1285b..916686f 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -375,12 +375,13 @@
partition_no = msm_subsystem_get_partition_no(
subsys_ids[i]);
- iova_start = msm_allocate_iova_address(domain_no,
+ ret = msm_allocate_iova_address(domain_no,
partition_no,
map_size,
- max(min_align, SZ_4K));
+ max(min_align, SZ_4K),
+ &iova_start);
- if (!iova_start) {
+ if (ret) {
pr_err("%s: could not allocate iova address\n",
__func__);
continue;
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 404538a..bacba58 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -159,6 +159,22 @@
tst r5, #FPSCR_IXE
bne process_exception
+#ifdef CONFIG_ARCH_MSM_KRAIT
+ @ Krait does not set FPEXC.DEX for unsupported short vector instructions
+ mrc p15, 0, r2, c0, c0, 0
+ ldr r4, =0xff00fc00
+ and r4, r2, r4
+ ldr r2, =0x51000400
+ cmp r2, r4
+ bne skip
+
+ tst r5, #FPSCR_LENGTH_MASK
+ beq skip
+ orr r1, r1, #FPEXC_DEX
+ b process_exception
+skip:
+#endif
+
@ Fall into hand on to next handler - appropriate coproc instr
@ not recognised by VFP
diff --git a/block/blk-core.c b/block/blk-core.c
index 35ae52d..a6a8ccb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1467,7 +1467,7 @@
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1521,6 +1521,14 @@
goto end_io;
}
+ if ((bio->bi_rw & REQ_SANITIZE) &&
+ (!blk_queue_sanitize(q))) {
+ pr_info("%s - got a SANITIZE request but the queue "
+ "doesn't support sanitize requests", __func__);
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
if (blk_throtl_bio(q, &bio))
goto end_io;
@@ -1611,7 +1619,8 @@
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio) &&
+ (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1657,7 +1666,7 @@
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))
return 0;
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 78e627e..39a7f25 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -112,6 +112,57 @@
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_sanitize - queue a sanitize request
+ * @bdev: blockdev to issue sanitize for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a sanitize request for the specified block device
+ */
+int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_SANITIZE;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_sanitize(q)) {
+ pr_err("%s - card doesn't support sanitize", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_sanitize);
+
+/**
* blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37c..f3ed15b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -383,6 +383,12 @@
return 0;
/*
+ * Don't merge file system requests and sanitize requests
+ */
+ if ((req->cmd_flags & REQ_SANITIZE) != (next->cmd_flags & REQ_SANITIZE))
+ return 0;
+
+ /*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/elevator.c b/block/elevator.c
index b0b38ce..78a14b5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -89,6 +89,12 @@
return 0;
/*
+ * Don't merge sanitize requests
+ */
+ if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
+ return 0;
+
+ /*
* different data direction or already started, don't merge
*/
if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -657,7 +663,7 @@
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 1124cd2..dbc103b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -131,6 +131,11 @@
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_sanitize(struct block_device *bdev)
+{
+ return blkdev_issue_sanitize(bdev, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -215,6 +220,10 @@
set_device_ro(bdev, n);
return 0;
+ case BLKSANITIZE:
+ ret = blk_ioctl_sanitize(bdev);
+ break;
+
case BLKDISCARD:
case BLKSECDISCARD: {
uint64_t range[2];
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 81a9fa7..6cd1806 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -45,6 +45,7 @@
#define MSM_ROTATOR_START (MSM_ROTATOR_BASE+0x0030)
#define MSM_ROTATOR_MAX_BURST_SIZE (MSM_ROTATOR_BASE+0x0050)
#define MSM_ROTATOR_HW_VERSION (MSM_ROTATOR_BASE+0x0070)
+#define MSM_ROTATOR_SW_RESET (MSM_ROTATOR_BASE+0x0074)
#define MSM_ROTATOR_SRC_SIZE (MSM_ROTATOR_BASE+0x1108)
#define MSM_ROTATOR_SRCP0_ADDR (MSM_ROTATOR_BASE+0x110c)
#define MSM_ROTATOR_SRCP1_ADDR (MSM_ROTATOR_BASE+0x1110)
@@ -906,8 +907,7 @@
break;
if (s == MAX_SESSIONS) {
- dev_dbg(msm_rotator_dev->device,
- "%s() : Attempt to use invalid session_id %d\n",
+ pr_err("%s() : Attempt to use invalid session_id %d\n",
__func__, s);
rc = -EINVAL;
goto do_rotate_unlock_mutex;
@@ -1129,11 +1129,13 @@
break;
default:
rc = -EINVAL;
+ pr_err("%s(): Unsupported format %u\n", __func__, format);
goto do_rotate_exit;
}
if (rc != 0) {
msm_rotator_dev->last_session_idx = INVALID_SESSION;
+ pr_err("%s(): Invalid session error\n", __func__);
goto do_rotate_exit;
}
@@ -1145,8 +1147,11 @@
wait_event(msm_rotator_dev->wq,
(msm_rotator_dev->processing == 0));
status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS);
- if ((status & 0x03) != 0x01)
+ if ((status & 0x03) != 0x01) {
+ pr_err("%s(): AXI Bus Error, issuing SW_RESET\n", __func__);
+ iowrite32(0x1, MSM_ROTATOR_SW_RESET);
rc = -EFAULT;
+ }
iowrite32(0, MSM_ROTATOR_INTR_ENABLE);
iowrite32(3, MSM_ROTATOR_INTR_CLEAR);
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index bbb13f3..56f986d 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1674,12 +1674,158 @@
return size;
}
+/**
+ * Searches through a clients handles to find if the buffer is owned
+ * by this client. Used for debug output.
+ * @param client pointer to candidate owner of buffer
+ * @param buf pointer to buffer that we are trying to find the owner of
+ * @return 1 if found, 0 otherwise
+ */
+static int ion_debug_find_buffer_owner(const struct ion_client *client,
+ const struct ion_buffer *buf)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ const struct ion_handle *handle = rb_entry(n,
+ const struct ion_handle,
+ node);
+ if (handle->buffer == buf)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Adds mem_map_data pointer to the tree of mem_map
+ * Used for debug output.
+ * @param mem_map The mem_map tree
+ * @param data The new data to add to the tree
+ */
+static void ion_debug_mem_map_add(struct rb_root *mem_map,
+ struct mem_map_data *data)
+{
+ struct rb_node **p = &mem_map->rb_node;
+ struct rb_node *parent = NULL;
+ struct mem_map_data *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct mem_map_data, node);
+
+ if (data->addr < entry->addr) {
+ p = &(*p)->rb_left;
+ } else if (data->addr > entry->addr) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: mem_map_data already found.", __func__);
+ BUG();
+ }
+ }
+ rb_link_node(&data->node, parent, p);
+ rb_insert_color(&data->node, mem_map);
+}
+
+/**
+ * Search for an owner of a buffer by iterating over all ION clients.
+ * @param dev ion device containing pointers to all the clients.
+ * @param buffer pointer to buffer we are trying to find the owner of.
+ * @return name of owner.
+ */
+const char *ion_debug_locate_owner(const struct ion_device *dev,
+ const struct ion_buffer *buffer)
+{
+ struct rb_node *j;
+ const char *client_name = NULL;
+
+ for (j = rb_first(&dev->user_clients); j && !client_name;
+ j = rb_next(j)) {
+ struct ion_client *client = rb_entry(j, struct ion_client,
+ node);
+ if (ion_debug_find_buffer_owner(client, buffer))
+ client_name = client->name;
+ }
+ for (j = rb_first(&dev->kernel_clients); j && !client_name;
+ j = rb_next(j)) {
+ struct ion_client *client = rb_entry(j, struct ion_client,
+ node);
+ if (ion_debug_find_buffer_owner(client, buffer))
+ client_name = client->name;
+ }
+ return client_name;
+}
+
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+ struct rb_root *mem_map)
+{
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer =
+ rb_entry(n, struct ion_buffer, node);
+ if (buffer->heap->id == heap->id) {
+ struct mem_map_data *data =
+ kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ seq_printf(s, "ERROR: out of memory. "
+ "Part of memory map will not be logged\n");
+ break;
+ }
+ data->addr = buffer->priv_phys;
+ data->addr_end = buffer->priv_phys + buffer->size-1;
+ data->size = buffer->size;
+ data->client_name = ion_debug_locate_owner(dev, buffer);
+ ion_debug_mem_map_add(mem_map, data);
+ }
+ }
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
+{
+ if (mem_map) {
+ struct rb_node *n;
+ while ((n = rb_first(mem_map)) != 0) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ rb_erase(&data->node, mem_map);
+ kfree(data);
+ }
+ }
+}
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+ if (heap->ops->print_debug) {
+ struct rb_root mem_map = RB_ROOT;
+ ion_debug_mem_map_create(s, heap, &mem_map);
+ heap->ops->print_debug(heap, s, &mem_map);
+ ion_debug_mem_map_destroy(&mem_map);
+ }
+}
+
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
+ mutex_lock(&dev->lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1703,8 +1849,8 @@
seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
size);
}
- if (heap->ops->print_debug)
- heap->ops->print_debug(heap, s);
+ ion_heap_print_debug(s, heap);
+ mutex_unlock(&dev->lock);
return 0;
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index ca2380b..710583b 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -251,7 +251,8 @@
return 0;
}
-static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -260,6 +261,44 @@
carveout_heap->allocated_bytes);
seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);
+ if (mem_map) {
+ unsigned long base = carveout_heap->base;
+ unsigned long size = carveout_heap->total_size;
+ unsigned long end = base+size;
+ unsigned long last_end = base;
+ struct rb_node *n;
+
+ seq_printf(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size (hex)");
+
+ for (n = rb_first(mem_map); n; n = rb_next(n)) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ const char *client_name = "(null)";
+
+ if (last_end < data->addr) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ "FREE", last_end, data->addr-1,
+ data->addr-last_end,
+ data->addr-last_end);
+ }
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ client_name, data->addr,
+ data->addr_end,
+ data->size, data->size);
+ last_end = data->addr_end+1;
+ }
+ if (last_end < end) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
+ last_end, end-1, end-last_end, end-last_end);
+ }
+ }
return 0;
}
@@ -287,13 +326,12 @@
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 052b778..fcbf1d4 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -563,7 +563,8 @@
return 0;
}
-static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map)
{
unsigned long total_alloc;
unsigned long total_size;
@@ -588,6 +589,45 @@
seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");
+ if (mem_map) {
+ unsigned long base = cp_heap->base;
+ unsigned long size = cp_heap->total_size;
+ unsigned long end = base+size;
+ unsigned long last_end = base;
+ struct rb_node *n;
+
+ seq_printf(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size (hex)");
+
+ for (n = rb_first(mem_map); n; n = rb_next(n)) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ const char *client_name = "(null)";
+
+ if (last_end < data->addr) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ "FREE", last_end, data->addr-1,
+ data->addr-last_end,
+ data->addr-last_end);
+ }
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ client_name, data->addr,
+ data->addr_end,
+ data->size, data->size);
+ last_end = data->addr_end+1;
+ }
+ if (last_end < end) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
+ last_end, end-1, end-last_end, end-last_end);
+ }
+ }
+
return 0;
}
@@ -647,13 +687,15 @@
}
if (!ret_value && domain) {
unsigned long temp_phys = cp_heap->base;
- unsigned long temp_iova =
- msm_allocate_iova_address(domain_num, partition,
- virt_addr_len, SZ_64K);
- if (!temp_iova) {
+ unsigned long temp_iova;
+
+ ret_value = msm_allocate_iova_address(domain_num, partition,
+ virt_addr_len, SZ_64K,
+ &temp_iova);
+
+ if (ret_value) {
pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
__func__, domain_num, partition);
- ret_value = -ENOMEM;
goto out;
}
cp_heap->iommu_iova[domain_num] = temp_iova;
@@ -742,13 +784,12 @@
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
@@ -918,6 +959,14 @@
cp_heap = NULL;
}
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size)
+{
+ struct ion_cp_heap *cp_heap =
+ container_of(heap, struct ion_cp_heap, heap);
+ *base = cp_heap->base;
+ *size = cp_heap->total_size;
+}
/* SCM related code for locking down memory for content protection */
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 312ca42..70bdc7f 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -190,13 +190,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 98e11cf..00ce33f 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -155,7 +155,8 @@
unsigned long iova_length,
unsigned long flags);
void (*unmap_iommu)(struct ion_iommu_map *data);
- int (*print_debug)(struct ion_heap *heap, struct seq_file *s);
+ int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map);
int (*secure_heap)(struct ion_heap *heap);
int (*unsecure_heap)(struct ion_heap *heap);
};
@@ -185,7 +186,22 @@
const char *name;
};
-
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node: rb node used to store in the tree of mem_map_data
+ * @addr: start address of memory region.
+ * @addr_end: end address of memory region.
+ * @size: size of memory region
+ * @client_name: name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+ struct rb_node node;
+ unsigned long addr;
+ unsigned long addr_end;
+ unsigned long size;
+ const char *client_name;
+};
#define iommu_map_domain(__m) ((__m)->domain_info[1])
#define iommu_map_partition(__m) ((__m)->domain_info[0])
@@ -298,4 +314,9 @@
void *uaddr, unsigned long offset, unsigned long len,
unsigned int cmd);
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ed9ae27..26c6632 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -204,7 +204,8 @@
return 0;
}
-static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *unused)
{
seq_printf(s, "total bytes currently allocated: %lx\n",
(unsigned long) atomic_read(&system_heap_allocated));
@@ -240,13 +241,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
@@ -423,7 +423,8 @@
}
static int ion_system_contig_print_debug(struct ion_heap *heap,
- struct seq_file *s)
+ struct seq_file *s,
+ const struct rb_root *unused)
{
seq_printf(s, "total bytes currently allocated: %lx\n",
(unsigned long) atomic_read(&system_contig_heap_allocated));
@@ -458,13 +459,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index ae846da..8a132df 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -830,9 +830,6 @@
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
- *cmds++ = 0;
-
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
@@ -1052,9 +1049,6 @@
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
- *cmds++ = 0;
-
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index ec38f75..7bb65ca 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -725,8 +725,8 @@
current_context));
context = idr_find(&device->context_idr, context_id);
if (context) {
- ts_processed = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
context->id, ts_processed);
} else
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 8d900b0..3d46221 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -928,8 +928,8 @@
" context id is invalid.\n");
return -EINVAL;
}
- retired_timestamp = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ retired_timestamp = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
retired_timestamp);
/*
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index cf09f52..1a34e80 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -78,8 +78,7 @@
if (context == NULL)
return -EINVAL;
}
- cur_ts = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
/* Check to see if the requested timestamp has already fired */
@@ -135,8 +134,7 @@
struct kgsl_event *event, *event_tmp;
unsigned int id, cur;
- cur = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
id = context->id;
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
@@ -173,8 +171,8 @@
if (event->owner != owner)
continue;
- cur = device->ftbl->readtimestamp(device, event->context,
- KGSL_TIMESTAMP_RETIRED);
+ cur = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
/*
@@ -388,6 +386,7 @@
if (context == NULL)
return;
device = context->dev_priv->device;
+ trace_kgsl_context_detach(device, context);
id = context->id;
if (device->ftbl->drawctxt_destroy)
@@ -425,8 +424,8 @@
/* Process expired events */
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
- ts_processed = device->ftbl->readtimestamp(device,
- event->context, KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
if (timestamp_cmp(ts_processed, event->timestamp) < 0)
continue;
@@ -521,8 +520,8 @@
{
unsigned int ts_processed;
- ts_processed = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
@@ -1021,19 +1020,25 @@
unsigned int timeout)
{
int result = 0;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
/* Set the active count so that suspend doesn't do the wrong thing */
- dev_priv->device->active_cnt++;
+ device->active_cnt++;
- trace_kgsl_waittimestamp_entry(dev_priv->device,
- context ? context->id : KGSL_MEMSTORE_GLOBAL,
- timestamp, timeout);
+ trace_kgsl_waittimestamp_entry(device, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp, timeout);
- result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
+ result = device->ftbl->waittimestamp(dev_priv->device,
context, timestamp, timeout);
- trace_kgsl_waittimestamp_exit(dev_priv->device, result);
+ trace_kgsl_waittimestamp_exit(device,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ result);
/* Fire off any pending suspend operations that are in flight */
@@ -1050,7 +1055,7 @@
{
struct kgsl_device_waittimestamp *param = data;
- return _device_waittimestamp(dev_priv, KGSL_MEMSTORE_GLOBAL,
+ return _device_waittimestamp(dev_priv, NULL,
param->timestamp, param->timeout);
}
@@ -1151,7 +1156,7 @@
¶m->timestamp,
param->flags);
- trace_kgsl_issueibcmds(dev_priv->device, param, result);
+ trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);
free_ibdesc:
kfree(ibdesc);
@@ -1164,8 +1169,7 @@
struct kgsl_context *context, unsigned int type,
unsigned int *timestamp)
{
- *timestamp = dev_priv->device->ftbl->readtimestamp(dev_priv->device,
- context, type);
+ *timestamp = kgsl_readtimestamp(dev_priv->device, context, type);
trace_kgsl_readtimestamp(dev_priv->device,
context ? context->id : KGSL_MEMSTORE_GLOBAL,
@@ -1209,7 +1213,7 @@
spin_lock(&entry->priv->mem_lock);
rb_erase(&entry->node, &entry->priv->mem_rb);
spin_unlock(&entry->priv->mem_lock);
- trace_kgsl_mem_timestamp_free(entry, id, timestamp);
+ trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
kgsl_mem_entry_detach_process(entry);
}
@@ -1220,27 +1224,25 @@
int result = 0;
struct kgsl_mem_entry *entry = NULL;
struct kgsl_device *device = dev_priv->device;
- unsigned int cur;
unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
spin_lock(&dev_priv->process_priv->mem_lock);
entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
spin_unlock(&dev_priv->process_priv->mem_lock);
- if (entry) {
- cur = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
-
- trace_kgsl_mem_timestamp_queue(entry, context_id, cur);
- result = kgsl_add_event(dev_priv->device, context_id,
- timestamp, kgsl_freemem_event_cb,
- entry, dev_priv);
- } else {
+ if (!entry) {
KGSL_DRV_ERR(dev_priv->device,
- "invalid gpuaddr %08x\n", gpuaddr);
+ "invalid gpuaddr %08x\n", gpuaddr);
result = -EINVAL;
+ goto done;
}
-
+ trace_kgsl_mem_timestamp_queue(device, entry, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp);
+ result = kgsl_add_event(dev_priv->device, context_id, timestamp,
+ kgsl_freemem_event_cb, entry, dev_priv);
+done:
return result;
}
@@ -1287,11 +1289,14 @@
goto done;
}
- if (dev_priv->device->ftbl->drawctxt_create)
+ if (dev_priv->device->ftbl->drawctxt_create) {
result = dev_priv->device->ftbl->drawctxt_create(
dev_priv->device, dev_priv->process_priv->pagetable,
context, param->flags);
-
+ if (result)
+ goto done;
+ }
+ trace_kgsl_context_create(dev_priv->device, context, param->flags);
param->drawctxt_id = context->id;
done:
if (result && context)
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index da3e4b2..b67f460 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -122,6 +122,8 @@
int (*map_kernel_mem)(struct kgsl_memdesc *);
};
+#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
+
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
@@ -133,6 +135,7 @@
struct scatterlist *sg;
unsigned int sglen;
struct kgsl_memdesc_ops *ops;
+ int flags;
};
/* List of different memory entry types */
@@ -155,7 +158,6 @@
int flags;
void *priv_data;
struct rb_node node;
- uint32_t free_timestamp;
unsigned int context_id;
/* back pointer to private structure under whose context this
* allocation is made */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 5b6522a..5b2fd31 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -294,6 +294,13 @@
return device->ftbl->gpuid(device);
}
+static inline unsigned int kgsl_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ enum kgsl_timestamp_type type)
+{
+ return device->ftbl->readtimestamp(device, context, type);
+}
+
static inline int kgsl_create_device_sysfs_files(struct device *root,
const struct device_attribute **list)
{
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index ab47f40..429d035 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -21,6 +21,7 @@
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
@@ -410,6 +411,9 @@
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(ptbase),
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+ trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(ptbase),
+ reg & 0x02 ? "WRITE" : "READ");
}
static void *kgsl_gpummu_create_pagetable(void)
@@ -591,7 +595,7 @@
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
- unsigned int range = memdesc->size;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index fb65565..2050827 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -25,6 +25,79 @@
#include "kgsl_iommu.h"
#include "adreno_pm4types.h"
#include "adreno.h"
+#include "kgsl_trace.h"
+
+static struct kgsl_iommu_unit *get_iommu_unit(struct device *dev)
+{
+ int i, j, k;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_mmu *mmu;
+ struct kgsl_iommu *iommu;
+
+ if (kgsl_driver.devp[i] == NULL)
+ continue;
+
+ mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
+ if (mmu == NULL || mmu->priv == NULL)
+ continue;
+
+ iommu = mmu->priv;
+
+ for (j = 0; j < iommu->unit_count; j++) {
+ struct kgsl_iommu_unit *iommu_unit =
+ &iommu->iommu_units[j];
+ for (k = 0; k < iommu_unit->dev_count; k++) {
+ if (iommu_unit->dev[k].dev == dev)
+ return iommu_unit;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
+ struct device *dev)
+{
+ int k;
+
+ for (k = 0; unit && k < unit->dev_count; k++) {
+ if (unit->dev[k].dev == dev)
+ return &(unit->dev[k]);
+ }
+
+ return NULL;
+}
+
+static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long addr, int flags)
+{
+ struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
+ struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
+ unsigned int ptbase, fsr;
+
+ if (!iommu_dev) {
+ KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
+ return -ENOSYS;
+ }
+
+ ptbase = iommu_get_pt_base_addr(domain);
+
+ fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
+ iommu_dev->ctx_id, FSR);
+
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "GPU PAGE FAULT: addr = %lX pid = %d\n",
+ addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
+ KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+ iommu_dev->ctx_id, fsr);
+
+ trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
+ kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);
+
+ return 0;
+}
/*
* kgsl_iommu_disable_clk - Disable iommu clocks
@@ -167,7 +240,11 @@
KGSL_CORE_ERR("Failed to create iommu domain\n");
kfree(iommu_pt);
return NULL;
+ } else {
+ iommu_set_fault_handler(iommu_pt->domain,
+ kgsl_iommu_fault_handler);
}
+
return iommu_pt;
}
@@ -304,6 +381,8 @@
}
iommu_unit->dev[iommu_unit->dev_count].ctx_id =
data->iommu_ctxs[i].ctx_id;
+ iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
+
KGSL_DRV_INFO(mmu->device,
"Obtained dev handle %p for iommu context %s\n",
iommu_unit->dev[iommu_unit->dev_count].dev,
@@ -654,7 +733,7 @@
struct kgsl_memdesc *memdesc)
{
int ret;
- unsigned int range = memdesc->size;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
@@ -684,6 +763,7 @@
int ret;
unsigned int iommu_virt_addr;
struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+ int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
BUG_ON(NULL == iommu_pt);
@@ -691,11 +771,11 @@
iommu_virt_addr = memdesc->gpuaddr;
ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
- memdesc->size, (IOMMU_READ | IOMMU_WRITE));
+ size, (IOMMU_READ | IOMMU_WRITE));
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
"failed with err: %d\n", iommu_pt->domain,
- iommu_virt_addr, memdesc->sg, memdesc->size,
+ iommu_virt_addr, memdesc->sg, size,
(IOMMU_READ | IOMMU_WRITE), ret);
return ret;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index d4de656..efc3d9c 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -18,6 +18,8 @@
/* IOMMU registers and masks */
#define KGSL_IOMMU_TTBR0 0x10
#define KGSL_IOMMU_TTBR1 0x14
+#define KGSL_IOMMU_FSR 0x20
+
#define KGSL_IOMMU_TTBR0_PA_MASK 0x0003FFFF
#define KGSL_IOMMU_TTBR0_PA_SHIFT 14
#define KGSL_IOMMU_CTX_TLBIALL 0x800
@@ -75,6 +77,7 @@
unsigned int pt_lsb;
enum kgsl_iommu_context_id ctx_id;
bool clk_enabled;
+ struct kgsl_device *kgsldev;
};
/*
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index b882807..9092b96 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -595,6 +595,7 @@
{
int ret;
struct gen_pool *pool;
+ int size;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
if (memdesc->sglen == 1) {
@@ -614,13 +615,15 @@
}
}
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
/* Allocate from kgsl pool if it exists for global mappings */
pool = _get_pool(pagetable, memdesc->priv);
- memdesc->gpuaddr = gen_pool_alloc(pool, memdesc->size);
+ memdesc->gpuaddr = gen_pool_alloc(pool, size);
if (memdesc->gpuaddr == 0) {
KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
- memdesc->size,
+ size,
(pool == pagetable->kgsl_pool) ?
"kgsl_pool" : "general_pool");
KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
@@ -644,7 +647,7 @@
KGSL_STATS_ADD(1, pagetable->stats.entries,
pagetable->stats.max_entries);
- KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
+ KGSL_STATS_ADD(size, pagetable->stats.mapped,
pagetable->stats.max_mapped);
spin_unlock(&pagetable->lock);
@@ -653,7 +656,7 @@
err_free_gpuaddr:
spin_unlock(&pagetable->lock);
- gen_pool_free(pool, memdesc->gpuaddr, memdesc->size);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
memdesc->gpuaddr = 0;
return ret;
}
@@ -664,6 +667,8 @@
struct kgsl_memdesc *memdesc)
{
struct gen_pool *pool;
+ int size;
+
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return 0;
@@ -671,6 +676,9 @@
memdesc->gpuaddr = 0;
return 0;
}
+
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
@@ -678,12 +686,12 @@
spin_lock(&pagetable->lock);
/* Remove the statistics */
pagetable->stats.entries--;
- pagetable->stats.mapped -= memdesc->size;
+ pagetable->stats.mapped -= size;
spin_unlock(&pagetable->lock);
pool = _get_pool(pagetable, memdesc->priv);
- gen_pool_free(pool, memdesc->gpuaddr, memdesc->size);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
/*
* Don't clear the gpuaddr on global mappings because they
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a51f29f..8829102 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -63,6 +63,13 @@
}
+/*
+ * One page allocation for a guard region to protect against over-zealous
+ * GPU pre-fetch
+ */
+
+static struct page *kgsl_guard_page;
+
/**
* Given a kobj, find the process structure attached to it
*/
@@ -333,13 +340,20 @@
{
int i = 0;
struct scatterlist *sg;
+ int sglen = memdesc->sglen;
+
+ /* Don't free the guard page if it was used */
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
kgsl_driver.stats.page_alloc -= memdesc->size;
+
if (memdesc->hostptr) {
vunmap(memdesc->hostptr);
kgsl_driver.stats.vmalloc -= memdesc->size;
}
if (memdesc->sg)
- for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
+ for_each_sg(memdesc->sg, sg, sglen, i)
__free_page(sg_page(sg));
}
@@ -362,17 +376,23 @@
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
struct page **pages = NULL;
struct scatterlist *sg;
+ int sglen = memdesc->sglen;
int i;
+
+ /* Don't map the guard page if it exists */
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
/* create a list of pages to call vmap */
- pages = vmalloc(memdesc->sglen * sizeof(struct page *));
+ pages = vmalloc(sglen * sizeof(struct page *));
if (!pages) {
KGSL_CORE_ERR("vmalloc(%d) failed\n",
- memdesc->sglen * sizeof(struct page *));
+ sglen * sizeof(struct page *));
return -ENOMEM;
}
- for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
+ for_each_sg(memdesc->sg, sg, sglen, i)
pages[i] = sg_page(sg);
- memdesc->hostptr = vmap(pages, memdesc->sglen,
+ memdesc->hostptr = vmap(pages, sglen,
VM_IOREMAP, page_prot);
KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
@@ -471,6 +491,14 @@
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
int i;
+ /*
+ * Add guard page to the end of the allocation when the
+ * IOMMU is in use.
+ */
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ sglen++;
+
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->priv = KGSL_MEMFLAGS_CACHED;
@@ -490,7 +518,7 @@
memdesc->sglen = sglen;
sg_init_table(memdesc->sg, sglen);
- for (i = 0; i < memdesc->sglen; i++) {
+ for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
__GFP_HIGHMEM);
if (!page) {
@@ -501,6 +529,22 @@
flush_dcache_page(page);
sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
}
+
+ /* Add the guard page to the end of the sglist */
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+ if (kgsl_guard_page == NULL)
+ kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
+ __GFP_HIGHMEM);
+
+ if (kgsl_guard_page != NULL) {
+ sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
+ PAGE_SIZE, 0);
+ memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
+ } else
+ memdesc->sglen--;
+ }
+
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
KGSL_CACHE_OP_FLUSH);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index fb8dd95..034ade4 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -157,4 +157,15 @@
return ret;
}
+static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
+{
+ int i, size = 0;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, sglen, i) {
+ size += s->length;
+ }
+
+ return size;
+}
#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index f61c74f..080cb15 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -63,10 +63,10 @@
* return the global timestamp for all contexts
*/
- header->timestamp_queued = device->ftbl->readtimestamp(device,
- context, KGSL_TIMESTAMP_QUEUED);
- header->timestamp_retired = device->ftbl->readtimestamp(device,
- context, KGSL_TIMESTAMP_RETIRED);
+ header->timestamp_queued = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_QUEUED);
+ header->timestamp_retired = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
_ctxtptr += sizeof(struct kgsl_snapshot_linux_context);
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 84d7f94..60231f6 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -22,6 +22,7 @@
#define TRACE_INCLUDE_FILE kgsl_trace
#include <linux/tracepoint.h>
+#include "kgsl_device.h"
struct kgsl_device;
struct kgsl_ringbuffer_issueibcmds;
@@ -33,9 +34,11 @@
TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
- struct kgsl_ringbuffer_issueibcmds *cmd, int result),
+ struct kgsl_ringbuffer_issueibcmds *cmd,
+ struct kgsl_ibdesc *ibdesc,
+ int result),
- TP_ARGS(device, cmd, result),
+ TP_ARGS(device, cmd, ibdesc, result),
TP_STRUCT__entry(
__string(device_name, device->name)
@@ -50,7 +53,7 @@
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->drawctxt_id = cmd->drawctxt_id;
- __entry->ibdesc_addr = cmd->ibdesc_addr;
+ __entry->ibdesc_addr = ibdesc[0].gpuaddr;
__entry->numibs = cmd->numibs;
__entry->timestamp = cmd->timestamp;
__entry->flags = cmd->flags;
@@ -58,14 +61,19 @@
),
TP_printk(
- "d_name=%s ctx=%u ib=%u numibs=%u timestamp=%u "
- "flags=%u result=%d",
+ "d_name=%s ctx=%u ib=0x%x numibs=%u timestamp=0x%x "
+ "flags=0x%x(%s) result=%d",
__get_str(device_name),
__entry->drawctxt_id,
__entry->ibdesc_addr,
__entry->numibs,
__entry->timestamp,
__entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" },
+ { KGSL_CONTEXT_SUBMIT_IB_LIST, "IB_LIST" },
+ { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" })
+ : "None",
__entry->result
)
);
@@ -97,7 +105,7 @@
),
TP_printk(
- "d_name=%s context_id=%u type=%u timestamp=%u",
+ "d_name=%s context_id=%u type=%u timestamp=0x%x",
__get_str(device_name),
__entry->context_id,
__entry->type,
@@ -112,30 +120,34 @@
TP_PROTO(struct kgsl_device *device,
unsigned int context_id,
- unsigned int timestamp,
+ unsigned int curr_ts,
+ unsigned int wait_ts,
unsigned int timeout),
- TP_ARGS(device, context_id, timestamp, timeout),
+ TP_ARGS(device, context_id, curr_ts, wait_ts, timeout),
TP_STRUCT__entry(
__string(device_name, device->name)
__field(unsigned int, context_id)
- __field(unsigned int, timestamp)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, wait_ts)
__field(unsigned int, timeout)
),
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->context_id = context_id;
- __entry->timestamp = timestamp;
+ __entry->curr_ts = curr_ts;
+ __entry->wait_ts = wait_ts;
__entry->timeout = timeout;
),
TP_printk(
- "d_name=%s context_id=%u timestamp=%u timeout=%u",
+ "d_name=%s context_id=%u curr_ts=%u timestamp=0x%x timeout=%u",
__get_str(device_name),
__entry->context_id,
- __entry->timestamp,
+ __entry->curr_ts,
+ __entry->wait_ts,
__entry->timeout
)
);
@@ -145,23 +157,27 @@
*/
TRACE_EVENT(kgsl_waittimestamp_exit,
- TP_PROTO(struct kgsl_device *device, int result),
+ TP_PROTO(struct kgsl_device *device, unsigned int curr_ts,
+ int result),
- TP_ARGS(device, result),
+ TP_ARGS(device, curr_ts, result),
TP_STRUCT__entry(
__string(device_name, device->name)
+ __field(unsigned int, curr_ts)
__field(int, result)
),
TP_fast_assign(
__assign_str(device_name, device->name);
+ __entry->curr_ts = curr_ts;
__entry->result = result;
),
TP_printk(
- "d_name=%s result=%d",
+ "d_name=%s curr_ts=%u result=%d",
__get_str(device_name),
+ __entry->curr_ts,
__entry->result
)
);
@@ -343,12 +359,13 @@
DECLARE_EVENT_CLASS(kgsl_mem_timestamp_template,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
- TP_ARGS(mem_entry, id, curr_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts),
TP_STRUCT__entry(
+ __string(device_name, device->name)
__field(unsigned int, gpuaddr)
__field(unsigned int, size)
__field(int, type)
@@ -358,33 +375,120 @@
),
TP_fast_assign(
+ __assign_str(device_name, device->name);
__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
__entry->size = mem_entry->memdesc.size;
__entry->drawctxt_id = id;
__entry->type = mem_entry->memtype;
__entry->curr_ts = curr_ts;
- __entry->free_ts = mem_entry->free_timestamp;
+ __entry->free_ts = free_ts;
),
TP_printk(
- "gpuaddr=0x%08x size=%d type=%d ctx=%u curr_ts=0x%08x free_ts=0x%08x",
- __entry->gpuaddr, __entry->size, __entry->type,
- __entry->drawctxt_id, __entry->curr_ts, __entry->free_ts
+ "d_name=%s gpuaddr=0x%08x size=%d type=%d ctx=%u"
+ " curr_ts=0x%08x free_ts=0x%08x",
+ __get_str(device_name),
+ __entry->gpuaddr,
+ __entry->size,
+ __entry->type,
+ __entry->drawctxt_id,
+ __entry->curr_ts,
+ __entry->free_ts
)
);
DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_queue,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
- TP_ARGS(mem_entry, id, curr_ts)
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
);
DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_free,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
- TP_ARGS(mem_entry, id, curr_ts)
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
);
+TRACE_EVENT(kgsl_context_create,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int flags),
+
+ TP_ARGS(device, context, flags),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ __entry->flags = flags;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u flags=0x%x %s",
+ __get_str(device_name), __entry->id, __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_NO_GMEM_ALLOC , "NO_GMEM_ALLOC" },
+ { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" },
+ { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" },
+ { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" })
+ : "None"
+ )
+);
+
+TRACE_EVENT(kgsl_context_detach,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context),
+
+ TP_ARGS(device, context),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u",
+ __get_str(device_name), __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mmu_pagefault,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int page,
+ unsigned int pt, const char *op),
+
+ TP_ARGS(device, page, pt, op),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, page)
+ __field(unsigned int, pt)
+ __string(op, op)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->page = page;
+ __entry->pt = pt;
+ __assign_str(op, op);
+ ),
+
+ TP_printk(
+ "d_name=%s page=0x%08x pt=%d op=%s\n",
+ __get_str(device_name), __entry->page, __entry->pt,
+ __get_str(op)
+ )
+);
#endif /* _KGSL_TRACE_H */
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index ff0c9d8..a57ad44 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -136,6 +136,8 @@
struct timespec timeout);
int (*start_filtering) (struct dmx_ts_feed* feed);
int (*stop_filtering) (struct dmx_ts_feed* feed);
+ int (*set_indexing_params) (struct dmx_ts_feed *feed,
+ struct dmx_indexing_video_params *params);
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index ed3f731..1d310f2 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -1186,6 +1186,24 @@
return ret;
}
+ /* Support indexing for video PES */
+ if ((para->pes_type == DMX_PES_VIDEO0) ||
+ (para->pes_type == DMX_PES_VIDEO1) ||
+ (para->pes_type == DMX_PES_VIDEO2) ||
+ (para->pes_type == DMX_PES_VIDEO3)) {
+
+ if (tsfeed->set_indexing_params) {
+ ret = tsfeed->set_indexing_params(tsfeed,
+ &para->video_params);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+ }
+
ret = tsfeed->start_filtering(tsfeed);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -1464,6 +1482,23 @@
if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
return -EINVAL;
+ if (params->flags & DMX_ENABLE_INDEXING) {
+ if (!(dmxdev->capabilities & DMXDEV_CAP_INDEXING))
+ return -EINVAL;
+
+ /* can do indexing only on video PES */
+ if ((params->pes_type != DMX_PES_VIDEO0) &&
+ (params->pes_type != DMX_PES_VIDEO1) &&
+ (params->pes_type != DMX_PES_VIDEO2) &&
+ (params->pes_type != DMX_PES_VIDEO3))
+ return -EINVAL;
+
+ /* can do indexing only when recording */
+ if ((params->output != DMX_OUT_TS_TAP) &&
+ (params->output != DMX_OUT_TSDEMUX_TAP))
+ return -EINVAL;
+ }
+
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 82f8f6d..4c52e84 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -108,6 +108,7 @@
#define DMXDEV_CAP_DUPLEX 0x1
#define DMXDEV_CAP_PULL_MODE 0x2
#define DMXDEV_CAP_PCR_EXTRACTION 0x4
+#define DMXDEV_CAP_INDEXING 0x8
enum dmx_playback_mode_t playback_mode;
dmx_source_t source;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 966b48d..0ff2a55 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -1027,6 +1027,18 @@
return ret;
}
+static int dmx_ts_set_indexing_params(
+ struct dmx_ts_feed *ts_feed,
+ struct dmx_indexing_video_params *params)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ memcpy(&feed->indexing_params, params,
+ sizeof(struct dmx_indexing_video_params));
+
+ return 0;
+}
+
static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
struct dmx_ts_feed **ts_feed,
dmx_ts_cb callback)
@@ -1048,6 +1060,8 @@
feed->pid = 0xffff;
feed->peslen = 0xfffa;
feed->buffer = NULL;
+ memset(&feed->indexing_params, 0,
+ sizeof(struct dmx_indexing_video_params));
/* default behaviour - pass first PES data even if it is
* partial PES data from previous PES that we didn't receive its header.
@@ -1063,6 +1077,7 @@
(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
+ (*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 297f3df..17f4960 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -95,10 +95,12 @@
int cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
- u16 peslen;
+ u32 peslen;
struct list_head list_head;
unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+
+ struct dmx_indexing_video_params indexing_params;
};
struct dvb_demux {
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 444d3d4..d2eabb9 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -158,6 +158,7 @@
obj-$(CONFIG_MSM_VCAP) += vcap_v4l2.o
obj-$(CONFIG_MSM_VCAP) += vcap_vc.o
+obj-$(CONFIG_MSM_VCAP) += vcap_vp.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index b602c37..ab4a6f2 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -177,6 +177,15 @@
supports spotlight and flash light modes with
differrent current levels.
+config MSM_CAMERA_FLASH_TPS61310
+ bool "Qualcomm MSM camera tps61310 flash support"
+ depends on MSM_CAMERA
+ default n
+ ---help---
+ Enable support for LED flash for msm camera.
+ It is a Texas Instruments multiple LED Flash
+ for camera flash and video light applications.
+
config IMX072
bool "Sensor imx072 (Sony 5M)"
default n
diff --git a/drivers/media/video/msm/actuators/msm_actuator.c b/drivers/media/video/msm/actuators/msm_actuator.c
index 85470b7..3a8ae9e 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.c
+++ b/drivers/media/video/msm/actuators/msm_actuator.c
@@ -375,38 +375,13 @@
int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
- int16_t step_pos = 0;
- int16_t i = 0;
- CDBG("%s called\n", __func__);
-
- if (a_ctrl->step_position_table) {
- if (a_ctrl->step_position_table[a_ctrl->curr_step_pos] >=
- a_ctrl->step_position_table[a_ctrl->pwd_step]) {
- step_pos = (a_ctrl->
- step_position_table[a_ctrl->curr_step_pos] -
- a_ctrl->step_position_table[a_ctrl->
- pwd_step]) / 10;
- for (i = 0; i < 10; i++) {
- rc = a_ctrl->func_tbl->
- actuator_i2c_write(a_ctrl,
- i * step_pos, 0);
- usleep(500);
- }
- rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
- a_ctrl->step_position_table[a_ctrl->
- curr_step_pos],
- 0);
- }
- CDBG("%s after msm_actuator_set_default_focus\n", __func__);
- kfree(a_ctrl->step_position_table);
- }
-
if (a_ctrl->vcm_enable) {
rc = gpio_direction_output(a_ctrl->vcm_pwd, 0);
if (!rc)
gpio_free(a_ctrl->vcm_pwd);
}
+ kfree(a_ctrl->step_position_table);
a_ctrl->step_position_table = NULL;
return rc;
}
diff --git a/drivers/media/video/msm/flash.c b/drivers/media/video/msm/flash.c
index 0d17e13..ba86d8c 100644
--- a/drivers/media/video/msm/flash.c
+++ b/drivers/media/video/msm/flash.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,54 +30,54 @@
MSM_CAM_FLASH_ON,
};
-#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
static struct i2c_client *sc628a_client;
-static const struct i2c_device_id sc628a_i2c_id[] = {
- {"sc628a", 0},
- { }
-};
-
-static int32_t sc628a_i2c_txdata(unsigned short saddr,
+static int32_t flash_i2c_txdata(struct i2c_client *client,
unsigned char *txdata, int length)
{
struct i2c_msg msg[] = {
{
- .addr = saddr,
+ .addr = client->addr >> 1,
.flags = 0,
.len = length,
.buf = txdata,
},
};
- if (i2c_transfer(sc628a_client->adapter, msg, 1) < 0) {
- CDBG("sc628a_i2c_txdata faild 0x%x\n", saddr);
+ if (i2c_transfer(client->adapter, msg, 1) < 0) {
+ CDBG("flash_i2c_txdata faild 0x%x\n", client->addr >> 1);
return -EIO;
}
return 0;
}
-static int32_t sc628a_i2c_write_b_flash(uint8_t waddr, uint8_t bdata)
+static int32_t flash_i2c_write_b(struct i2c_client *client,
+ uint8_t baddr, uint8_t bdata)
{
int32_t rc = -EFAULT;
unsigned char buf[2];
- if (!sc628a_client)
+ if (!client)
return -ENOTSUPP;
memset(buf, 0, sizeof(buf));
- buf[0] = waddr;
+ buf[0] = baddr;
buf[1] = bdata;
- rc = sc628a_i2c_txdata(sc628a_client->addr>>1, buf, 2);
+ rc = flash_i2c_txdata(client, buf, 2);
if (rc < 0) {
CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
- waddr, bdata);
+ baddr, bdata);
}
usleep_range(4000, 5000);
return rc;
}
+static const struct i2c_device_id sc628a_i2c_id[] = {
+ {"sc628a", 0},
+ { }
+};
+
static int sc628a_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -91,7 +91,7 @@
sc628a_client = client;
- CDBG("sc628a_probe successed! rc = %d\n", rc);
+ CDBG("sc628a_probe success rc = %d\n", rc);
return 0;
probe_failure:
@@ -107,7 +107,49 @@
.name = "sc628a",
},
};
-#endif
+
+static struct i2c_client *tps61310_client;
+
+static const struct i2c_device_id tps61310_i2c_id[] = {
+ {"tps61310", 0},
+ { }
+};
+
+static int tps61310_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ CDBG("%s enter\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ goto probe_failure;
+ }
+
+ tps61310_client = client;
+
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x00);
+ if (rc < 0) {
+ tps61310_client = NULL;
+ goto probe_failure;
+ }
+
+ CDBG("%s success! rc = %d\n", __func__, rc);
+ return 0;
+
+probe_failure:
+ pr_err("%s failed! rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static struct i2c_driver tps61310_i2c_driver = {
+ .id_table = tps61310_i2c_id,
+ .probe = tps61310_i2c_probe,
+ .remove = __exit_p(tps61310_i2c_remove),
+ .driver = {
+ .name = "tps61310",
+ },
+};
static int config_flash_gpio_table(enum msm_cam_flash_stat stat,
struct msm_camera_sensor_strobe_flash_data *sfdata)
@@ -278,18 +320,34 @@
{
int rc = 0;
-#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
switch (led_state) {
case MSM_CAMERA_LED_INIT:
- if (!sc628a_client) {
- rc = i2c_add_driver(&sc628a_i2c_driver);
- if (rc < 0 || sc628a_client == NULL) {
- rc = -ENOTSUPP;
- CDBG("I2C add driver failed");
- return rc;
+ if (external->flash_id == MAM_CAMERA_EXT_LED_FLASH_SC628A) {
+ if (!sc628a_client) {
+ rc = i2c_add_driver(&sc628a_i2c_driver);
+ if (rc < 0 || sc628a_client == NULL) {
+ pr_err("sc628a_i2c_driver add failed\n");
+ rc = -ENOTSUPP;
+ return rc;
+ }
}
+ } else if (external->flash_id ==
+ MAM_CAMERA_EXT_LED_FLASH_TPS61310) {
+ if (!tps61310_client) {
+ rc = i2c_add_driver(&tps61310_i2c_driver);
+ if (rc < 0 || tps61310_client == NULL) {
+ pr_err("tps61310_i2c_driver add failed\n");
+ rc = -ENOTSUPP;
+ return rc;
+ }
+ }
+ } else {
+ pr_err("Flash id not supported\n");
+ rc = -ENOTSUPP;
+ return rc;
}
+
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
if (external->expander_info && !sx150x_client) {
struct i2c_adapter *adapter =
@@ -298,20 +356,37 @@
sx150x_client = i2c_new_device(adapter,
external->expander_info->board_info);
if (!sx150x_client || !adapter) {
+ pr_err("sx150x_client is not available\n");
rc = -ENOTSUPP;
- i2c_del_driver(&sc628a_i2c_driver);
- sc628a_client = NULL;
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
return rc;
}
+ i2c_put_adapter(adapter);
}
#endif
- rc = gpio_request(external->led_en, "sc628a");
+ if (sc628a_client)
+ rc = gpio_request(external->led_en, "sc628a");
+ if (tps61310_client)
+ rc = gpio_request(external->led_en, "tps61310");
+
if (!rc) {
gpio_direction_output(external->led_en, 0);
} else {
- goto err1;
+ goto error;
}
- rc = gpio_request(external->led_flash_en, "sc628a");
+
+ if (sc628a_client)
+ rc = gpio_request(external->led_flash_en, "sc628a");
+ if (tps61310_client)
+ rc = gpio_request(external->led_flash_en, "tps61310");
+
if (!rc) {
gpio_direction_output(external->led_flash_en, 0);
break;
@@ -319,19 +394,32 @@
gpio_set_value_cansleep(external->led_en, 0);
gpio_free(external->led_en);
-
-err1:
- i2c_del_driver(&sc628a_i2c_driver);
- sc628a_client = NULL;
-
+error:
+ pr_err("%s gpio request failed\n", __func__);
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
break;
case MSM_CAMERA_LED_RELEASE:
- if (sc628a_client) {
+ if (sc628a_client || tps61310_client) {
gpio_set_value_cansleep(external->led_en, 0);
gpio_free(external->led_en);
gpio_set_value_cansleep(external->led_flash_en, 0);
gpio_free(external->led_flash_en);
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
}
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
if (external->expander_info && sx150x_client) {
@@ -342,37 +430,38 @@
break;
case MSM_CAMERA_LED_OFF:
- rc = sc628a_i2c_write_b_flash(0x02, 0x0);
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 0);
- gpio_set_value_cansleep(external->led_flash_en, 0);
- }
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x00);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x00);
+ gpio_set_value_cansleep(external->led_en, 0);
+ gpio_set_value_cansleep(external->led_flash_en, 0);
break;
case MSM_CAMERA_LED_LOW:
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 1);
- gpio_set_value_cansleep(external->led_flash_en, 1);
- usleep_range(2000, 3000);
- }
- rc = sc628a_i2c_write_b_flash(0x02, 0x06);
+ gpio_set_value_cansleep(external->led_en, 1);
+ gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x06);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x86);
break;
case MSM_CAMERA_LED_HIGH:
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 1);
- gpio_set_value_cansleep(external->led_flash_en, 1);
- usleep_range(2000, 3000);
- }
- rc = sc628a_i2c_write_b_flash(0x02, 0x49);
+ gpio_set_value_cansleep(external->led_en, 1);
+ gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x49);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x8B);
break;
default:
rc = -EFAULT;
break;
}
-#endif
-
return rc;
}
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index d678d86..315f218 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -246,7 +246,6 @@
pr_err("%s: Invalid vdata type: %d\n", __func__, vdata->type);
break;
}
- msm_isp_sync_free(vdata);
return rc;
}
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index 90ba214..89615ec 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -423,20 +423,15 @@
static void vfe31_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags;
spin_lock_irqsave(&vfe31_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- spin_unlock_irqrestore(&vfe31_ctrl->sd_notify_lock, flags);
- return;
- }
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
CDBG("vfe31_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe31_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe31_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe31_ctrl->sd_notify_lock, flags);
}
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index 3e8d3be..d50b778 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -418,19 +418,15 @@
static void vfe32_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags = 0;
spin_lock_irqsave(&vfe32_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- return;
- }
CDBG("vfe32_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe32_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe32_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe32_ctrl->sd_notify_lock, flags);
}
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
index 0f71404..f9414a5 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
@@ -840,19 +840,15 @@
static void vfe2x_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags = 0;
spin_lock_irqsave(&vfe2x_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- return;
- }
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
CDBG("vfe2x_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe2x_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe2x_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe2x_ctrl->sd_notify_lock, flags);
}
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index ff5bb49..d163427 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -22,6 +22,13 @@
uint16_t cur_line = 0;
uint16_t exp_fl_lines = 0;
if (s_ctrl->sensor_exp_gain_info) {
+ if (s_ctrl->prev_gain && s_ctrl->prev_line &&
+ s_ctrl->func_tbl->sensor_write_exp_gain)
+ s_ctrl->func_tbl->sensor_write_exp_gain(
+ s_ctrl,
+ s_ctrl->prev_gain,
+ s_ctrl->prev_line);
+
msm_camera_i2c_read(s_ctrl->sensor_i2c_client,
s_ctrl->sensor_exp_gain_info->coarse_int_time_addr,
&cur_line,
@@ -429,6 +436,8 @@
s_ctrl,
cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
+ s_ctrl->prev_gain = cdata.cfg.exp_gain.gain;
+ s_ctrl->prev_line = cdata.cfg.exp_gain.line;
break;
case CFG_SET_PICT_EXP_GAIN:
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index 22cc05b..0e51409 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -153,6 +153,8 @@
uint16_t curr_line_length_pclk;
uint16_t curr_frame_length_lines;
+ uint16_t prev_gain;
+ uint16_t prev_line;
uint32_t fps_divider;
enum msm_sensor_resolution_t curr_res;
diff --git a/drivers/media/video/msm/wfd/wfd-ioctl.c b/drivers/media/video/msm/wfd/wfd-ioctl.c
index 2242aa8..4f6c09d 100644
--- a/drivers/media/video/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm/wfd/wfd-ioctl.c
@@ -498,7 +498,6 @@
if (rc)
WFD_MSG_ERR("Failed to free output buffer\n");
wfd_unregister_out_buf(inst, minfo);
- wfd_free_input_buffers(wfd_dev, inst);
}
static int mdp_output_thread(void *data)
@@ -1344,12 +1343,13 @@
inst = filp->private_data;
if (inst) {
wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- vb2_queue_release(&inst->vid_bufq);
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_CLOSE, (void *)inst->mdp_inst);
if (rc)
WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);
+ vb2_queue_release(&inst->vid_bufq);
+ wfd_free_input_buffers(wfd_dev, inst);
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
CLOSE, (void *)inst->venc_inst);
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index 3c279c9..3011a2b 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -239,37 +239,12 @@
{
int rc = 0;
struct vb2_queue *q;
- unsigned long flags;
- struct list_head *ptr, *next;
- struct internal_buf *buf;
- struct extradata_buf *ebuf;
q = msm_comm_get_vb2q(inst, i);
if (!q) {
pr_err("Failed to find buffer queue for type = %d\n", i);
return -EINVAL;
}
- if (i == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- spin_lock_irqsave(&inst->lock, flags);
- if (!list_empty(&inst->internalbufs)) {
- list_for_each_safe(ptr, next, &inst->internalbufs) {
- buf = list_entry(ptr, struct internal_buf,
- list);
- list_del(&buf->list);
- msm_smem_free(inst->mem_client, buf->handle);
- kfree(buf);
- }
- }
- if (!list_empty(&inst->extradatabufs)) {
- list_for_each_safe(ptr, next, &inst->extradatabufs) {
- ebuf = list_entry(ptr, struct extradata_buf,
- list);
- ebuf->device_addr = 0;
- }
- }
- spin_unlock_irqrestore(&inst->lock, flags);
- }
-
pr_debug("Calling streamoff\n");
rc = vb2_streamoff(q, i);
if (rc)
@@ -286,8 +261,7 @@
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
- struct extradata_buf *binfo;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
for (i = 0; i < b->length; i++) {
pr_err("device_addr = %ld, size = %d\n",
b->m.planes[i].m.userptr,
@@ -297,19 +271,8 @@
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
b->m.planes[i].m.userptr;
- binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
- if (!binfo) {
- pr_err("Failed to allocate shared mem\n");
- return -ENOMEM;
- }
- binfo->device_addr = b->m.planes[i].m.userptr;
- rc = msm_comm_allocate_extradata_buffers(inst, binfo);
- if (rc) {
- pr_err("msm_comm_allocate_extradata_buffers failed");
- break;
- }
- buffer_info.extradata_size = binfo->handle->size;
- buffer_info.extradata_addr = binfo->handle->device_addr;
+ buffer_info.extradata_size = 0;
+ buffer_info.extradata_addr = 0;
rc = vidc_hal_session_set_buffers((void *)inst->session,
&buffer_info);
if (rc) {
@@ -318,7 +281,6 @@
}
}
break;
- }
default:
pr_err("Buffer type not recognized: %d\n", b->type);
break;
@@ -336,10 +298,9 @@
switch (b->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
- struct extradata_buf *addr;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
for (i = 0; i < b->length; i++) {
- pr_err("Release device_addr = %ld, size = %d\n",
+ pr_debug("Release device_addr = %ld, size = %d\n",
b->m.planes[i].m.userptr,
b->m.planes[i].length);
buffer_info.buffer_size = b->m.planes[i].length;
@@ -347,26 +308,13 @@
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
b->m.planes[i].m.userptr;
- if (!list_empty(&inst->extradatabufs)) {
- list_for_each_entry(addr, &inst->
- extradatabufs, list) {
- if (addr->device_addr ==
- buffer_info.
- align_device_addr) {
- buffer_info.extradata_addr =
- addr->handle->
- device_addr;
- break;
- }
- }
- }
+ buffer_info.extradata_addr = 0;
rc = vidc_hal_session_release_buffers(
(void *)inst->session, &buffer_info);
if (rc)
pr_err("vidc_hal_session_release_buffers failed");
}
break;
- }
default:
pr_err("Buffer type not recognized: %d\n", b->type);
break;
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index d589bf5..09d37ce 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -246,7 +246,6 @@
inst->session_type = session_type;
INIT_LIST_HEAD(&inst->pendingq);
INIT_LIST_HEAD(&inst->internalbufs);
- INIT_LIST_HEAD(&inst->extradatabufs);
inst->state = MSM_VIDC_CORE_UNINIT_DONE;
inst->core = core;
for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
@@ -301,7 +300,6 @@
struct list_head *ptr, *next;
struct vb2_buf_entry *entry;
struct internal_buf *buf;
-
if (inst) {
spin_lock_irqsave(&inst->lock, flags);
if (!list_empty(&inst->pendingq)) {
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 8a51301..31879b7 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -892,23 +892,9 @@
&frame_data);
pr_debug("Sent etb to HAL\n");
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- struct extradata_buf *addr;
frame_data.filled_len = 0;
frame_data.buffer_type = HAL_BUFFER_OUTPUT;
frame_data.extradata_addr = 0;
- if (!list_empty(&inst->extradatabufs)) {
- list_for_each_entry(addr, &inst->extradatabufs,
- list) {
- if (addr->device_addr ==
- frame_data.
- device_addr) {
- frame_data.extradata_addr =
- addr->
- handle->device_addr;
- break;
- }
- }
- }
pr_debug("Sending ftb to hal...: Alloc: %d :filled: %d"
" extradata_addr: %d\n", frame_data.alloc_len,
frame_data.filled_len,
@@ -969,49 +955,31 @@
return rc;
}
-int msm_comm_allocate_extradata_buffers(struct msm_vidc_inst *inst,
- struct extradata_buf *binfo)
-{
- int rc = 0;
- unsigned long flags;
- struct msm_smem *handle;
- pr_debug("Extradata: num = %d, size = %d, align = %d\n",
- inst->buff_req.buffer[4].buffer_count_actual,
- inst->buff_req.buffer[4].buffer_size,
- inst->buff_req.buffer[4].buffer_alignment);
- if (!inst->buff_req.buffer[4].buffer_size) {
- pr_err("invalid size: %d",
- inst->buff_req.buffer[4].buffer_size);
- rc = -ENOMEM;
- goto err_no_mem;
- }
- handle = msm_smem_alloc(inst->mem_client,
- inst->buff_req.buffer[4].buffer_size,
- inst->buff_req.buffer[4].buffer_alignment, 0);
- if (!handle) {
- pr_err("Failed to allocate Extradata memory\n");
- rc = -ENOMEM;
- goto err_no_mem;
- }
- binfo->handle = handle;
- spin_lock_irqsave(&inst->lock, flags);
- list_add_tail(&binfo->list, &inst->extradatabufs);
- spin_unlock_irqrestore(&inst->lock, flags);
-err_no_mem:
- return rc;
-}
-
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst)
{
int rc = 0;
struct msm_smem *handle;
struct internal_buf *binfo;
+ struct list_head *ptr, *next;
struct vidc_buffer_addr_info buffer_info;
unsigned long flags;
int i;
pr_debug("scratch: num = %d, size = %d\n",
inst->buff_req.buffer[6].buffer_count_actual,
inst->buff_req.buffer[6].buffer_size);
+ spin_lock_irqsave(&inst->lock, flags);
+ if (!list_empty(&inst->internalbufs)) {
+ list_for_each_safe(ptr, next, &inst->internalbufs) {
+ binfo = list_entry(ptr, struct internal_buf,
+ list);
+ list_del(&binfo->list);
+ msm_smem_free(inst->mem_client, binfo->handle);
+ kfree(binfo);
+ }
+ }
+ spin_unlock_irqrestore(&inst->lock, flags);
+
+
for (i = 0; i < inst->buff_req.buffer[6].buffer_count_actual;
i++) {
handle = msm_smem_alloc(inst->mem_client,
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 45bfa7b..2fafa79 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -27,8 +27,6 @@
struct msm_vidc_inst *inst, enum v4l2_buf_type type);
int msm_comm_try_state(struct msm_vidc_inst *inst, int state);
int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst);
-int msm_comm_allocate_extradata_buffers(struct msm_vidc_inst *inst,
- struct extradata_buf *binfo);
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
int msm_comm_qbuf(struct vb2_buffer *vb);
#define IS_PRIV_CTRL(idx) (\
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index 2c7853b..fb1ab58 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -90,12 +90,6 @@
struct msm_smem *handle;
};
-struct extradata_buf {
- struct list_head list;
- struct msm_smem *handle;
- u32 device_addr;
-};
-
struct msm_vidc_format {
char name[64];
u8 description[32];
@@ -149,7 +143,6 @@
spinlock_t lock;
struct list_head pendingq;
struct list_head internalbufs;
- struct list_head extradatabufs;
struct buffer_requirements buff_req;
void *mem_client;
struct v4l2_ctrl_handler ctrl_handler;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 219f7a0..dd5bd0f 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -43,6 +43,7 @@
#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vc.h"
+#include "vcap_vp.h"
#define NUM_INPUTS 1
#define MSM_VCAP_DRV_NAME "msm_vcap"
@@ -57,6 +58,28 @@
printk(KERN_DEBUG "VCAP: " fmt, ## arg); \
} while (0)
+enum vcap_op_mode determine_mode(struct vcap_client_data *cd)
+{
+ if (cd->set_cap == 1 && cd->set_vp_o == 0 &&
+ cd->set_decode == 0)
+ return VC_VCAP_OP;
+ else if (cd->set_cap == 1 && cd->set_vp_o == 1 &&
+ cd->set_decode == 0)
+ return VC_AND_VP_VCAP_OP;
+ else if (cd->set_cap == 0 && cd->set_vp_o == 1 &&
+ cd->set_decode == 1)
+ return VP_VCAP_OP;
+ else
+ return UNKNOWN_VCAP_OP;
+}
+
+void dealloc_resources(struct vcap_client_data *cd)
+{
+ cd->set_cap = false;
+ cd->set_decode = false;
+ cd->set_vp_o = false;
+}
+
int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
struct v4l2_buffer *b)
{
@@ -103,6 +126,8 @@
&buf->paddr, (size_t *)&len);
if (rc < 0) {
pr_err("%s: Could not get phys addr\n", __func__);
+ ion_free(dev->ion_client, buf->ion_handle);
+ buf->ion_handle = NULL;
return -EFAULT;
}
@@ -148,7 +173,7 @@
return 0;
}
-/* Videobuf operations */
+/* VC Videobuf operations */
static int capture_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
unsigned int *nplanes, unsigned long sizes[],
@@ -157,7 +182,6 @@
*nbuffers += 2;
if (*nbuffers > VIDEO_MAX_FRAME)
return -EINVAL;
-
*nplanes = 1;
return 0;
}
@@ -240,6 +264,197 @@
.buf_cleanup = capture_buffer_cleanup,
};
+/* VP I/P Videobuf operations */
+
+static int vp_in_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ if (*nbuffers >= VIDEO_MAX_FRAME && *nbuffers < 5)
+ *nbuffers = 5;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vp_in_buffer_init(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static int vp_in_buffer_prepare(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_in_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vcap_client_data *cd = vb2_get_drv_priv(vb->vb2_queue);
+ struct vcap_buffer *buf = container_of(vb, struct vcap_buffer, vb);
+ struct vp_action *vp_act = &cd->vid_vp_action;
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&cd->cap_slock, flags);
+ list_add_tail(&buf->list, &vp_act->in_active);
+ spin_unlock_irqrestore(&cd->cap_slock, flags);
+
+ if (atomic_read(&cd->dev->vp_enabled) == 0) {
+ if (cd->vid_vp_action.vp_state == VP_FRAME1) {
+ if (atomic_read(&q->queued_count) > 1 &&
+ atomic_read(&cd->vp_out_vidq.queued_count) > 0)
+ /* Valid code flow for VC-VP mode */
+ kickoff_vp(cd);
+ } else {
+ /* VP has already kicked off just needs cont */
+ continue_vp(cd);
+ }
+ }
+}
+
+static int vp_in_start_streaming(struct vb2_queue *vq)
+{
+ dprintk(2, "VP IN start streaming\n");
+ return 0;
+}
+
+static int vp_in_stop_streaming(struct vb2_queue *vq)
+{
+ struct vcap_client_data *c_data = vb2_get_drv_priv(vq);
+ struct vb2_buffer *vb;
+
+ dprintk(2, "VP stop streaming\n");
+
+ while (!list_empty(&c_data->vid_vp_action.in_active)) {
+ struct vcap_buffer *buf;
+ buf = list_entry(c_data->vid_vp_action.in_active.next,
+ struct vcap_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ /* clean ion handles */
+ list_for_each_entry(vb, &vq->queued_list, queued_entry)
+ free_ion_handle_work(c_data->dev, vb);
+ return 0;
+}
+
+static int vp_in_buffer_finish(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_in_buffer_cleanup(struct vb2_buffer *vb)
+{
+}
+
+static struct vb2_ops vp_in_video_qops = {
+ .queue_setup = vp_in_queue_setup,
+ .buf_init = vp_in_buffer_init,
+ .buf_prepare = vp_in_buffer_prepare,
+ .buf_queue = vp_in_buffer_queue,
+ .start_streaming = vp_in_start_streaming,
+ .stop_streaming = vp_in_stop_streaming,
+ .buf_finish = vp_in_buffer_finish,
+ .buf_cleanup = vp_in_buffer_cleanup,
+};
+
+
+/* VP O/P Videobuf operations */
+
+static int vp_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ if (*nbuffers >= VIDEO_MAX_FRAME && *nbuffers < 3)
+ *nbuffers = 3;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vp_out_buffer_init(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static int vp_out_buffer_prepare(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_out_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vcap_client_data *cd = vb2_get_drv_priv(vb->vb2_queue);
+ struct vcap_buffer *buf = container_of(vb, struct vcap_buffer, vb);
+ struct vp_action *vp_act = &cd->vid_vp_action;
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&cd->cap_slock, flags);
+ list_add_tail(&buf->list, &vp_act->out_active);
+ spin_unlock_irqrestore(&cd->cap_slock, flags);
+
+ if (atomic_read(&cd->dev->vp_enabled) == 0) {
+ if (cd->vid_vp_action.vp_state == VP_FRAME1) {
+ if (atomic_read(&q->queued_count) > 0 &&
+ atomic_read(&
+ cd->vp_in_vidq.queued_count) > 1)
+ kickoff_vp(cd);
+ } else {
+ /* VP has already kicked off just needs cont */
+ continue_vp(cd);
+ }
+ }
+}
+
+static int vp_out_start_streaming(struct vb2_queue *vq)
+{
+ return 0;
+}
+
+static int vp_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vcap_client_data *c_data = vb2_get_drv_priv(vq);
+ struct vb2_buffer *vb;
+
+ dprintk(2, "VP out q stop streaming\n");
+ vp_stop_capture(c_data);
+
+ while (!list_empty(&c_data->vid_vp_action.out_active)) {
+ struct vcap_buffer *buf;
+ buf = list_entry(c_data->vid_vp_action.out_active.next,
+ struct vcap_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ /* clean ion handles */
+ list_for_each_entry(vb, &vq->queued_list, queued_entry)
+ free_ion_handle_work(c_data->dev, vb);
+ return 0;
+}
+
+static int vp_out_buffer_finish(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_out_buffer_cleanup(struct vb2_buffer *vb)
+{
+}
+
+static struct vb2_ops vp_out_video_qops = {
+ .queue_setup = vp_out_queue_setup,
+ .buf_init = vp_out_buffer_init,
+ .buf_prepare = vp_out_buffer_prepare,
+ .buf_queue = vp_out_buffer_queue,
+ .start_streaming = vp_out_start_streaming,
+ .stop_streaming = vp_out_stop_streaming,
+ .buf_finish = vp_out_buffer_finish,
+ .buf_cleanup = vp_out_buffer_cleanup,
+};
+
/* IOCTL vidioc handling */
static int vidioc_querycap(struct file *file, void *priv,
@@ -279,20 +494,16 @@
struct v4l2_format *f)
{
int size;
-#ifdef NEW_S_FMT
+ struct vcap_priv_fmt *priv_fmt;
struct v4l2_format_vc_ext *vc_format;
-#endif
struct vcap_client_data *c_data = file->private_data;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-#ifdef NEW_S_FMT
- vc_format = (struct v4l2_format_vc_ext *) f->fmt.raw_data;
+ priv_fmt = (struct vcap_priv_fmt *) f->fmt.raw_data;
+
+ switch (priv_fmt->type) {
+ case VC_TYPE:
+ vc_format = (struct v4l2_format_vc_ext *) &priv_fmt->u.timing;
c_data->vc_format = *vc_format;
-#else
- c_data->vc_format =
- vcap_vc_lut[f->fmt.pix.priv];
-#endif
config_vc_format(c_data);
@@ -304,22 +515,46 @@
else
size *= 2;
-#ifndef NEW_S_FMT
- f->fmt.pix.bytesperline = size;
+ priv_fmt->u.timing.bytesperline = size;
size *= (c_data->vc_format.vactive_end -
c_data->vc_format.vactive_start);
- f->fmt.pix.sizeimage = size;
-#endif
+ priv_fmt->u.timing.sizeimage = size;
vcap_ctrl->vc_client = c_data;
+ c_data->set_cap = true;
break;
- case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
- c_data->vp_buf_type_field = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
- c_data->vp_format.field = f->fmt.pix.field;
- c_data->vp_format.height = f->fmt.pix.height;
- c_data->vp_format.width = f->fmt.pix.width;
- c_data->vp_format.pixelformat = f->fmt.pix.pixelformat;
+ case VP_IN_TYPE:
+ vcap_ctrl->vp_client = c_data;
+ c_data->vp_in_fmt.width = priv_fmt->u.pix.width;
+ c_data->vp_in_fmt.height = priv_fmt->u.pix.height;
+ c_data->vp_in_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
+
+ if (priv_fmt->u.pix.priv)
+ c_data->vid_vp_action.nr_enabled = 1;
+
+ size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+ if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ size = size * 2;
+ else
+ size = size / 2 * 3;
+ priv_fmt->u.pix.sizeimage = size;
+ c_data->set_decode = true;
break;
- case V4L2_BUF_TYPE_INTERLACED_IN_AFE:
+ case VP_OUT_TYPE:
+ vcap_ctrl->vp_client = c_data;
+ c_data->vp_out_fmt.width = priv_fmt->u.pix.width;
+ c_data->vp_out_fmt.height = priv_fmt->u.pix.height;
+ c_data->vp_out_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
+
+ if (priv_fmt->u.pix.priv)
+ c_data->vid_vp_action.nr_enabled = 1;
+
+ size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
+ if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ size = size * 2;
+ else
+ size = size / 2 * 3;
+ priv_fmt->u.pix.sizeimage = size;
+ c_data->set_vp_o = true;
break;
default:
break;
@@ -332,9 +567,55 @@
struct v4l2_requestbuffers *rb)
{
struct vcap_client_data *c_data = file->private_data;
+ int rc;
+
+ dprintk(3, "In Req Buf %08x\n", (unsigned int)rb->type);
+ c_data->op_mode = determine_mode(c_data);
+ if (c_data->op_mode == UNKNOWN_VCAP_OP) {
+ pr_err("VCAP Error: %s: VCAP in unknown mode\n", __func__);
+ return -ENOTRECOVERABLE;
+ }
+
switch (rb->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return vb2_reqbufs(&c_data->vc_vidq, rb);
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP) {
+ if (c_data->vc_format.color_space) {
+ pr_err("VCAP Err: %s: VP No RGB support\n",
+ __func__);
+ return -ENOTRECOVERABLE;
+ }
+ if (!c_data->vc_format.mode) {
+ pr_err("VCAP Err: VP No prog support\n");
+ return -ENOTRECOVERABLE;
+ }
+ if (rb->count < 6) {
+ pr_err("VCAP Err: Not enough buf for VC_VP\n");
+ return -EINVAL;
+ }
+ rc = vb2_reqbufs(&c_data->vc_vidq, rb);
+ if (rc < 0)
+ return rc;
+
+ c_data->vp_in_fmt.width =
+ (c_data->vc_format.hactive_end -
+ c_data->vc_format.hactive_start);
+ c_data->vp_in_fmt.height =
+ (c_data->vc_format.vactive_end -
+ c_data->vc_format.vactive_start);
+ /* VC outputs YCbCr 4:2:2 */
+ c_data->vp_in_fmt.pixfmt = V4L2_PIX_FMT_NV16;
+ rb->type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ rc = vb2_reqbufs(&c_data->vp_in_vidq, rb);
+ rb->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return rc;
+
+ } else {
+ return vb2_reqbufs(&c_data->vc_vidq, rb);
+ }
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ return vb2_reqbufs(&c_data->vp_in_vidq, rb);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return vb2_reqbufs(&c_data->vp_out_vidq, rb);
default:
pr_err("VCAP Error: %s: Unknown buffer type\n", __func__);
return -EINVAL;
@@ -359,16 +640,57 @@
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct vcap_client_data *c_data = file->private_data;
+ struct vb2_buffer *vb;
+ struct vb2_queue *q;
int rc;
+ dprintk(3, "In Q Buf %08x\n", (unsigned int)p->type);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (get_phys_addr(c_data->dev, &c_data->vc_vidq, p))
- return -EINVAL;
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP) {
+ /* If buffer in vp_in_q it will be coming back */
+ q = &c_data->vp_in_vidq;
+ if (p->index >= q->num_buffers) {
+ dprintk(1, "qbuf: buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[p->index];
+ if (NULL == vb) {
+ dprintk(1, "qbuf: buffer is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "qbuf: buffer already in use\n");
+ return -EINVAL;
+ }
+ }
+ rc = get_phys_addr(c_data->dev, &c_data->vc_vidq, p);
+ if (rc < 0)
+ return rc;
rc = vb2_qbuf(&c_data->vc_vidq, p);
if (rc < 0)
free_ion_handle(c_data->dev, &c_data->vc_vidq, p);
return rc;
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
+ rc = get_phys_addr(c_data->dev, &c_data->vp_in_vidq, p);
+ if (rc < 0)
+ return rc;
+ rc = vb2_qbuf(&c_data->vp_in_vidq, p);
+ if (rc < 0)
+ free_ion_handle(c_data->dev, &c_data->vp_in_vidq, p);
+ return rc;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ rc = get_phys_addr(c_data->dev, &c_data->vp_out_vidq, p);
+ if (rc < 0)
+ return rc;
+ rc = vb2_qbuf(&c_data->vp_out_vidq, p);
+ if (rc < 0)
+ free_ion_handle(c_data->dev, &c_data->vp_out_vidq, p);
+ return rc;
default:
pr_err("VCAP Error: %s: Unknown buffer type\n", __func__);
return -EINVAL;
@@ -381,12 +703,29 @@
struct vcap_client_data *c_data = file->private_data;
int rc;
+ dprintk(3, "In DQ Buf %08x\n", (unsigned int)p->type);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
rc = vb2_dqbuf(&c_data->vc_vidq, p, file->f_flags & O_NONBLOCK);
if (rc < 0)
return rc;
return free_ion_handle(c_data->dev, &c_data->vc_vidq, p);
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
+ rc = vb2_dqbuf(&c_data->vp_in_vidq, p, file->f_flags &
+ O_NONBLOCK);
+ if (rc < 0)
+ return rc;
+ return free_ion_handle(c_data->dev, &c_data->vp_in_vidq, p);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ rc = vb2_dqbuf(&c_data->vp_out_vidq, p, file->f_flags &
+ O_NONBLOCK);
+ if (rc < 0)
+ return rc;
+ return free_ion_handle(c_data->dev, &c_data->vp_out_vidq, p);
default:
pr_err("VCAP Error: %s: Unknown buffer type", __func__);
return -EINVAL;
@@ -394,15 +733,153 @@
return 0;
}
+/*
+ * When calling streamon on multiple queues there is a need to first verify
+ * that the streamon will succeed on all queues, similarly for streamoff
+ */
+int streamon_validate_q(struct vb2_queue *q)
+{
+ if (q->fileio) {
+ dprintk(1, "streamon: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "streamon: already streaming\n");
+ return -EBUSY;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (list_empty(&q->queued_list)) {
+ dprintk(1, "streamon: no output buffers queued\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct vcap_client_data *c_data = file->private_data;
+ int rc;
- switch (i) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ dprintk(3, "In Stream ON\n");
+ if (determine_mode(c_data) != c_data->op_mode) {
+ pr_err("VCAP Error: %s: s_fmt called after req_buf", __func__);
+ return -ENOTRECOVERABLE;
+ }
+
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
+ c_data->dev->vc_client = c_data;
+ config_vc_format(c_data);
return vb2_streamon(&c_data->vc_vidq, i);
+ case VP_VCAP_OP:
+ rc = streamon_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ c_data->dev->vp_client = c_data;
+
+ rc = config_vp_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = init_motion_buf(c_data);
+ if (rc < 0)
+ return rc;
+ if (c_data->vid_vp_action.nr_enabled) {
+ rc = init_nr_buf(c_data);
+ if (rc < 0)
+ goto s_on_deinit_m_buf;
+ }
+
+ c_data->vid_vp_action.vp_state = VP_FRAME1;
+
+ rc = vb2_streamon(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+ return rc;
+ case VC_AND_VP_VCAP_OP:
+ rc = streamon_validate_q(&c_data->vc_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ c_data->dev->vc_client = c_data;
+ c_data->dev->vp_client = c_data;
+ c_data->dev->vc_to_vp_work.cd = c_data;
+
+ rc = config_vc_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = config_vp_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = init_motion_buf(c_data);
+ if (rc < 0)
+ return rc;
+ if (c_data->vid_vp_action.nr_enabled) {
+ rc = init_nr_buf(c_data);
+ if (rc < 0)
+ goto s_on_deinit_m_buf;
+ }
+ c_data->streaming = 1;
+
+ c_data->vid_vp_action.vp_state = VP_FRAME1;
+
+ /* These stream on calls should not fail */
+ rc = vb2_streamon(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+ return rc;
default:
- pr_err("VCAP Error: %s: Unknown buffer type", __func__);
+ pr_err("VCAP Error: %s: Operation Mode type", __func__);
+ return -ENOTRECOVERABLE;
+ }
+ return 0;
+
+s_on_deinit_nr_buf:
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+s_on_deinit_m_buf:
+ deinit_motion_buf(c_data);
+ return rc;
+}
+
+int streamoff_validate_q(struct vb2_queue *q)
+{
+ if (q->fileio) {
+ dprintk(1, "streamoff: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (!q->streaming) {
+ dprintk(1, "streamoff: not streaming\n");
return -EINVAL;
}
return 0;
@@ -413,21 +890,78 @@
struct vcap_client_data *c_data = file->private_data;
int rc;
- switch (i) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
rc = vb2_streamoff(&c_data->vc_vidq, i);
if (rc >= 0)
atomic_set(&c_data->dev->vc_enabled, 0);
return rc;
+ case VP_VCAP_OP:
+ rc = streamoff_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+		/* These stream off calls should not fail */
+ rc = vb2_streamoff(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ return rc;
+
+ deinit_motion_buf(c_data);
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+ atomic_set(&c_data->dev->vp_enabled, 0);
+ return rc;
+ case VC_AND_VP_VCAP_OP:
+ rc = streamoff_validate_q(&c_data->vc_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+		/* These stream off calls should not fail */
+ c_data->streaming = 0;
+ rc = vb2_streamoff(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ return rc;
+
+ deinit_motion_buf(c_data);
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+ atomic_set(&c_data->dev->vc_enabled, 0);
+ atomic_set(&c_data->dev->vp_enabled, 0);
+ return rc;
default:
- pr_err("VCAP Error: %s: Unknown buffer type", __func__);
- break;
+ pr_err("VCAP Error: %s: Unknown Operation mode", __func__);
+ return -ENOTRECOVERABLE;
}
return 0;
}
/* VCAP fops */
-
static void *vcap_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
@@ -464,7 +998,7 @@
spin_lock_init(&c_data->cap_slock);
- /* initialize queue */
+ /* initialize vc queue */
q = &c_data->vc_vidq;
memset(q, 0, sizeof(c_data->vc_vidq));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -473,17 +1007,49 @@
q->buf_struct_size = sizeof(struct vcap_buffer);
q->ops = &capture_video_qops;
q->mem_ops = &vcap_mem_ops;
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ goto vc_q_failed;
+
+ /* initialize vp in queue */
+ q = &c_data->vp_in_vidq;
+ memset(q, 0, sizeof(c_data->vp_in_vidq));
+ q->type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ q->io_modes = VB2_USERPTR;
+ q->drv_priv = c_data;
+ q->buf_struct_size = sizeof(struct vcap_buffer);
+ q->ops = &vp_in_video_qops;
+ q->mem_ops = &vcap_mem_ops;
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ goto vp_in_q_failed;
+
+ /* initialize vp out queue */
+ q = &c_data->vp_out_vidq;
+ memset(q, 0, sizeof(c_data->vp_out_vidq));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_USERPTR;
+ q->drv_priv = c_data;
+ q->buf_struct_size = sizeof(struct vcap_buffer);
+ q->ops = &vp_out_video_qops;
+ q->mem_ops = &vcap_mem_ops;
ret = vb2_queue_init(q);
if (ret < 0)
- goto open_failed;
+ goto vp_out_q_failed;
INIT_LIST_HEAD(&c_data->vid_vc_action.active);
+ INIT_LIST_HEAD(&c_data->vid_vp_action.in_active);
+ INIT_LIST_HEAD(&c_data->vid_vp_action.out_active);
file->private_data = c_data;
return 0;
-open_failed:
+vp_out_q_failed:
+ vb2_queue_release(&c_data->vp_in_vidq);
+vp_in_q_failed:
+ vb2_queue_release(&c_data->vc_vidq);
+vc_q_failed:
kfree(c_data);
return ret;
}
@@ -491,6 +1057,8 @@
static int vcap_close(struct file *file)
{
struct vcap_client_data *c_data = file->private_data;
+ vb2_queue_release(&c_data->vp_out_vidq);
+ vb2_queue_release(&c_data->vp_in_vidq);
vb2_queue_release(&c_data->vc_vidq);
c_data->dev->vc_client = NULL;
c_data->dev->vp_client = NULL;
@@ -498,13 +1066,60 @@
return 0;
}
+unsigned int poll_work(struct vb2_queue *q, struct file *file,
+ poll_table *wait, bool write_q)
+{
+ unsigned long flags;
+ struct vb2_buffer *vb = NULL;
+
+ if (q->num_buffers == 0)
+ return POLLERR;
+
+ if (list_empty(&q->queued_list))
+ return POLLERR;
+
+ poll_wait(file, &q->done_wq, wait);
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (write_q) ? POLLOUT | POLLWRNORM :
+ POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
static unsigned int vcap_poll(struct file *file,
struct poll_table_struct *wait)
{
struct vcap_client_data *c_data = file->private_data;
- struct vb2_queue *q = &c_data->vc_vidq;
+ struct vb2_queue *q;
+ unsigned int mask = 0;
- return vb2_poll(q, file, wait);
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
+ q = &c_data->vc_vidq;
+ return vb2_poll(q, file, wait);
+ case VP_VCAP_OP:
+ q = &c_data->vp_in_vidq;
+ mask = poll_work(q, file, wait, 0);
+ q = &c_data->vp_out_vidq;
+ mask |= poll_work(q, file, wait, 1);
+ return mask;
+ case VC_AND_VP_VCAP_OP:
+ q = &c_data->vp_out_vidq;
+ mask = poll_work(q, file, wait, 0);
+ return mask;
+ default:
+ pr_err("VCAP Error: %s: Unknown operation mode", __func__);
+ return POLLERR;
+ }
+ return 0;
}
/* V4L2 and video device structures */
@@ -522,6 +1137,10 @@
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_s_fmt_type_private = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_type_private = vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
@@ -537,9 +1156,9 @@
.release = video_device_release,
};
-int vcap_reg_powerup(struct vcap_dev *dev, struct device *ddev)
+int vcap_reg_powerup(struct vcap_dev *dev)
{
- dev->fs_vcap = regulator_get(ddev, "vdd");
+ dev->fs_vcap = regulator_get(NULL, "fs_vcap");
if (IS_ERR(dev->fs_vcap)) {
pr_err("%s: Regulator FS_VCAP get failed %ld\n", __func__,
PTR_ERR(dev->fs_vcap));
@@ -715,7 +1334,7 @@
{
int rc;
- rc = vcap_reg_powerup(dev, ddev);
+ rc = vcap_reg_powerup(dev);
if (rc < 0)
goto reg_failed;
rc = vcap_clk_powerup(dev, ddev);
@@ -751,6 +1370,11 @@
return 0;
}
+static irqreturn_t vcap_vp_handler(int irq_num, void *data)
+{
+ return vp_handler(vcap_ctrl);
+}
+
static irqreturn_t vcap_vc_handler(int irq_num, void *data)
{
return vc_handler(vcap_ctrl);
@@ -793,26 +1417,44 @@
goto free_resource;
}
- dev->vcapirq = platform_get_resource_byname(pdev,
- IORESOURCE_IRQ, "vcap");
- if (!dev->vcapirq) {
- pr_err("%s: no irq resource?\n", __func__);
+ dev->vcirq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vc_irq");
+ if (!dev->vcirq) {
+ pr_err("%s: no vc irq resource?\n", __func__);
+ ret = -ENODEV;
+ goto free_resource;
+ }
+ dev->vpirq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vp_irq");
+ if (!dev->vpirq) {
+ pr_err("%s: no vp irq resource?\n", __func__);
ret = -ENODEV;
goto free_resource;
}
- ret = request_irq(dev->vcapirq->start, vcap_vc_handler,
- IRQF_TRIGGER_RISING, "vcap", 0);
+
+ ret = request_irq(dev->vcirq->start, vcap_vc_handler,
+ IRQF_TRIGGER_RISING, "vc_irq", 0);
if (ret < 0) {
- pr_err("%s: irq request fail\n", __func__);
+ pr_err("%s: vc irq request fail\n", __func__);
ret = -EBUSY;
goto free_resource;
}
+ disable_irq(dev->vcirq->start);
- disable_irq(dev->vcapirq->start);
+ ret = request_irq(dev->vpirq->start, vcap_vp_handler,
+ IRQF_TRIGGER_RISING, "vp_irq", 0);
+
+ if (ret < 0) {
+ pr_err("%s: vp irq request fail\n", __func__);
+ ret = -EBUSY;
+ goto free_resource;
+ }
+ disable_irq(dev->vpirq->start);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
"%s", MSM_VCAP_DRV_NAME);
+
ret = v4l2_device_register(NULL, &dev->v4l2_dev);
if (ret)
goto free_resource;
@@ -842,17 +1484,25 @@
dev->vfd = vfd;
video_set_drvdata(vfd, dev);
- dev->ion_client = msm_ion_client_create(-1, "vcap");
- if (IS_ERR((void *)dev->ion_client)) {
- pr_err("could not get ion client");
+ dev->vcap_wq = create_workqueue("vcap");
+ if (!dev->vcap_wq) {
+ pr_err("Could not create workqueue");
goto rel_vdev;
}
+ dev->ion_client = msm_ion_client_create(-1, "vcap");
+ if (IS_ERR((void *)dev->ion_client)) {
+ pr_err("could not get ion client");
+ goto rel_vcap_wq;
+ }
+
atomic_set(&dev->vc_enabled, 0);
+ atomic_set(&dev->vp_enabled, 0);
dprintk(1, "Exit probe succesfully");
return 0;
-
+rel_vcap_wq:
+ destroy_workqueue(dev->vcap_wq);
rel_vdev:
video_device_release(vfd);
deinit_vc:
@@ -874,6 +1524,8 @@
{
struct vcap_dev *dev = vcap_ctrl;
ion_client_destroy(dev->ion_client);
+ flush_workqueue(dev->vcap_wq);
+ destroy_workqueue(dev->vcap_wq);
video_device_release(dev->vfd);
deinit_vc();
vcap_disable(dev);
diff --git a/drivers/media/video/vcap_vc.c b/drivers/media/video/vcap_vc.c
index ed0bc25..2c4a243 100644
--- a/drivers/media/video/vcap_vc.c
+++ b/drivers/media/video/vcap_vc.c
@@ -34,401 +34,6 @@
printk(KERN_DEBUG "VC: " fmt, ## arg); \
} while (0)
-struct v4l2_format_vc_ext vcap_vc_lut[] = {
- /* 1080p */
- {
- HAL_VCAP_YUV_1080p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 32, 2200, 192, 2112, 4, 24, 0, 2, 0, 44, 0, 0, 0, 0,
- 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 148.5,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2750, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 112, 2750, 192, 2112, 4, 110, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 275, 19, 211, 41, 1121, 0, 5, 0, 16, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 200, 22, 182, 41, 1121, 0, 5, 0, 16, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 15, 2640, 192, 2112, 6, 13, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_25_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0,
- 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_25_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 10, 2640, 192, 2112, 4, 8, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_30_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_25_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_25_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 10, 2640, 192, 2112, 4, 8, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- /* 1080i */
- {
- HAL_VCAP_YUV_1080i_60_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2200, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1100,
- 1100, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_60_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 18, 2200, 192, 2112, 3, 7, 0, 2, 0, 44, 11, 15, 1100,
- 1100, 8, 10
- },
- {
- HAL_VCAP_YUV_1080i_60_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 220, 19, 211, 20, 560, 0, 5, 0, 4, 583, 1123, 110,
- 110, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 1125, 2640, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1320,
- 1320, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 52, 2640, 192, 2112, 4, 24, 0, 2, 0, 44, 30, 50, 1320,
- 1320, 26, 28},
- {
- HAL_VCAP_YUV_1080i_50_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 1125, 264, 19, 211, 20, 560, 0, 5, 0, 4, 583, 1123, 110,
- 110, 563, 568
- },
- {
- HAL_VCAP_RGB_1080i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 72.00,
- 1125, 2640, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1320,
- 1320, 563, 568
- },
- {
- HAL_VCAP_RGB_1080i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 72.00,
- 52, 2640, 192, 2112, 4, 24, 0, 2, 0, 44, 30, 50, 1320,
- 1320, 26, 28
- },
- /* 480i */
- {
- HAL_VCAP_YUV_480i_60_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 20, 1716, 238, 1678, 3, 7, 0, 2, 0, 124, 14, 18, 820,
- 820, 10, 12
- },
- {
- HAL_VCAP_YUV_480i_60_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 1716, 238, 1678, 18, 258, 0, 3, 0, 124, 281, 521, 858,
- 858, 262, 265
- },
- {
- HAL_VCAP_YUV_480i_60_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 172, 24, 168, 18, 258, 0, 3, 0, 12, 281, 521, 86,
- 86, 262, 265
- },
- {
- HAL_VCAP_YUV_2880_480i_60_FL, HAL_VCAP_MODE_INT,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 525, 3432, 476, 3356, 18, 258, 0, 3,
- 0, 248, 281, 521, 1716, 1716, 262, 265
- },
- {
- HAL_VCAP_YUV_2880_480i_60_RH, HAL_VCAP_MODE_INT,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 32, 3432, 476, 3356, 4, 14, 0, 3, 0,
- 248, 20, 30, 1716, 1716, 16, 19
- },
- /* 480p */
- {
- HAL_VCAP_YUV_480p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 8, 858, 122, 842, 2, 5, 0, 1, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_480p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.027,
- 52, 858, 122, 842, 3, 50, 0, 2, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_480p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.027,
- 525, 858, 122, 842, 36, 516, 0, 6, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_480p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 858, 122, 842, 36, 516, 0, 6, 0, 62, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_480p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 86, 12, 84, 36, 516, 0, 6, 0, 6, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_480p_60_FL, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 525, 3432, 488, 3368, 36, 516, 0, 6,
- 0, 248, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_480p_60_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 25, 3432, 488, 3368, 8, 22, 0, 6, 0,
- 248, 0, 0, 0, 0, 0, 0
- },
- /* 720p */
- {
- HAL_VCAP_YUV_720p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 1650, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_720p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 750, 1650, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 165, 26, 154, 25, 745, 0, 5, 0, 4, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 35, 1650, 260, 1540, 5, 32, 0, 3, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 1980, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 198, 26, 154, 25, 745, 0, 5, 0, 4, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 6, 1980, 260, 1540, 2, 5, 0, 1, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- /* 576p */
- {
- HAL_VCAP_YUV_576p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 864, 132, 852, 44, 620, 0, 5, 0, 64, 0, 0, 0,
- 0, 0, 0},
- {
- HAL_VCAP_RGB_576p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.0,
- 625, 864, 132, 852, 44, 620, 0, 5, 0, 64, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_576p_50_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 86, 13, 85, 44, 620, 0, 5, 0, 6, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_576p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 25, 864, 132, 852, 4, 23, 0, 3, 0, 64, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1440_576p_50_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 25, 1728, 264, 1704, 6, 23, 0, 5, 0,
- 128, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_576p_50_FL, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 625, 3456, 528, 3408, 44, 620, 0, 5,
- 0, 256, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_576p_50_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 25, 3456, 528, 3408, 6, 23, 0, 5, 0,
- 256, 0, 0, 0, 0, 0, 0
- },
- /* 576i */
- {
- HAL_VCAP_YUV_576i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 1728, 264, 1704, 22, 310, 0, 3, 0, 126, 335, 623, 864,
- 864, 313, 316
- },
- {
- HAL_VCAP_YUV_576i_50_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 172, 26, 170, 22, 310, 0, 3, 0, 13, 335, 623, 86,
- 86, 313, 316
- },
- {
- HAL_VCAP_YUV_576i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 29, 1728, 264, 1704, 3, 13, 0, 1, 0, 126, 16, 26, 864, 864,
- 14, 15
- },
- /* XGA 1024x768 */
- {
- HAL_VCAP_YUV_XGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 798, 1328, 256, 1280, 27, 795, 0, 4, 0, 104, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_XGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 12, 1328, 256, 1280, 6, 10, 0, 4, 0, 104, 0, 0, 0, 0,
- 0, 0
- },
- {
- HAL_VCAP_YUV_XGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 12, 1216, 112, 1136, 6, 10, 0, 4, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* SXGA 1280x1024 */
- {
- HAL_VCAP_YUV_SXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 1063, 1712, 352, 1632, 36, 1060, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_SXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 109.0,
- 1063, 1712, 352, 1632, 36, 1060, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_SXGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 17, 1712, 352, 1632, 8, 15, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_SXGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 17, 1440, 112, 1392, 8, 15, 0, 7, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* UXGA 1600x1200 */
- {
- HAL_VCAP_YUV_UXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 1245, 2160, 448, 2048, 42, 1242, 0, 4, 0, 168, 0,
- 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_RGB_UXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 161.0,
- 1245, 2160, 448, 2048, 42, 1242, 0, 4, 0, 168, 0,
- 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_UXGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 12, 2160, 448, 2048, 6, 10, 0, 4, 0, 168, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_UXGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 12, 1808, 112, 1712, 6, 10, 0, 4, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* test odd height */
- {
- HAL_VCAP_ODD_HEIGHT, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_NEG, HAL_VCAP_YUV, 148.5,
- 65, 1728, 264, 1704, 5, 20, 0, 3, 0, 126, 25, 40, 864,
- 864, 21, 24
- },
- /* test odd width RGB only */
- {
- HAL_VCAP_ODD_WIDTH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_NEG, HAL_VCAP_RGB, 148.5,
- 52, 859, 122, 843, 3, 50, 0, 2, 0, 62, 0, 0, 0, 0, 0, 0
- },
-};
-
void config_buffer(struct vcap_client_data *c_data,
struct vcap_buffer *buf,
void __iomem *y_addr,
@@ -446,6 +51,61 @@
}
}
+/*
+ * mov_buf_to_vp - work handler that hands completed VC capture buffers
+ * over to the VP input queue.
+ *
+ * Dequeues each done buffer from the client's VC capture queue, moves its
+ * backing memory (ion handle + physical address) into the VP input buffer
+ * with the same index, and queues that buffer on the VP input queue.
+ * Loops until vb2_dqbuf() reports no more finished buffers or streaming
+ * stops.
+ *
+ * NOTE(review): both queues are indexed with the same p.index - assumes
+ * the VC and VP input queues were set up with matching buffer counts;
+ * confirm against the queue setup code.
+ */
+static void mov_buf_to_vp(struct work_struct *work)
+{
+	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+	struct v4l2_buffer p;
+	struct vb2_buffer *vb_vc;
+	struct vcap_buffer *buf_vc;
+	struct vb2_buffer *vb_vp;
+	struct vcap_buffer *buf_vp;
+
+	int rc;
+	p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	p.memory = V4L2_MEMORY_USERPTR;
+	while (1) {
+		if (!vp_work->cd->streaming)
+			return;
+		/* Non-blocking: leave the loop once nothing is ready */
+		rc = vb2_dqbuf(&vp_work->cd->vc_vidq, &p, O_NONBLOCK);
+		if (rc < 0)
+			return;
+
+		vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
+		if (NULL == vb_vc) {
+			dprintk(1, "%s: buffer is NULL\n", __func__);
+			vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+			return;
+		}
+		buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
+
+		vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
+		if (NULL == vb_vp) {
+			dprintk(1, "%s: buffer is NULL\n", __func__);
+			vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+			return;
+		}
+		buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
+		/* Transfer ownership of the backing memory from VC to VP */
+		buf_vp->ion_handle = buf_vc->ion_handle;
+		buf_vp->paddr = buf_vc->paddr;
+		buf_vc->ion_handle = NULL;
+		buf_vc->paddr = 0;
+
+		p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+
+		/* This call should not fail */
+		rc = vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+		if (rc < 0) {
+			pr_err("%s: qbuf to vp_in failed\n", __func__);
+			/* Undo the transfer and give the buffer back to VC
+			 * so it is not leaked.
+			 */
+			buf_vc->ion_handle = buf_vp->ion_handle;
+			buf_vc->paddr = buf_vp->paddr;
+			buf_vp->ion_handle = NULL;
+			buf_vp->paddr = 0;
+			p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+			vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+		}
+	}
+}
+
irqreturn_t vc_handler(struct vcap_dev *dev)
{
uint32_t irq, timestamp;
@@ -454,6 +114,7 @@
struct vb2_buffer *vb = NULL;
struct vcap_client_data *c_data;
+
irq = readl_relaxed(VCAP_VC_INT_STATUS);
dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
@@ -545,6 +206,10 @@
dev->vc_client->vid_vc_action.buf_ind = VC_BUF1;
irq = VC_BUF2;
}
+
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ queue_work(dev->vcap_wq, &dev->vc_to_vp_work.work);
+
writel_relaxed(irq, VCAP_VC_INT_CLEAR);
return IRQ_HANDLED;
@@ -604,11 +269,11 @@
VCAP_VC_C_ADDR_2);
rc = readl_relaxed(VCAP_VC_CTRL);
- writel_relaxed(rc | 0x1, VCAP_VC_CTRL);
+ writel_iowmb(rc | 0x1, VCAP_VC_CTRL);
writel_relaxed(0x6, VCAP_VC_INT_MASK);
- enable_irq(dev->vcapirq->start);
+ enable_irq(dev->vcirq->start);
return 0;
}
@@ -618,9 +283,12 @@
int rc;
rc = readl_relaxed(VCAP_VC_CTRL);
- writel_relaxed(rc & ~(0x1), VCAP_VC_CTRL);
+ writel_iowmb(rc & ~(0x1), VCAP_VC_CTRL);
- disable_irq(c_data->dev->vcapirq->start);
+ if (atomic_read(&dev->vc_enabled) == 1)
+ disable_irq(dev->vcirq->start);
+
+ flush_workqueue(dev->vcap_wq);
}
int config_vc_format(struct vcap_client_data *c_data)
@@ -646,14 +314,16 @@
}
writel_relaxed(0x00000000, VCAP_SW_RESET_REQ);
- writel_relaxed(0x00000102, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000102, VCAP_VC_NPL_CTRL);
rc = readl_relaxed(VCAP_VC_NPL_CTRL);
rc = readl_relaxed(VCAP_VC_NPL_CTRL);
- writel_relaxed(0x00000002, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000002, VCAP_VC_NPL_CTRL);
dprintk(2, "%s: Starting VC configuration\n", __func__);
- writel_relaxed(0x00000002, VCAP_VC_NPL_CTRL);
- writel_relaxed(0x00000004 | vc_format->color_space << 1, VCAP_VC_CTRL);
+ writel_iowmb(0x00000002, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000004 | vc_format->color_space << 1 |
+ vc_format->mode << 3 |
+ vc_format->mode << 10, VCAP_VC_CTRL);
writel_relaxed(vc_format->h_polar << 4 |
vc_format->v_polar << 0, VCAP_VC_POLARITY);
@@ -677,7 +347,7 @@
vc_format->hsync_start), VCAP_VC_HSYNC_HPOS);
writel_relaxed(((vc_format->f2_vsync_h_end << 16) |
vc_format->f2_vsync_h_start), VCAP_VC_VSYNC_F2_HPOS);
- writel_relaxed(0x000033FF, VCAP_VC_BUF_CTRL);
+ writel_iowmb(0x000033FF, VCAP_VC_BUF_CTRL);
rc = vc_format->hactive_end - vc_format->hactive_start;
if (vc_format->color_space)
@@ -694,6 +364,7 @@
writel_relaxed(0x2f6ad272, VCAP_VC_IN_CTRL4);
writel_relaxed(0x00006b38, VCAP_VC_IN_CTRL5);
+ writel_iowmb(0x00000001 , VCAP_OFFSET(0x0d00));
dprintk(2, "%s: Done VC configuration\n", __func__);
return 0;
@@ -706,6 +377,7 @@
dprintk(1, "Hardware version: %08x\n", result);
if (result != VCAP_HARDWARE_VERSION)
return -ENODEV;
+ INIT_WORK(&dev->vc_to_vp_work.work, mov_buf_to_vp);
return 0;
}
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index e431038..57d13cd 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -13,11 +13,9 @@
#ifndef VCAP_VC_H
#define VCAP_VC_H
-/* #define NEW_S_FMT */
#include <linux/interrupt.h>
#include <media/vcap_v4l2.h>
-extern struct v4l2_format_vc_ext vcap_vc_lut[];
#define VCAP_HARDWARE_VERSION 0x10000000
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
new file mode 100644
index 0000000..f8dfdc1
--- /dev/null
+++ b/drivers/media/video/vcap_vp.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <mach/camera.h>
+#include <linux/io.h>
+#include <mach/clk.h>
+#include <linux/clk.h>
+
+#include <media/vcap_v4l2.h>
+#include <media/vcap_fmt.h>
+#include "vcap_vp.h"
+
+static unsigned debug;
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "VP: " fmt, ## arg); \
+ } while (0)
+
+/* Program the VP noise-reduction T2 buffer registers from @buf.
+ * The luma plane sits at the buffer's physical base and the chroma
+ * plane follows directly after width * height luma bytes.
+ */
+void config_nr_buffer(struct vcap_client_data *c_data,
+			struct vcap_buffer *buf)
+{
+	struct vcap_dev *dev = c_data->dev;
+	int luma_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+
+	writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
+	writel_relaxed(buf->paddr + luma_size, VCAP_VP_NR_T2_C_BASE_ADDR);
+}
+
+/* Program the VP T2 (current field) input luma/chroma base addresses.
+ * Chroma follows width * height bytes of luma in the same buffer.
+ */
+void config_in_buffer(struct vcap_client_data *c_data,
+			struct vcap_buffer *buf)
+{
+	struct vcap_dev *dev = c_data->dev;
+	int luma_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+
+	writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
+	writel_relaxed(buf->paddr + luma_size, VCAP_VP_T2_C_BASE_ADDR);
+}
+
+/* Program the VP output frame luma/chroma base addresses, sized from
+ * the client's vp_out_fmt; chroma follows the luma plane.
+ */
+void config_out_buffer(struct vcap_client_data *c_data,
+			struct vcap_buffer *buf)
+{
+	struct vcap_dev *dev = c_data->dev;
+	int luma_size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
+
+	writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
+	writel_relaxed(buf->paddr + luma_size, VCAP_VP_OUT_C_BASE_ADDR);
+}
+
+/*
+ * vp_setup_buffers - pull the next input (T2) and output buffers off the
+ * client's active lists and program them into the VP hardware.
+ *
+ * Returns 0 on success, -ENOEXEC if the client is not streaming, or
+ * -EAGAIN when either list is empty (the caller retries once more
+ * buffers are queued).
+ */
+int vp_setup_buffers(struct vcap_client_data *c_data)
+{
+	struct vp_action *act;
+	struct vcap_dev *dev;
+	unsigned long flags = 0;
+
+	if (!c_data->streaming)
+		return -ENOEXEC;
+	dev = c_data->dev;
+	dprintk(2, "Start setup buffers\n");
+
+	/* No need to verify vp_client is not NULL caller does so */
+	act = &dev->vp_client->vid_vp_action;
+
+	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+	if (list_empty(&act->in_active)) {
+		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+		dprintk(1, "%s: VP We have no more input buffers\n",
+			__func__);
+		return -EAGAIN;
+	}
+	if (list_empty(&act->out_active)) {
+		spin_unlock_irqrestore(&dev->vp_client->cap_slock,
+			flags);
+		dprintk(1, "%s: VP We have no more output buffers\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	/* Detach the head of each list while the lock is still held */
+	act->bufT2 = list_entry(act->in_active.next,
+			struct vcap_buffer, list);
+	list_del(&act->bufT2->list);
+	act->bufOut = list_entry(act->out_active.next,
+			struct vcap_buffer, list);
+	list_del(&act->bufOut->list);
+	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
+	config_in_buffer(c_data, act->bufT2);
+	config_out_buffer(c_data, act->bufOut);
+	return 0;
+}
+
+/*
+ * mov_buf_to_vc - work handler that returns processed VP input buffers
+ * to the VC capture queue.
+ *
+ * Inverse of mov_buf_to_vp(): dequeues each done buffer from the VP
+ * input queue, moves its backing memory (ion handle + physical address)
+ * back into the same-index VC capture buffer, and re-queues it on the
+ * VC queue so capture can reuse it.
+ *
+ * NOTE(review): both queues are indexed with the same p.index - assumes
+ * matching buffer counts on the VC and VP input queues; confirm against
+ * the queue setup code.
+ */
+static void mov_buf_to_vc(struct work_struct *work)
+{
+	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+	struct v4l2_buffer p;
+	struct vb2_buffer *vb_vc;
+	struct vcap_buffer *buf_vc;
+	struct vb2_buffer *vb_vp;
+	struct vcap_buffer *buf_vp;
+	int rc;
+
+	p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+	p.memory = V4L2_MEMORY_USERPTR;
+
+	/* This loop exits when there is no more buffers left */
+	while (1) {
+		if (!vp_work->cd->streaming)
+			return;
+		rc = vb2_dqbuf(&vp_work->cd->vp_in_vidq, &p, O_NONBLOCK);
+		if (rc < 0)
+			return;
+
+		vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
+		if (NULL == vb_vc) {
+			dprintk(1, "%s: buffer is NULL\n", __func__);
+			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+			return;
+		}
+		buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
+
+		vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
+		if (NULL == vb_vp) {
+			dprintk(1, "%s: buffer is NULL\n", __func__);
+			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+			return;
+		}
+		buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
+		/* Transfer ownership of the backing memory from VP to VC */
+		buf_vc->ion_handle = buf_vp->ion_handle;
+		buf_vc->paddr = buf_vp->paddr;
+		buf_vp->ion_handle = NULL;
+		buf_vp->paddr = 0;
+
+		p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		/* This call should not fail */
+		rc = vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+		if (rc < 0) {
+			dprintk(1, "%s: qbuf to vc failed\n", __func__);
+			/* Undo the transfer so the buffer is not leaked */
+			buf_vp->ion_handle = buf_vc->ion_handle;
+			buf_vp->paddr = buf_vc->paddr;
+			buf_vc->ion_handle = NULL;
+			buf_vc->paddr = 0;
+			p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+			vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+		}
+	}
+}
+
+/*
+ * vp_wq_fnc - workqueue bottom half for the VP picture-done interrupt.
+ *
+ * Waits for the VP pipe to drain, returns finished buffers to videobuf2,
+ * rotates the temporal buffer chain (Tm1/T0/T1/T2 plus the optional NR
+ * buffer), programs the next input/output buffers and restarts the VP.
+ * If no buffers are available it masks the VP interrupt and marks the
+ * VP disabled; a later queue of buffers restarts it via continue_vp().
+ */
+static void vp_wq_fnc(struct work_struct *work)
+{
+	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+	struct vcap_dev *dev;
+	struct vp_action *vp_act;
+	uint32_t irq;
+	int rc;
+#ifndef TOP_FIELD_FIX
+	/*
+	 * Fix: top_field was read uninitialized (undefined behavior) when
+	 * the buffer's field was not V4L2_FIELD_TOP; default to bottom.
+	 */
+	bool top_field = 0;
+#endif
+
+	if (vp_work && vp_work->cd && vp_work->cd->dev)
+		dev = vp_work->cd->dev;
+	else
+		return;
+
+	vp_act = &dev->vp_client->vid_vp_action;
+	irq = vp_work->irq;
+
+	/* Busy-wait until the VP reports the frame has drained */
+	rc = readl_relaxed(VCAP_OFFSET(0x048));
+	while (!(rc & 0x00000100))
+		rc = readl_relaxed(VCAP_OFFSET(0x048));
+
+	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
+	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+
+	/* Queue the done buffers */
+	if (vp_act->vp_state == VP_NORMAL &&
+			vp_act->bufNR.nr_pos != TM1_BUF) {
+		vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
+		if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
+			queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
+	}
+
+	vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);
+
+	/* Cycle to next state */
+	if (vp_act->vp_state != VP_NORMAL)
+		vp_act->vp_state++;
+#ifdef TOP_FIELD_FIX
+	vp_act->top_field = !vp_act->top_field;
+#endif
+
+	/* Cycle Buffers*/
+	if (vp_work->cd->vid_vp_action.nr_enabled) {
+		if (vp_act->bufNR.nr_pos == TM1_BUF)
+			vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
+
+		if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
+			vp_act->bufNR.nr_pos++;
+
+		vp_act->bufTm1 = vp_act->bufT0;
+		vp_act->bufT0 = vp_act->bufT1;
+		vp_act->bufT1 = vp_act->bufNRT2;
+		vp_act->bufNRT2 = vp_act->bufT2;
+		config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
+	} else {
+		vp_act->bufTm1 = vp_act->bufT0;
+		vp_act->bufT0 = vp_act->bufT1;
+		vp_act->bufT1 = vp_act->bufT2;
+	}
+
+	rc = vp_setup_buffers(vp_work->cd);
+	if (rc < 0) {
+		/* setup_buf failed because we are waiting for buffers */
+		writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
+		writel_iowmb(irq, VCAP_VP_INT_CLEAR);
+		atomic_set(&dev->vp_enabled, 0);
+		return;
+	}
+
+	/* Config VP */
+#ifndef TOP_FIELD_FIX
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+		top_field = 1;
+#endif
+
+#ifdef TOP_FIELD_FIX
+	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+	enable_irq(dev->vpirq->start);
+	writel_iowmb(irq, VCAP_VP_INT_CLEAR);
+}
+
+/*
+ * vp_handler - VCAP VP (video processor) interrupt handler.
+ *
+ * Reads and acknowledges the VP interrupt status.  On a picture-done
+ * interrupt with an active, initialized VP client it defers the heavy
+ * post-processing to vp_wq_fnc() on the vcap workqueue and masks the VP
+ * irq until that work re-enables it.
+ *
+ * Returns IRQ_HANDLED in all cases: the interrupt came from this device
+ * and has been acknowledged, even on the error paths.
+ */
+irqreturn_t vp_handler(struct vcap_dev *dev)
+{
+	struct vcap_client_data *c_data;
+	struct vp_action *vp_act;
+	uint32_t irq;
+
+	irq = readl_relaxed(VCAP_VP_INT_STATUS);
+
+	dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
+	/*
+	 * Fix: the original test was "!irq & VP_PIC_DONE", which parses as
+	 * "(!irq) & VP_PIC_DONE" and is false whenever any status bit is
+	 * set, so error interrupts were misclassified as picture-done.
+	 * We want "this is not a picture-done interrupt".
+	 */
+	if (!(irq & VP_PIC_DONE)) {
+		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+		pr_err("VP IRQ shows some error\n");
+		return IRQ_HANDLED;
+	}
+
+	if (dev->vp_client == NULL) {
+		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+		/* Fix: message was mistagged "VC:" in the VP handler */
+		pr_err("VP: There is no active vp client\n");
+		return IRQ_HANDLED;
+	}
+
+	vp_act = &dev->vp_client->vid_vp_action;
+	c_data = dev->vp_client;
+
+	if (vp_act->vp_state == VP_UNKNOWN) {
+		writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+		pr_err("%s: VP is in an unknown state\n",
+				__func__);
+		/*
+		 * Fix: -EAGAIN is not a valid irqreturn_t value; the irq was
+		 * ours and has been cleared, so report it handled.
+		 */
+		return IRQ_HANDLED;
+	}
+
+	INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
+	dev->vp_work.cd = c_data;
+	dev->vp_work.irq = irq;
+	queue_work(dev->vcap_wq, &dev->vp_work.work);
+
+	/* Mask further VP irqs until the queued work re-enables them */
+	disable_irq_nosync(dev->vpirq->start);
+	return IRQ_HANDLED;
+}
+
+/*
+ * vp_stop_capture - stop the VP: clear its control register, drain any
+ * queued vcap work, mask the VP irq if the VP was running, then pulse
+ * the VP software reset so the block is left in a clean state.
+ */
+void vp_stop_capture(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+
+	writel_iowmb(0x00000000, VCAP_VP_CTRL);
+	/* Wait for any in-flight vp_wq_fnc / buffer-move work to finish */
+	flush_workqueue(dev->vcap_wq);
+
+	if (atomic_read(&dev->vp_enabled) == 1)
+		disable_irq(dev->vpirq->start);
+
+	/* Pulse software reset (assert, then de-assert) */
+	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
+	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
+}
+
+/*
+ * config_vp_format - reset the VP core and program its static
+ * deinterlacer / film-mode / noise-reduction configuration registers.
+ *
+ * Also initializes the vp_to_vc work item used to return buffers to the
+ * VC queue in VC_AND_VP operation.  Frame size is taken from the
+ * client's vp_out_fmt.  Always returns 0.
+ *
+ * NOTE(review): the numeric register values are hardware tuning
+ * constants; they are not derived from anything visible in this file -
+ * confirm against the VCAP HW programming guide before changing any.
+ */
+int config_vp_format(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+
+	INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
+	dev->vp_to_vc_work.cd = c_data;
+
+	/* SW restart VP */
+	writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
+	writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
+
+	/* Film Mode related settings */
+	writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
+	writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
+	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
+	writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
+	writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
+	writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);
+
+	/* Motion / noise-reduction / balance state and tuning registers */
+	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
+	writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
+	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
+	writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
+	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
+	writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
+	writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
+	writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
+	writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
+	writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
+	writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
+	writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
+	writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
+	writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
+	writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
+	writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
+	writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
+	writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
+	writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
+	writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
+	writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
+	writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
+	writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
+	writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
+	writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
+	writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
+	writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
+	/* Frame size packed as ((height-1) << 16) | (width-1) */
+	writel_relaxed((c_data->vp_out_fmt.height-1)<<16 |
+			(c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
+	writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);
+
+	return 0;
+}
+
+/*
+ * init_motion_buf - allocate the VP motion-estimation scratch buffer and
+ * program its physical address into the hardware.
+ *
+ * The buffer holds 16 bytes per 64x8-pixel tile of the output frame.
+ * Returns 0 on success, -ENOEXEC if a buffer already exists, or -ENOMEM
+ * on allocation failure.
+ */
+int init_motion_buf(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+	void *mem;
+	uint32_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
+		((c_data->vp_out_fmt.height + 7) >> 3) * 16;
+
+	if (c_data->vid_vp_action.bufMotion) {
+		pr_err("Motion buffer has already been created");
+		return -ENOEXEC;
+	}
+
+	mem = kzalloc(size, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	c_data->vid_vp_action.bufMotion = mem;
+	writel_iowmb(virt_to_phys(mem), VCAP_VP_MOTION_EST_ADDR);
+	return 0;
+}
+
+/*
+ * deinit_motion_buf - clear the motion-estimation hardware address and
+ * free the buffer allocated by init_motion_buf().  No-op (with a debug
+ * message) if no buffer exists.
+ */
+void deinit_motion_buf(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+	void *mem = c_data->vid_vp_action.bufMotion;
+
+	if (!mem) {
+		dprintk(1, "Motion buffer has not been created");
+		return;
+	}
+
+	/* Detach the buffer from the hardware before releasing it */
+	writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
+	c_data->vid_vp_action.bufMotion = NULL;
+	kfree(mem);
+}
+
+/*
+ * init_nr_buf - allocate the noise-reduction (NR) scratch frame and
+ * enable NR in the VP.
+ *
+ * Buffer size depends on the input pixel format: NV16 needs a full-size
+ * chroma plane (2 * luma); otherwise a 4:2:0 layout (1.5 * luma) is
+ * assumed.  Returns 0 on success, -ENOEXEC if the buffer already
+ * exists, or -ENOMEM on allocation failure.
+ */
+int init_nr_buf(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+	struct nr_buffer *buf;
+	uint32_t frame_size, tot_size, rc;
+
+	if (c_data->vid_vp_action.bufNR.vaddr) {
+		pr_err("NR buffer has already been created");
+		return -ENOEXEC;
+	}
+	buf = &c_data->vid_vp_action.bufNR;
+
+	frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+		tot_size = frame_size * 2;
+	else
+		tot_size = frame_size / 2 * 3;
+
+	buf->vaddr = kzalloc(tot_size, GFP_KERNEL);
+	/*
+	 * Fix: the original checked "if (!buf)", but buf points at the
+	 * embedded bufNR member and can never be NULL; it is the
+	 * allocation result that must be checked.
+	 */
+	if (!buf->vaddr)
+		return -ENOMEM;
+
+	buf->paddr = virt_to_phys(buf->vaddr);
+	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
+	rc |= 0x02D00001;
+	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
+	writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
+	writel_relaxed(buf->paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
+	buf->nr_pos = NRT2_BUF;
+	return 0;
+}
+
+/*
+ * deinit_nr_buf - disable VP noise reduction and free the NR scratch
+ * frame allocated by init_nr_buf().  No-op (with an error message) if
+ * no buffer exists.
+ */
+void deinit_nr_buf(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+	struct nr_buffer *buf;
+	uint32_t rc;
+
+	if (!c_data->vid_vp_action.bufNR.vaddr) {
+		pr_err("NR buffer has not been created");
+		return;
+	}
+
+	buf = &c_data->vid_vp_action.bufNR;
+
+	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
+	/*
+	 * Fix: the original used logical NOT ("rc &= !(0x02D00001)"),
+	 * which evaluates to "rc &= 0" and wiped every bit in the
+	 * register.  Bitwise NOT clears only the bits set by
+	 * init_nr_buf().
+	 */
+	rc &= ~0x02D00001;
+	writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
+
+	kfree(buf->vaddr);
+	buf->paddr = 0;
+	buf->vaddr = NULL;
+}
+
+/*
+ * kickoff_vp - start VP processing for the first time on a streaming
+ * client.
+ *
+ * Pulls two input buffers (T1, T2) and one output buffer off the active
+ * lists, programs their addresses and the in/out geometry, enables the
+ * VP interrupt and starts the state machine in VP_FRAME2.
+ *
+ * Returns 0 on success, -ENOEXEC if not streaming, -ENODEV when there
+ * is no vp client, or -EAGAIN when not enough buffers are queued (any
+ * buffers already taken are returned to their lists).
+ */
+int kickoff_vp(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev;
+	struct vp_action *vp_act;
+	unsigned long flags = 0;
+	unsigned int chroma_fmt = 0;
+	int size;
+#ifndef TOP_FIELD_FIX
+	/* Fix: top_field was read uninitialized for non-top fields (UB) */
+	bool top_field = 0;
+#endif
+
+	if (!c_data->streaming)
+		return -ENOEXEC;
+
+	dev = c_data->dev;
+	dprintk(2, "Start Kickoff\n");
+
+	if (dev->vp_client == NULL) {
+		pr_err("No active vp client\n");
+		return -ENODEV;
+	}
+	vp_act = &dev->vp_client->vid_vp_action;
+
+	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+	if (list_empty(&vp_act->in_active)) {
+		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+		pr_err("%s: VP We have no more input buffers\n",
+				__func__);
+		return -EAGAIN;
+	}
+
+	vp_act->bufT1 = list_entry(vp_act->in_active.next,
+			struct vcap_buffer, list);
+	list_del(&vp_act->bufT1->list);
+
+	if (list_empty(&vp_act->in_active)) {
+		/*
+		 * Fix: re-queue bufT1 before dropping the spinlock; the
+		 * original did the list_add after the unlock, racing with
+		 * other users of in_active.
+		 */
+		list_add(&vp_act->bufT1->list, &vp_act->in_active);
+		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+		pr_err("%s: VP We have no more input buffers\n",
+				__func__);
+		return -EAGAIN;
+	}
+
+	vp_act->bufT2 = list_entry(vp_act->in_active.next,
+			struct vcap_buffer, list);
+	list_del(&vp_act->bufT2->list);
+
+	if (list_empty(&vp_act->out_active)) {
+		/* Fix: as above, restore the list while still locked */
+		list_add(&vp_act->bufT2->list, &vp_act->in_active);
+		list_add(&vp_act->bufT1->list, &vp_act->in_active);
+		spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+		pr_err("%s: VP We have no more output buffers\n",
+				__func__);
+		return -EAGAIN;
+	}
+
+	vp_act->bufOut = list_entry(vp_act->out_active.next,
+			struct vcap_buffer, list);
+	list_del(&vp_act->bufOut->list);
+	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
+	/* T1 luma/chroma addresses; chroma follows the luma plane */
+	size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
+	writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
+	writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);
+
+	config_in_buffer(c_data, vp_act->bufT2);
+	config_out_buffer(c_data, vp_act->bufOut);
+
+	/* Config VP */
+	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+		chroma_fmt = 1;
+	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+			chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);
+
+	chroma_fmt = 0;
+	if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+		chroma_fmt = 1;
+
+	writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+			chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
+
+	/* Enable Interrupt */
+#ifdef TOP_FIELD_FIX
+	vp_act->top_field = 1;
+#else
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+		top_field = 1;
+#endif
+	vp_act->vp_state = VP_FRAME2;
+	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
+#ifdef TOP_FIELD_FIX
+	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+	atomic_set(&c_data->dev->vp_enabled, 1);
+	enable_irq(dev->vpirq->start);
+	return 0;
+}
+
+/*
+ * continue_vp - restart the VP after it stalled waiting for buffers
+ * (vp_wq_fnc() disabled it when vp_setup_buffers() failed).
+ *
+ * Programs the next buffers, re-enables the VP interrupt and kicks the
+ * hardware.  Returns 0 on success, -ENODEV with no vp client, or
+ * -EAGAIN when the VP state is unknown or buffers are still missing.
+ */
+int continue_vp(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev;
+	struct vp_action *vp_act;
+	int rc;
+#ifndef TOP_FIELD_FIX
+	/* Fix: top_field was read uninitialized for non-top fields (UB) */
+	bool top_field = 0;
+#endif
+
+	dprintk(2, "Start Continue\n");
+	dev = c_data->dev;
+
+	if (dev->vp_client == NULL) {
+		pr_err("No active vp client\n");
+		return -ENODEV;
+	}
+	vp_act = &dev->vp_client->vid_vp_action;
+
+	if (vp_act->vp_state == VP_UNKNOWN) {
+		pr_err("%s: VP is in an unknown state\n",
+				__func__);
+		return -EAGAIN;
+	}
+
+	rc = vp_setup_buffers(c_data);
+	if (rc < 0)
+		return rc;
+
+#ifndef TOP_FIELD_FIX
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+		top_field = 1;
+#endif
+
+	/* Config VP & Enable Interrupt */
+	writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
+#ifdef TOP_FIELD_FIX
+	writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+	writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+
+	atomic_set(&c_data->dev->vp_enabled, 1);
+	enable_irq(dev->vpirq->start);
+	return 0;
+}
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
new file mode 100644
index 0000000..47ad8d4
--- /dev/null
+++ b/drivers/media/video/vcap_vp.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef VCAP_VP_H
+#define VCAP_VP_H
+
+#include <linux/interrupt.h>
+
+#include <media/vcap_v4l2.h>
+
+/* NOTE: these macros expand a local `dev` (struct vcap_dev *) which must be
+ * in scope at every use site. */
+#define VCAP_BASE (dev->vcapbase)
+/* Argument parenthesized so expressions like VCAP_OFFSET(a + b) work. */
+#define VCAP_OFFSET(off) (VCAP_BASE + (off))
+
+#define VCAP_VP_INT_STATUS (VCAP_BASE + 0x404)
+#define VCAP_VP_INT_CLEAR (VCAP_BASE + 0x40C)
+
+#define VCAP_VP_SW_RESET (VCAP_BASE + 0x410)
+#define VCAP_VP_INTERRUPT_ENABLE (VCAP_BASE + 0x408)
+
+#define VCAP_VP_FILM_PROJECTION_T0 (VCAP_BASE + 0x50C)
+#define VCAP_VP_FILM_PROJECTION_T2 (VCAP_BASE + 0x508)
+#define VCAP_VP_FILM_PAST_MAX_PROJ (VCAP_BASE + 0x510)
+#define VCAP_VP_FILM_PAST_MIN_PROJ (VCAP_BASE + 0x514)
+#define VCAP_VP_FILM_SEQUENCE_HIST (VCAP_BASE + 0x504)
+#define VCAP_VP_FILM_MODE_STATE (VCAP_BASE + 0x500)
+
+#define VCAP_VP_BAL_VMOTION_STATE (VCAP_BASE + 0x690)
+#define VCAP_VP_REDUCT_AVG_MOTION (VCAP_BASE + 0x610)
+#define VCAP_VP_REDUCT_AVG_MOTION2 (VCAP_BASE + 0x614)
+
+#define VCAP_VP_NR_AVG_LUMA (VCAP_BASE + 0x608)
+#define VCAP_VP_NR_AVG_CHROMA (VCAP_BASE + 0x60C)
+#define VCAP_VP_NR_CTRL_LUMA (VCAP_BASE + 0x600)
+#define VCAP_VP_NR_CTRL_CHROMA (VCAP_BASE + 0x604)
+
+#define VCAP_VP_BAL_AVG_BLEND (VCAP_BASE + 0x694)
+#define VCAP_VP_VMOTION_HIST (VCAP_BASE + 0x6F8)
+
+#define VCAP_VP_MOTION_EST_ADDR (VCAP_BASE + 0x4E0)
+#define VCAP_VP_FILM_ANALYSIS_CONFIG (VCAP_BASE + 0x520)
+#define VCAP_VP_FILM_STATE_CONFIG (VCAP_BASE + 0x524)
+
+#define VCAP_VP_FVM_CONFIG (VCAP_BASE + 0x550)
+#define VCAP_VP_FILM_ANALYSIS_CONFIG2 (VCAP_BASE + 0x52C)
+#define VCAP_VP_MIXED_ANALYSIS_CONFIG (VCAP_BASE + 0x530)
+
+#define VCAP_VP_SPATIAL_CONFIG (VCAP_BASE + 0x580)
+#define VCAP_VP_SPATIAL_CONFIG2 (VCAP_BASE + 0x584)
+#define VCAP_VP_SPATIAL_CONFIG3 (VCAP_BASE + 0x588)
+#define VCAP_VP_TEMPORAL_CONFIG (VCAP_BASE + 0x5C0)
+
+#define VCAP_VP_PIXEL_DIFF_CONFIG (VCAP_BASE + 0x6FC)
+#define VCAP_VP_H_FREQ_CONFIG (VCAP_BASE + 0x528)
+#define VCAP_VP_NR_CONFIG (VCAP_BASE + 0x620)
+#define VCAP_VP_NR_LUMA_CONFIG (VCAP_BASE + 0x624)
+#define VCAP_VP_NR_CHROMA_CONFIG (VCAP_BASE + 0x628)
+#define VCAP_VP_BAL_CONFIG (VCAP_BASE + 0x680)
+#define VCAP_VP_BAL_MOTION_CONFIG (VCAP_BASE + 0x684)
+#define VCAP_VP_BAL_LIGHT_COMB (VCAP_BASE + 0x688)
+#define VCAP_VP_BAL_VMOTION_CONFIG (VCAP_BASE + 0x68C)
+
+#define VCAP_VP_NR_CONFIG2 (VCAP_BASE + 0x484)
+#define VCAP_VP_FRAME_SIZE (VCAP_BASE + 0x48C)
+#define VCAP_VP_SPLIT_SCRN_CTRL (VCAP_BASE + 0x750)
+
+#define VCAP_VP_IN_CONFIG (VCAP_BASE + 0x480)
+#define VCAP_VP_OUT_CONFIG (VCAP_BASE + 0x488)
+
+#define VCAP_VP_T2_Y_BASE_ADDR (VCAP_BASE + 0x4C0)
+#define VCAP_VP_T2_C_BASE_ADDR (VCAP_BASE + 0x4C4)
+#define VCAP_VP_OUT_Y_BASE_ADDR (VCAP_BASE + 0x4CC)
+#define VCAP_VP_OUT_C_BASE_ADDR (VCAP_BASE + 0x4D0)
+#define VCAP_VP_OUT_CR_BASE_ADDR (VCAP_BASE + 0x4D4)
+
+#define VCAP_VP_CTRL (VCAP_BASE + 0x4D8)
+
+#define VCAP_VP_T1_Y_BASE_ADDR (VCAP_BASE + 0x4A8)
+#define VCAP_VP_T1_C_BASE_ADDR (VCAP_BASE + 0x4AC)
+#define VCAP_VP_NR_T2_Y_BASE_ADDR (VCAP_BASE + 0x4B4)
+#define VCAP_VP_NR_T2_C_BASE_ADDR (VCAP_BASE + 0x4B8)
+
+#define VP_PIC_DONE (0x1 << 0)
+
+irqreturn_t vp_handler(struct vcap_dev *dev);
+int config_vp_format(struct vcap_client_data *c_data);
+void vp_stop_capture(struct vcap_client_data *c_data);
+int init_motion_buf(struct vcap_client_data *c_data);
+void deinit_motion_buf(struct vcap_client_data *c_data);
+int init_nr_buf(struct vcap_client_data *c_data);
+void deinit_nr_buf(struct vcap_client_data *c_data);
+int kickoff_vp(struct vcap_client_data *c_data);
+int continue_vp(struct vcap_client_data *c_data);
+
+#endif
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index b128a61..e2dff4b 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -1054,6 +1054,22 @@
.suspend = wcd9xxx_slim_suspend,
};
+static const struct slim_device_id sitar1p1_slimtest_id[] = {
+ {"sitar1p1-slim", 0},
+ {}
+};
+static struct slim_driver sitar1p1_slim_driver = {
+ .driver = {
+ .name = "sitar1p1-slim",
+ .owner = THIS_MODULE,
+ },
+ .probe = wcd9xxx_slim_probe,
+ .remove = wcd9xxx_slim_remove,
+ .id_table = sitar1p1_slimtest_id,
+ .resume = wcd9xxx_slim_resume,
+ .suspend = wcd9xxx_slim_suspend,
+};
+
static const struct slim_device_id slimtest_id[] = {
{"tabla-slim", 0},
{}
@@ -1116,7 +1132,7 @@
static int __init wcd9xxx_init(void)
{
- int ret1, ret2, ret3, ret4;
+ int ret1, ret2, ret3, ret4, ret5;
ret1 = slim_driver_register(&tabla_slim_driver);
if (ret1 != 0)
@@ -1131,10 +1147,14 @@
pr_err("failed to add the I2C driver\n");
ret4 = slim_driver_register(&sitar_slim_driver);
- if (ret1 != 0)
+ if (ret4 != 0)
pr_err("Failed to register sitar SB driver: %d\n", ret4);
- return (ret1 && ret2 && ret3 && ret4) ? -1 : 0;
+ ret5 = slim_driver_register(&sitar1p1_slim_driver);
+ if (ret5 != 0)
+ pr_err("Failed to register sitar SB driver: %d\n", ret5);
+
+ return (ret1 && ret2 && ret3 && ret4 && ret5) ? -1 : 0;
}
module_init(wcd9xxx_init);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index a162586..ca54265 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -363,33 +363,24 @@
show_perf(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mmc_host *host = dev_get_drvdata(dev);
- int64_t rtime_mmcq, wtime_mmcq, rtime_drv, wtime_drv;
- unsigned long rbytes_mmcq, wbytes_mmcq, rbytes_drv, wbytes_drv;
+ int64_t rtime_drv, wtime_drv;
+ unsigned long rbytes_drv, wbytes_drv;
spin_lock(&host->lock);
- rbytes_mmcq = host->perf.rbytes_mmcq;
- wbytes_mmcq = host->perf.wbytes_mmcq;
rbytes_drv = host->perf.rbytes_drv;
wbytes_drv = host->perf.wbytes_drv;
- rtime_mmcq = ktime_to_us(host->perf.rtime_mmcq);
- wtime_mmcq = ktime_to_us(host->perf.wtime_mmcq);
rtime_drv = ktime_to_us(host->perf.rtime_drv);
wtime_drv = ktime_to_us(host->perf.wtime_drv);
spin_unlock(&host->lock);
- return snprintf(buf, PAGE_SIZE, "Write performance at MMCQ Level:"
- "%lu bytes in %lld microseconds\n"
- "Read performance at MMCQ Level:"
- "%lu bytes in %lld microseconds\n"
- "Write performance at driver Level:"
+ return snprintf(buf, PAGE_SIZE, "Write performance at driver Level:"
"%lu bytes in %lld microseconds\n"
"Read performance at driver Level:"
"%lu bytes in %lld microseconds\n",
- wbytes_mmcq, wtime_mmcq, rbytes_mmcq,
- rtime_mmcq, wbytes_drv, wtime_drv,
+ wbytes_drv, wtime_drv,
rbytes_drv, rtime_drv);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 9385087..8fce9a6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -464,6 +464,11 @@
ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
}
out:
@@ -1183,6 +1188,25 @@
card->ext_csd.cache_ctrl = err ? 0 : 1;
}
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0) &&
+ (card->ext_csd.max_packed_reads > 0)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index cc95fcd..ad9dc7d 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -45,7 +45,7 @@
int smd_channel_ready;
unsigned int serial_number;
int thermal_mitigation;
- void (*tm_notify)(int);
+ void (*tm_notify)(struct device *, int);
struct wcnss_wlan_config wlan_config;
struct delayed_work wcnss_work;
} *penv = NULL;
@@ -99,7 +99,7 @@
return -EINVAL;
penv->thermal_mitigation = value;
if (penv->tm_notify)
- (penv->tm_notify)(value);
+ (penv->tm_notify)(dev, value);
return count;
}
@@ -275,14 +275,16 @@
}
EXPORT_SYMBOL(wcnss_wlan_unregister_pm_ops);
-void wcnss_register_thermal_mitigation(void (*tm_notify)(int))
+void wcnss_register_thermal_mitigation(struct device *dev,
+ void (*tm_notify)(struct device *, int))
{
- if (penv && tm_notify)
+ if (penv && dev && tm_notify)
penv->tm_notify = tm_notify;
}
EXPORT_SYMBOL(wcnss_register_thermal_mitigation);
-void wcnss_unregister_thermal_mitigation(void (*tm_notify)(int))
+void wcnss_unregister_thermal_mitigation(
+ void (*tm_notify)(struct device *, int))
{
if (penv && tm_notify) {
if (tm_notify != penv->tm_notify)
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index 0abd739..e0289ad 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -17,6 +17,7 @@
#include <linux/slab.h> /* kzalloc() */
#include <linux/interrupt.h> /* request_irq() */
#include <linux/memory.h> /* memset */
+#include <linux/vmalloc.h>
#include "sps_bam.h"
#include "bam.h"
@@ -914,7 +915,11 @@
dev->pipe_remote_mask &= ~(1UL << pipe_index);
bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
if (pipe->sys.desc_cache != NULL) {
- kfree(pipe->sys.desc_cache);
+ u32 size = pipe->num_descs * sizeof(void *);
+ if (pipe->desc_size + size <= PAGE_SIZE)
+ kfree(pipe->sys.desc_cache);
+ else
+ vfree(pipe->sys.desc_cache);
pipe->sys.desc_cache = NULL;
}
dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
@@ -1034,8 +1039,16 @@
&& (pipe->state & BAM_STATE_BAM2BAM) == 0) {
/* Allocate both descriptor cache and user pointer array */
size = pipe->num_descs * sizeof(void *);
- pipe->sys.desc_cache =
- kzalloc(pipe->desc_size + size, GFP_KERNEL);
+
+ if (pipe->desc_size + size <= PAGE_SIZE)
+ pipe->sys.desc_cache =
+ kzalloc(pipe->desc_size + size, GFP_KERNEL);
+ else {
+ pipe->sys.desc_cache =
+ vmalloc(pipe->desc_size + size);
+ memset(pipe->sys.desc_cache, 0, pipe->desc_size + size);
+ }
+
if (pipe->sys.desc_cache == NULL) {
/*** MUST BE LAST POINT OF FAILURE (see below) *****/
SPS_ERR("sps:Desc cache error: BAM 0x%x pipe %d: %d",
diff --git a/drivers/thermal/msm8960_tsens.c b/drivers/thermal/msm8960_tsens.c
index fbb377e..78a1292 100644
--- a/drivers/thermal/msm8960_tsens.c
+++ b/drivers/thermal/msm8960_tsens.c
@@ -723,7 +723,8 @@
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
- TSENS_8064_SENSORS_EN;
+ (((1 << tmdev->tsens_num_sensor) - 1)
+ << TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_MIN_STATUS_MASK |
@@ -823,7 +824,8 @@
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
- TSENS_8064_SENSORS_EN;
+ (((1 << tmdev->tsens_num_sensor) - 1)
+ << TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_LOWER_STATUS_CLR |
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index bd7cc05..ae5a62c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -319,6 +319,25 @@
This line discipline provides support for the GSM MUX protocol and
presents the mux as a set of 61 individual tty devices.
+config N_SMUX
+ tristate "SMUX line discipline support"
+ depends on NET && SERIAL_MSM_HS
+ help
+ This line discipline provides support for the Serial MUX protocol
+ and provides a TTY and kernel API for multiple logical channels.
+
+config N_SMUX_LOOPBACK
+ tristate "SMUX line discipline loopback support"
+ depends on N_SMUX
+ help
+ Provides loopback and unit testing support for the Serial MUX Protocol.
+
+config SMUX_CTL
+ tristate "SMUX control driver"
+ depends on N_SMUX
+ help
+ Support for SMUX control driver on top of serial MUX.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index ea89b0b..3078e8d 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -6,6 +6,9 @@
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
+obj-$(CONFIG_N_SMUX) += n_smux.o
+obj-$(CONFIG_N_SMUX_LOOPBACK) += smux_test.o smux_loopback.o
+obj-$(CONFIG_SMUX_CTL) += smux_ctl.o
obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
obj-$(CONFIG_R3964) += n_r3964.o
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
new file mode 100644
index 0000000..7ba54fe
--- /dev/null
+++ b/drivers/tty/n_smux.c
@@ -0,0 +1,2938 @@
+/* drivers/tty/n_smux.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/smux.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <mach/msm_serial_hs.h>
+#include "smux_private.h"
+#include "smux_loopback.h"
+
+#define SMUX_NOTIFY_FIFO_SIZE 128
+#define SMUX_TX_QUEUE_SIZE 256
+#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
+#define SMUX_WM_LOW 2
+#define SMUX_WM_HIGH 4
+#define SMUX_PKT_LOG_SIZE 80
+
+/* Maximum size we can accept in a single RX buffer */
+#define TTY_RECEIVE_ROOM 65536
+#define TTY_BUFFER_FULL_WAIT_MS 50
+
+/* maximum sleep time between wakeup attempts */
+#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
+
+/* minimum delay for scheduling delayed work */
+#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
+
+/* inactivity timeout for no rx/tx activity */
+#define SMUX_INACTIVITY_TIMEOUT_MS 1000
+
+enum {
+ MSM_SMUX_DEBUG = 1U << 0,
+ MSM_SMUX_INFO = 1U << 1,
+ MSM_SMUX_POWER_INFO = 1U << 2,
+ MSM_SMUX_PKT = 1U << 3,
+};
+
+static int smux_debug_mask;
+module_param_named(debug_mask, smux_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Simulated wakeup used for testing */
+int smux_byte_loopback;
+module_param_named(byte_loopback, smux_byte_loopback,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+int smux_simulate_wakeup_delay = 1;
+module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define SMUX_DBG(x...) do { \
+ if (smux_debug_mask & MSM_SMUX_DEBUG) \
+ pr_info(x); \
+} while (0)
+
+#define SMUX_LOG_PKT_RX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_LOG_PKT_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 0); \
+} while (0)
+
+/**
+ * Return true if channel is fully opened (both
+ * local and remote sides are in the OPENED state).
+ */
+#define IS_FULLY_OPENED(ch) \
+ (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
+ && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
+
+static struct platform_device smux_devs[] = {
+ {.name = "SMUX_CTL", .id = -1},
+ {.name = "SMUX_RMNET", .id = -1},
+ {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
+ {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
+ {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
+ {.name = "SMUX_DIAG", .id = -1},
+};
+
+enum {
+ SMUX_CMD_STATUS_RTC = 1 << 0,
+ SMUX_CMD_STATUS_RTR = 1 << 1,
+ SMUX_CMD_STATUS_RI = 1 << 2,
+ SMUX_CMD_STATUS_DCD = 1 << 3,
+ SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
+};
+
+/* Channel mode */
+enum {
+ SMUX_LCH_MODE_NORMAL,
+ SMUX_LCH_MODE_LOCAL_LOOPBACK,
+ SMUX_LCH_MODE_REMOTE_LOOPBACK,
+};
+
+enum {
+ SMUX_RX_IDLE,
+ SMUX_RX_MAGIC,
+ SMUX_RX_HDR,
+ SMUX_RX_PAYLOAD,
+ SMUX_RX_FAILURE,
+};
+
+/**
+ * Power states.
+ *
+ * The _FLUSH states are internal transitional states and are not part of the
+ * official state machine.
+ */
+enum {
+ SMUX_PWR_OFF,
+ SMUX_PWR_TURNING_ON,
+ SMUX_PWR_ON,
+ SMUX_PWR_TURNING_OFF_FLUSH,
+ SMUX_PWR_TURNING_OFF,
+ SMUX_PWR_OFF_FLUSH,
+};
+
+/**
+ * Logical Channel Structure. One instance per channel.
+ *
+ * Locking Hierarchy
+ * Each lock has a postfix that describes the locking level. If multiple locks
+ * are required, only increasing lock hierarchy numbers may be locked which
+ * ensures avoiding a deadlock.
+ *
+ * Locking Example
+ * If state_lock_lhb1 is currently held and the TX list needs to be
+ * manipulated, then tx_lock_lhb2 may be locked since it's locking hierarchy
+ * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
+ * not be acquired since it would result in a deadlock.
+ *
+ * Note that the Line Discipline locks (*_lha) should always be acquired
+ * before the logical channel locks.
+ */
+struct smux_lch_t {
+ /* channel state */
+ spinlock_t state_lock_lhb1;
+ uint8_t lcid;
+ unsigned local_state;
+ unsigned local_mode;
+ uint8_t local_tiocm;
+
+ unsigned remote_state;
+ unsigned remote_mode;
+ uint8_t remote_tiocm;
+
+ int tx_flow_control;
+
+ /* client callbacks and private data */
+ void *priv;
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+ int size);
+
+ /* TX Info */
+ spinlock_t tx_lock_lhb2;
+ struct list_head tx_queue;
+ struct list_head tx_ready_list;
+ unsigned tx_pending_data_cnt;
+ unsigned notify_lwm;
+};
+
+union notifier_metadata {
+ struct smux_meta_disconnected disconnected;
+ struct smux_meta_read read;
+ struct smux_meta_write write;
+ struct smux_meta_tiocm tiocm;
+};
+
+struct smux_notify_handle {
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ void *priv;
+ int event_type;
+ union notifier_metadata *metadata;
+};
+
+/**
+ * Line discipline and module structure.
+ *
+ * Only one instance since multiple instances of line discipline are not
+ * allowed.
+ */
+struct smux_ldisc_t {
+ spinlock_t lock_lha0;
+
+ int is_initialized;
+ int in_reset;
+ int ld_open_count;
+ struct tty_struct *tty;
+
+ /* RX State Machine */
+ spinlock_t rx_lock_lha1;
+ unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
+ unsigned int recv_len;
+ unsigned int pkt_remain;
+ unsigned rx_state;
+ unsigned rx_activity_flag;
+
+ /* TX / Power */
+ spinlock_t tx_lock_lha2;
+ struct list_head lch_tx_ready_list;
+ unsigned power_state;
+ unsigned pwr_wakeup_delay_us;
+ unsigned tx_activity_flag;
+ unsigned powerdown_enabled;
+};
+
+
+/* data structures */
+static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
+static struct smux_ldisc_t smux;
+static const char *tty_error_type[] = {
+ [TTY_NORMAL] = "normal",
+ [TTY_OVERRUN] = "overrun",
+ [TTY_BREAK] = "break",
+ [TTY_PARITY] = "parity",
+ [TTY_FRAME] = "framing",
+};
+
+static const char *smux_cmds[] = {
+ [SMUX_CMD_DATA] = "DATA",
+ [SMUX_CMD_OPEN_LCH] = "OPEN",
+ [SMUX_CMD_CLOSE_LCH] = "CLOSE",
+ [SMUX_CMD_STATUS] = "STATUS",
+ [SMUX_CMD_PWR_CTL] = "PWR",
+ [SMUX_CMD_BYTE] = "Raw Byte",
+};
+
+static void smux_notify_local_fn(struct work_struct *work);
+static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
+
+static struct workqueue_struct *smux_notify_wq;
+static size_t handle_size;
+static struct kfifo smux_notify_fifo;
+static int queued_fifo_notifications;
+static DEFINE_SPINLOCK(notify_lock_lhc1);
+
+static struct workqueue_struct *smux_tx_wq;
+static void smux_tx_worker(struct work_struct *work);
+static DECLARE_WORK(smux_tx_work, smux_tx_worker);
+
+static void smux_wakeup_worker(struct work_struct *work);
+static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
+static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
+
+static void smux_inactivity_worker(struct work_struct *work);
+static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
+static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
+ smux_inactivity_worker);
+
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
+static void list_channel(struct smux_lch_t *ch);
+static int smux_send_status_cmd(struct smux_lch_t *ch);
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
+
+/**
+ * Convert TTY Error Flags to string for logging purposes.
+ *
+ * @flag TTY_* flag
+ * @returns String description or NULL if unknown
+ */
+static const char *tty_flag_to_str(unsigned flag)
+{
+	return (flag < ARRAY_SIZE(tty_error_type)) ?
+			tty_error_type[flag] : NULL;
+}
+
+/**
+ * Convert SMUX Command to string for logging purposes.
+ *
+ * @cmd SMUX command
+ * @returns String description or NULL if unknown
+ */
+static const char *cmd_to_str(unsigned cmd)
+{
+	return (cmd < ARRAY_SIZE(smux_cmds)) ? smux_cmds[cmd] : NULL;
+}
+
+/**
+ * Set the reset state due to an unrecoverable failure.
+ *
+ * Marks the line discipline as in-reset; TX/RX paths are expected to bail
+ * out until subsystem restart (SSR) recovers the link.
+ */
+static void smux_enter_reset(void)
+{
+	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
+	smux.in_reset = 1;
+}
+
+/**
+ * One-time initialization of the logical-channel table and worker
+ * infrastructure (notify/TX workqueues, notification FIFO, loopback).
+ *
+ * @returns 0 on success, -ENOMEM on allocation failure
+ */
+static int lch_init(void)
+{
+	unsigned int id;
+	struct smux_lch_t *ch;
+	int i = 0;
+
+	handle_size = sizeof(struct smux_notify_handle *);
+
+	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
+	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
+
+	/* create_singlethread_workqueue() returns NULL on failure (never an
+	 * ERR_PTR), so test for NULL and release the one that succeeded. */
+	if (!smux_notify_wq || !smux_tx_wq) {
+		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
+							__func__);
+		if (smux_notify_wq)
+			destroy_workqueue(smux_notify_wq);
+		if (smux_tx_wq)
+			destroy_workqueue(smux_tx_wq);
+		return -ENOMEM;
+	}
+
+	i |= kfifo_alloc(&smux_notify_fifo,
+			SMUX_NOTIFY_FIFO_SIZE * handle_size,
+			GFP_KERNEL);
+	i |= smux_loopback_init();
+
+	if (i) {
+		pr_err("%s: out of memory error\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* start every logical channel fully closed in normal mode */
+	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
+		ch = &smux_lch[id];
+
+		spin_lock_init(&ch->state_lock_lhb1);
+		ch->lcid = id;
+		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+		ch->local_mode = SMUX_LCH_MODE_NORMAL;
+		ch->local_tiocm = 0x0;
+		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
+		ch->remote_tiocm = 0x0;
+		ch->tx_flow_control = 0;
+		ch->priv = 0;
+		ch->notify = 0;
+		ch->get_rx_buffer = 0;
+
+		spin_lock_init(&ch->tx_lock_lhb2);
+		INIT_LIST_HEAD(&ch->tx_queue);
+		INIT_LIST_HEAD(&ch->tx_ready_list);
+		ch->tx_pending_data_cnt = 0;
+		ch->notify_lwm = 0;
+	}
+
+	return 0;
+}
+
+/* Validate a logical channel id: 0 if in range, -ENXIO otherwise. */
+int smux_assert_lch_id(uint32_t lcid)
+{
+	return (lcid < SMUX_NUM_LOGICAL_CHANNELS) ? 0 : -ENXIO;
+}
+
+/**
+ * Log packet information for debug purposes.
+ *
+ * @pkt Packet to log
+ * @is_recv 1 = RX packet; 0 = TX Packet
+ *
+ * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
+ *
+ * PKT Info:
+ *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
+ *
+ * Direction:    R = Receive, S = Send
+ * Local State:  C = Closed; c = closing; o = opening; O = Opened
+ * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
+ * Remote State: C = Closed; O = Opened
+ * Remote Mode:  R = Remote loopback; N = Normal
+ */
+static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
+{
+	char logbuf[SMUX_PKT_LOG_SIZE];
+	char cmd_extra[16];
+	int i = 0;
+	int count;
+	int len;
+	char local_state;
+	char local_mode;
+	char remote_state;
+	char remote_mode;
+	struct smux_lch_t *ch;
+	unsigned char *data;
+
+	/* NOTE(review): pkt->hdr.lcid is not bounds-checked here; callers
+	 * are assumed to have validated it (e.g. smux_assert_lch_id). */
+	ch = &smux_lch[pkt->hdr.lcid];
+
+	switch (ch->local_state) {
+	case SMUX_LCH_LOCAL_CLOSED:
+		local_state = 'C';
+		break;
+	case SMUX_LCH_LOCAL_OPENING:
+		local_state = 'o';
+		break;
+	case SMUX_LCH_LOCAL_OPENED:
+		local_state = 'O';
+		break;
+	case SMUX_LCH_LOCAL_CLOSING:
+		local_state = 'c';
+		break;
+	default:
+		local_state = 'U';
+		break;
+	}
+
+	switch (ch->local_mode) {
+	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+		local_mode = 'L';
+		break;
+	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+		local_mode = 'R';
+		break;
+	case SMUX_LCH_MODE_NORMAL:
+		local_mode = 'N';
+		break;
+	default:
+		local_mode = 'U';
+		break;
+	}
+
+	switch (ch->remote_state) {
+	case SMUX_LCH_REMOTE_CLOSED:
+		remote_state = 'C';
+		break;
+	case SMUX_LCH_REMOTE_OPENED:
+		remote_state = 'O';
+		break;
+
+	default:
+		remote_state = 'U';
+		break;
+	}
+
+	switch (ch->remote_mode) {
+	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+		remote_mode = 'R';
+		break;
+	case SMUX_LCH_MODE_NORMAL:
+		remote_mode = 'N';
+		break;
+	default:
+		remote_mode = 'U';
+		break;
+	}
+
+	/* determine command type (ACK, etc) */
+	cmd_extra[0] = '\0';
+	switch (pkt->hdr.cmd) {
+	case SMUX_CMD_OPEN_LCH:
+		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+		break;
+	case SMUX_CMD_CLOSE_LCH:
+		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+		break;
+	};
+
+	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
+			is_recv ? 'R' : 'S', pkt->hdr.lcid,
+			local_state, local_mode,
+			remote_state, remote_mode,
+			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
+			pkt->hdr.payload_len, pkt->hdr.pad_len);
+
+	/* snprintf returns the would-be length on truncation; clamp i so
+	 * the hex dump below never indexes past logbuf or passes a
+	 * negative size to snprintf */
+	if (i >= SMUX_PKT_LOG_SIZE)
+		i = SMUX_PKT_LOG_SIZE - 1;
+
+	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
+	data = (unsigned char *)pkt->payload;
+	for (count = 0; count < len && i < SMUX_PKT_LOG_SIZE - 1; count++)
+		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+				"%02x ", (unsigned)data[count]);
+
+	pr_info("%s\n", logbuf);
+}
+
+/**
+ * Worker that drains the notification FIFO and delivers each queued
+ * client notification outside of any spinlock context.
+ *
+ * @work Work item (unused)
+ */
+static void smux_notify_local_fn(struct work_struct *work)
+{
+	struct smux_notify_handle *notify_handle = NULL;
+	union notifier_metadata *metadata = NULL;
+	unsigned long flags;
+	int i;
+
+	for (;;) {
+		/* retrieve notification */
+		spin_lock_irqsave(&notify_lock_lhc1, flags);
+		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
+			i = kfifo_out(&smux_notify_fifo,
+					&notify_handle,
+					handle_size);
+			if (i != handle_size) {
+				/* %zu: handle_size is a size_t */
+				pr_err("%s: unable to retrieve handle %d expected %zu\n",
+						__func__, i, handle_size);
+				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+				break;
+			}
+		} else {
+			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+			break;
+		}
+		--queued_fifo_notifications;
+		spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+
+		/* notify client outside the lock */
+		metadata = notify_handle->metadata;
+		notify_handle->notify(notify_handle->priv,
+				notify_handle->event_type,
+				metadata);
+
+		kfree(metadata);
+		kfree(notify_handle);
+	}
+}
+
+/**
+ * Initialize existing packet.
+ *
+ * @pkt Packet to initialize
+ *
+ * Zeroes the packet, stamps the SMUX magic value into the header, and
+ * resets the list node so the packet can be queued.
+ */
+void smux_init_pkt(struct smux_pkt_t *pkt)
+{
+	memset(pkt, 0x0, sizeof(*pkt));
+	pkt->hdr.magic = SMUX_MAGIC;
+	INIT_LIST_HEAD(&pkt->list);
+}
+
+/**
+ * Allocate and initialize packet.
+ *
+ * @returns New packet or NULL on allocation failure
+ *
+ * If a payload is needed, either set it directly and ensure that it's freed
+ * or use smux_alloc_pkt_payload() to allocate a payload and it will be freed
+ * automatically when smux_free_pkt() is called.
+ *
+ * GFP_ATOMIC is used so this may be called from atomic/IRQ context.
+ */
+struct smux_pkt_t *smux_alloc_pkt(void)
+{
+	/* Consider a free list implementation instead of kmalloc */
+	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
+	if (!pkt) {
+		pr_err("%s: out of memory\n", __func__);
+		return NULL;
+	}
+	smux_init_pkt(pkt);
+	pkt->allocated = 1;
+
+	return pkt;
+}
+
+/**
+ * Free packet.
+ *
+ * @pkt Packet to free (may be NULL)
+ *
+ * If payload was allocated using smux_alloc_pkt_payload(), then it is freed
+ * as well (free_payload flag).  Otherwise, the caller is responsible for
+ * freeing the payload.  Packets that were not heap-allocated (allocated
+ * flag clear) are left to their owner.
+ */
+void smux_free_pkt(struct smux_pkt_t *pkt)
+{
+	if (pkt) {
+		if (pkt->free_payload)
+			kfree(pkt->payload);
+		if (pkt->allocated)
+			kfree(pkt);
+	}
+}
+
+/**
+ * Allocate packet payload.
+ *
+ * @pkt Packet to add payload to (payload size taken from hdr.payload_len)
+ *
+ * @returns 0 on success, <0 upon error
+ *
+ * A flag is set to signal smux_free_pkt() to free the payload.  Setting
+ * free_payload before the NULL check is harmless: kfree(NULL) is a no-op.
+ */
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
+{
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
+	pkt->free_payload = 1;
+	if (!pkt->payload) {
+		pr_err("%s: unable to malloc %d bytes for payload\n",
+				__func__, pkt->hdr.payload_len);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * Queue a client notification for delivery by the notify workqueue.
+ *
+ * @lcid     Logical channel id (assumed valid; indexes smux_lch directly)
+ * @event    Event type passed through to the client notify callback
+ * @metadata Optional event metadata; copied, so the caller's copy may be
+ *           stack-allocated (NULL if the event carries none)
+ *
+ * @returns 0 on success, -ENOMEM/-ENOSPC on allocation or FIFO failure
+ *
+ * Safe for atomic context (GFP_ATOMIC allocations).  On failure the
+ * handle and metadata copy are freed here.
+ */
+static int schedule_notify(uint8_t lcid, int event,
+			const union notifier_metadata *metadata)
+{
+	struct smux_notify_handle *notify_handle = 0;
+	union notifier_metadata *meta_copy = 0;
+	struct smux_lch_t *ch;
+	int i;
+	unsigned long flags;
+	int ret = 0;
+
+	ch = &smux_lch[lcid];
+	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
+						GFP_ATOMIC);
+	if (!notify_handle) {
+		pr_err("%s: out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto free_out;
+	}
+
+	notify_handle->notify = ch->notify;
+	notify_handle->priv = ch->priv;
+	notify_handle->event_type = event;
+	if (metadata) {
+		meta_copy = kzalloc(sizeof(union notifier_metadata),
+						GFP_ATOMIC);
+		if (!meta_copy) {
+			pr_err("%s: out of memory\n", __func__);
+			ret = -ENOMEM;
+			goto free_out;
+		}
+		*meta_copy = *metadata;
+		notify_handle->metadata = meta_copy;
+	} else {
+		notify_handle->metadata = NULL;
+	}
+
+	spin_lock_irqsave(&notify_lock_lhc1, flags);
+	i = kfifo_avail(&smux_notify_fifo);
+	if (i < handle_size) {
+		/* %zu: handle_size is a size_t */
+		pr_err("%s: fifo full error %d expected %zu\n",
+					__func__, i, handle_size);
+		ret = -ENOMEM;
+		goto unlock_out;
+	}
+
+	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
+	if (i < 0 || i != handle_size) {
+		pr_err("%s: fifo not available error %d (expected %zu)\n",
+				__func__, i, handle_size);
+		ret = -ENOSPC;
+		goto unlock_out;
+	}
+	++queued_fifo_notifications;
+
+unlock_out:
+	spin_unlock_irqrestore(&notify_lock_lhc1, flags);
+
+free_out:
+	queue_work(smux_notify_wq, &smux_notify_local);
+	if (ret < 0 && notify_handle) {
+		kfree(notify_handle->metadata);
+		kfree(notify_handle);
+	}
+	return ret;
+}
+
+/**
+ * Returns the serialized size of a packet.
+ *
+ * @pkt Packet to serialize
+ *
+ * @returns Serialized length of packet (header + payload + padding)
+ */
+static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
+{
+	return sizeof(struct smux_hdr_t)
+		+ pkt->hdr.payload_len
+		+ pkt->hdr.pad_len;
+}
+
+/**
+ * Serialize packet @pkt into output buffer @data.
+ *
+ * @pkt Packet to serialize
+ * @out Destination buffer pointer (caller must provide at least
+ *      SMUX_MAX_PKT_SIZE bytes)
+ * @out_len Size of serialized packet
+ *
+ * @returns 0 for success, -E2BIG if the packet exceeds SMUX_MAX_PKT_SIZE
+ */
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+					unsigned int *out_len)
+{
+	char *data_start = out;
+
+	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
+		pr_err("%s: packet size %d too big\n",
+				__func__, smux_serialize_size(pkt));
+		return -E2BIG;
+	}
+
+	/* header, then payload (if any), then zero padding */
+	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
+	out += sizeof(struct smux_hdr_t);
+	if (pkt->payload) {
+		memcpy(out, pkt->payload, pkt->hdr.payload_len);
+		out += pkt->hdr.payload_len;
+	}
+	if (pkt->hdr.pad_len) {
+		memset(out, 0x0, pkt->hdr.pad_len);
+		out += pkt->hdr.pad_len;
+	}
+	*out_len = out - data_start;
+	return 0;
+}
+
+/**
+ * Serialize header and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized header data (aliases pkt->hdr;
+ *           no copy is made)
+ * @out_len[out] Pointer to the serialized header length
+ */
+static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
+					unsigned int *out_len)
+{
+	*out = (char *)&pkt->hdr;
+	*out_len = sizeof(struct smux_hdr_t);
+}
+
+/**
+ * Serialize payload and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized payload data (aliases pkt->payload;
+ *           may be NULL if the packet has no payload)
+ * @out_len[out] Pointer to the serialized payload length
+ */
+static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
+					unsigned int *out_len)
+{
+	*out = pkt->payload;
+	*out_len = pkt->hdr.payload_len;
+}
+
+/**
+ * Serialize padding and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized padding (always NULL)
+ * @out_len[out] Pointer to the serialized padding length
+ *
+ * Since the padding field value is undefined, only the size of the padding
+ * (@out_len) is set and the buffer pointer (@out) will always be NULL.
+ * Callers are expected to emit @out_len filler bytes themselves.
+ */
+static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
+					unsigned int *out_len)
+{
+	*out = NULL;
+	*out_len = pkt->hdr.pad_len;
+}
+
+/**
+ * Write data to the TTY framework, retrying partial writes until every
+ * byte has been accepted.
+ *
+ * @data Data to write (NULL is treated as a no-op)
+ * @len Length of data
+ *
+ * @returns 0 for success, < 0 for TTY write failure
+ */
+static int write_to_tty(char *data, unsigned len)
+{
+	int written;
+
+	if (!data)
+		return 0;
+
+	while (len > 0) {
+		written = smux.tty->ops->write(smux.tty, data, len);
+		if (written < 0) {
+			pr_err("%s: TTY write returned error %d\n",
+					__func__, written);
+			return written;
+		}
+		data += written;
+		len -= written;
+
+		/* TTY buffer is full - wait for room before retrying */
+		if (len)
+			tty_wait_until_sent(smux.tty,
+				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
+
+		/* FUTURE - add SSR logic */
+	}
+	return 0;
+}
+
+/**
+ * Write packet to TTY.
+ *
+ * @pkt packet to write
+ *
+ * @returns 0 on success, -ENOTTY if the TTY is not initialized, or the
+ * negative error from the underlying TTY write
+ */
+static int smux_tx_tty(struct smux_pkt_t *pkt)
+{
+ char *data;
+ unsigned int len;
+ int ret;
+
+ if (!smux.tty) {
+ pr_err("%s: TTY not initialized", __func__);
+ return -ENOTTY;
+ }
+
+ /* single-byte commands carry their value in the header flags field */
+ if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
+ SMUX_DBG("%s: tty send single byte\n", __func__);
+ ret = write_to_tty(&pkt->hdr.flags, 1);
+ return ret;
+ }
+
+ /* header, payload, and padding are written as separate TTY writes */
+ smux_serialize_hdr(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write header %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ smux_serialize_payload(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write payload %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ /* padding has no backing buffer; emit zero bytes one at a time */
+ smux_serialize_padding(pkt, &data, &len);
+ while (len > 0) {
+ char zero = 0x0;
+ ret = write_to_tty(&zero, 1);
+ if (ret) {
+ pr_err("%s: failed %d to write padding %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+ --len;
+ }
+ return 0;
+}
+
+/**
+ * Send a single character.
+ *
+ * @ch Character to send
+ *
+ * The byte travels in the flags field of a SMUX_CMD_BYTE packet on the
+ * control channel (lcid 0), via loopback when byte loopback is enabled.
+ */
+static void smux_send_byte(char ch)
+{
+	struct smux_pkt_t pkt;
+
+	smux_init_pkt(&pkt);
+
+	pkt.hdr.cmd = SMUX_CMD_BYTE;
+	/* removed duplicate "pkt.hdr.flags = ch" assignment */
+	pkt.hdr.flags = ch;
+	pkt.hdr.lcid = 0;
+
+	SMUX_LOG_PKT_TX(&pkt);
+	if (!smux_byte_loopback)
+		smux_tx_tty(&pkt);
+	else
+		smux_tx_loopback(&pkt);
+}
+
+/**
+ * Receive a single-character packet (used for internal testing).
+ *
+ * @ch Character to receive
+ * @lcid Logical channel ID for packet
+ *
+ * @returns result of dispatching the synthesized packet (0 for success)
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static int smux_receive_byte(char ch, int lcid)
+{
+	struct smux_pkt_t pkt;
+
+	/* synthesize a BYTE command packet carrying @ch in the flags */
+	smux_init_pkt(&pkt);
+	pkt.hdr.cmd = SMUX_CMD_BYTE;
+	pkt.hdr.flags = ch;
+	pkt.hdr.lcid = lcid;
+
+	return smux_dispatch_rx_pkt(&pkt);
+}
+
+/**
+ * Append a packet to a channel's transmit queue.
+ *
+ * @pkt_ptr Packet to queue
+ * @ch Channel to queue packet on
+ * @queue If non-zero, also place the channel on the tx-ready list
+ */
+static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
+		int queue)
+{
+	unsigned long irq_flags;
+
+	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
+
+	spin_lock_irqsave(&ch->tx_lock_lhb2, irq_flags);
+	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
+	spin_unlock_irqrestore(&ch->tx_lock_lhb2, irq_flags);
+
+	if (queue)
+		list_channel(ch);
+}
+
+/**
+ * Handle receive OPEN ACK command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -EINVAL if the ack does not match channel state
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ int enable_powerdown = 0;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_OPENED);
+
+ /* remote granted power-collapse support for this link */
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ ch->local_state = SMUX_LCH_LOCAL_OPENED;
+ /* fully connected once both local and remote ends are open */
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback OPEN ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x open ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock(&ch->state_lock_lhb1);
+
+ /* flip the global powerdown flag outside the channel lock */
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ if (!smux.powerdown_enabled) {
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n",
+ __func__);
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ return ret;
+}
+
+/**
+ * Handle receive CLOSE ACK command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -EINVAL if the ack does not match channel state
+ */
+static int smux_handle_close_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ union notifier_metadata meta_disconnected;
+ unsigned long flags;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_LOCAL_CLOSING,
+ SMUX_LCH_LOCAL_CLOSED);
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ /* fully disconnected once both ends are closed */
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback CLOSE ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x close ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ return ret;
+}
+
+/**
+ * Handle receive OPEN command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -ENOMEM if the ack packet could not be
+ * allocated (caller may retry later), -EINVAL on invalid channel state
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ int tx_ready = 0;
+ int enable_powerdown = 0;
+
+ /* OPEN with the ACK flag set is handled by the ack path */
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ return smux_handle_rx_open_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED);
+
+ ch->remote_state = SMUX_LCH_REMOTE_OPENED;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ /* Send Open ACK */
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ /* NOTE(review): power-collapse is always advertised in the
+ * ack even if the remote did not request it - confirm */
+ ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
+ | SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
+ ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+ ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+ }
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send an Open command to the remote side to
+ * simulate our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ ack_pkt->hdr.flags =
+ SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
+ /* both ends now open - notify the local client */
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ }
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x open invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+
+ /* flip the global powerdown flag outside the channel lock */
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n", __func__);
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive CLOSE command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -ENOMEM if the ack packet could not be
+ * allocated (caller may retry later), -EINVAL on invalid channel state
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ union notifier_metadata meta_disconnected;
+ int tx_ready = 0;
+
+ /* CLOSE with the ACK flag set is handled by the ack path */
+ if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ return smux_handle_close_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_OPENED,
+ SMUX_LCH_REMOTE_CLOSED);
+
+ /* allocate the ack before committing the state change */
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send a Close command to the remote side to simulate
+ * our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = 0;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ }
+
+ /* fully disconnected once both ends are closed */
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x close invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive DATA command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -ENXIO for an invalid channel, -EIO for data on
+ * a channel that is not open, -EINVAL for an empty payload, -ENOMEM if no
+ * client RX buffer (or loopback packet) could be obtained
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
+{
+	uint8_t lcid;
+	int ret;
+	int i;
+	int tmp;
+	int rx_len;
+	struct smux_lch_t *ch;
+	union notifier_metadata metadata;
+	int remote_loopback;
+	int tx_ready = 0;
+	struct smux_pkt_t *ack_pkt;
+	unsigned long flags;
+
+	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+		return -ENXIO;
+
+	lcid = pkt->hdr.lcid;
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
+
+	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
+		&& !remote_loopback) {
+		pr_err("smux: ch %d error data on local state 0x%x",
+					lcid, ch->local_state);
+		ret = -EIO;
+		goto out;
+	}
+
+	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+		pr_err("smux: ch %d error data on remote state 0x%x",
+					lcid, ch->remote_state);
+		ret = -EIO;
+		goto out;
+	}
+
+	rx_len = pkt->hdr.payload_len;
+	if (rx_len == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Retry the client RX-buffer request a bounded number of times when
+	 * the client reports temporary exhaustion (-EAGAIN).  Initialize ret
+	 * so a fully-exhausted retry loop reports -ENOMEM (previously ret
+	 * could be returned uninitialized on the loopback path).
+	 */
+	ret = -ENOMEM;
+	for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
+		metadata.read.pkt_priv = 0;
+		metadata.read.buffer = 0;
+
+		if (!remote_loopback) {
+			tmp = ch->get_rx_buffer(ch->priv,
+					(void **)&metadata.read.pkt_priv,
+					(void **)&metadata.read.buffer,
+					rx_len);
+			if (tmp == 0 && metadata.read.buffer) {
+				/* place data into RX buffer */
+				memcpy(metadata.read.buffer, pkt->payload,
+								rx_len);
+				metadata.read.len = rx_len;
+				schedule_notify(lcid, SMUX_READ_DONE,
+								&metadata);
+				ret = 0;
+				break;
+			} else if (tmp == -EAGAIN) {
+				/* client buffers temporarily exhausted */
+				ret = -ENOMEM;
+			} else if (tmp < 0) {
+				schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+				ret = -ENOMEM;
+				break;
+			} else if (!metadata.read.buffer) {
+				pr_err("%s: get_rx_buffer() buffer is NULL\n",
+					__func__);
+				ret = -ENOMEM;
+			}
+		} else {
+			/* Echo the data back to the remote client. */
+			ack_pkt = smux_alloc_pkt();
+			if (ack_pkt) {
+				ack_pkt->hdr.lcid = lcid;
+				ack_pkt->hdr.cmd = SMUX_CMD_DATA;
+				ack_pkt->hdr.flags = 0;
+				ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
+				ack_pkt->payload = pkt->payload;
+				ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
+				smux_tx_queue(ack_pkt, ch, 0);
+				tx_ready = 1;
+				ret = 0;
+			} else {
+				pr_err("%s: Remote loopback allocation failure\n",
+								__func__);
+				ret = -ENOMEM;
+			}
+			/* echo exactly once - do not retry in loopback mode */
+			break;
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Handle receive byte command for testing purposes.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success, -ENXIO for an invalid channel id, -EIO if the
+ * channel is not open on both ends
+ */
+static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ union notifier_metadata metadata;
+ unsigned long flags;
+
+ if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+ return -ENXIO;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+ pr_err("smux: ch %d error data on local state 0x%x",
+ lcid, ch->local_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+ pr_err("smux: ch %d error data on remote state 0x%x",
+ lcid, ch->remote_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* the received byte travels in the header flags field and is handed
+ * to the client through pkt_priv; there is no data buffer */
+ metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
+ metadata.read.buffer = 0;
+ schedule_notify(lcid, SMUX_READ_DONE, &metadata);
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ return ret;
+}
+
+/**
+ * Handle receive status command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success (the status update itself cannot fail)
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
+{
+	uint8_t lcid;
+	/* initialized: previously returned uninitialized when the channel
+	 * was not fully opened */
+	int ret = 0;
+	struct smux_lch_t *ch;
+	union notifier_metadata meta;
+	unsigned long flags;
+	int tx_ready = 0;
+
+	lcid = pkt->hdr.lcid;
+	ch = &smux_lch[lcid];
+
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	meta.tiocm.tiocm_old = ch->remote_tiocm;
+	meta.tiocm.tiocm_new = pkt->hdr.flags;
+
+	/* update logical channel flow control */
+	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
+		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
+		/* logical channel flow control changed */
+		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
+			/* disabled TX */
+			SMUX_DBG("TX Flow control enabled\n");
+			ch->tx_flow_control = 1;
+		} else {
+			/* re-enable channel */
+			SMUX_DBG("TX Flow control disabled\n");
+			ch->tx_flow_control = 0;
+			tx_ready = 1;
+		}
+	}
+	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
+	ch->remote_tiocm = pkt->hdr.flags;
+	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
+
+	/* client notification for status change */
+	if (IS_FULLY_OPENED(ch)) {
+		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
+			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
+	}
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Handle receive power command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
+{
+	int tx_ready = 0;
+	struct smux_pkt_t *ack_pkt;
+	/* capture the channel id up front: once the ack packet has been
+	 * queued it is owned by the TX path and must not be dereferenced
+	 * (and ack_pkt is never assigned on the ack-handling branch) */
+	uint8_t lcid = pkt->hdr.lcid;
+
+	spin_lock(&smux.tx_lock_lha2);
+	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
+		/* local sleep request ack */
+		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+			/* Power-down complete, turn off UART */
+			SMUX_DBG("%s: Power %d->%d\n", __func__,
+					smux.power_state, SMUX_PWR_OFF_FLUSH);
+			smux.power_state = SMUX_PWR_OFF_FLUSH;
+			queue_work(smux_tx_wq, &smux_inactivity_work);
+		} else {
+			pr_err("%s: sleep request ack invalid in state %d\n",
+					__func__, smux.power_state);
+		}
+	} else {
+		/* remote sleep request */
+		if (smux.power_state == SMUX_PWR_ON
+			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
+			ack_pkt = smux_alloc_pkt();
+			if (ack_pkt) {
+				SMUX_DBG("%s: Power %d->%d\n", __func__,
+						smux.power_state,
+						SMUX_PWR_TURNING_OFF_FLUSH);
+
+				/* send power-down request ack */
+				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
+				ack_pkt->hdr.lcid = lcid;
+				smux_tx_queue(ack_pkt,
+						&smux_lch[lcid], 0);
+				tx_ready = 1;
+				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
+				queue_delayed_work(smux_tx_wq,
+					&smux_delayed_inactivity_work,
+					msecs_to_jiffies(
+						SMUX_INACTIVITY_TIMEOUT_MS));
+			}
+		} else {
+			pr_err("%s: sleep request invalid in state %d\n",
+					__func__, smux.power_state);
+		}
+	}
+	spin_unlock(&smux.tx_lock_lha2);
+
+	if (tx_ready)
+		list_channel(&smux_lch[lcid]);
+
+	return 0;
+}
+
+/**
+ * Dispatch a completed packet to the per-command receive handler.
+ *
+ * @pkt Packet to process
+ *
+ * @returns the handler's result (0 for success), -EINVAL for an unknown
+ * command
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
+{
+	SMUX_LOG_PKT_RX(pkt);
+
+	switch (pkt->hdr.cmd) {
+	case SMUX_CMD_OPEN_LCH:
+		return smux_handle_rx_open_cmd(pkt);
+	case SMUX_CMD_DATA:
+		return smux_handle_rx_data_cmd(pkt);
+	case SMUX_CMD_CLOSE_LCH:
+		return smux_handle_rx_close_cmd(pkt);
+	case SMUX_CMD_STATUS:
+		return smux_handle_rx_status_cmd(pkt);
+	case SMUX_CMD_PWR_CTL:
+		return smux_handle_rx_power_cmd(pkt);
+	case SMUX_CMD_BYTE:
+		return smux_handle_rx_byte_cmd(pkt);
+	default:
+		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
+		return -EINVAL;
+	}
+}
+
+/**
+ * Deserializes a packet and dispatches it to the packet receive logic.
+ *
+ * @data Raw data for one packet
+ * @len Length of the data
+ *
+ * @returns 0 for success, -EINVAL for a short or corrupt packet,
+ * -ENXIO for an invalid channel id
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_deserialize(unsigned char *data, int len)
+{
+	struct smux_pkt_t recv;
+	uint8_t lcid;
+
+	/* reject buffers too small to contain a full header */
+	if (len < (int)sizeof(struct smux_hdr_t)) {
+		pr_err("%s: packet length %d too short for header\n",
+				__func__, len);
+		return -EINVAL;
+	}
+
+	smux_init_pkt(&recv);
+
+	/*
+	 * It may be possible to optimize this to not use the
+	 * temporary buffer.
+	 */
+	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
+
+	if (recv.hdr.magic != SMUX_MAGIC) {
+		pr_err("%s: invalid header magic\n", __func__);
+		return -EINVAL;
+	}
+
+	lcid = recv.hdr.lcid;
+	if (smux_assert_lch_id(lcid)) {
+		pr_err("%s: invalid channel id %d\n", __func__, lcid);
+		return -ENXIO;
+	}
+
+	/* payload immediately follows the header in the receive buffer */
+	if (recv.hdr.payload_len)
+		recv.payload = data + sizeof(struct smux_hdr_t);
+
+	return smux_dispatch_rx_pkt(&recv);
+}
+
+/**
+ * Handle wakeup request byte.
+ *
+ * Powers the link on if it is off or already turning on, then always
+ * acknowledges the request with a SMUX_WAKEUP_ACK byte.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_req(void)
+{
+	spin_lock(&smux.tx_lock_lha2);
+	if (smux.power_state == SMUX_PWR_OFF
+		|| smux.power_state == SMUX_PWR_TURNING_ON) {
+		/* wakeup system */
+		SMUX_DBG("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_ON);
+		smux.power_state = SMUX_PWR_ON;
+		queue_work(smux_tx_wq, &smux_wakeup_work);
+		queue_work(smux_tx_wq, &smux_tx_work);
+		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+	}
+	/* the ack is sent regardless of the current power state */
+	smux_send_byte(SMUX_WAKEUP_ACK);
+	spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * Handle wakeup request ack.
+ *
+ * Completes a pending power-up; an ack received while already on is
+ * ignored, and one in any other state is reported as invalid.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_ack(void)
+{
+	spin_lock(&smux.tx_lock_lha2);
+	switch (smux.power_state) {
+	case SMUX_PWR_TURNING_ON:
+		/* received response to wakeup request */
+		SMUX_DBG("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_ON);
+		smux.power_state = SMUX_PWR_ON;
+		queue_work(smux_tx_wq, &smux_tx_work);
+		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+		break;
+
+	case SMUX_PWR_ON:
+		/* duplicate ack while already awake - nothing to do */
+		break;
+
+	default:
+		/* invalid message */
+		pr_err("%s: wakeup request ack invalid in state %d\n",
+				__func__, smux.power_state);
+		break;
+	}
+	spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * RX State machine - IDLE state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Scans for the start of a packet (magic word) or a single-byte wakeup
+ * command; anything else is logged and discarded.
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_idle(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int i;
+
+ if (flag) {
+ /* discard one errored byte; in byte-loopback test mode report
+ * the failure back through the test channel */
+ if (smux_byte_loopback)
+ smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
+ smux_byte_loopback);
+ pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
+ ++*used;
+ return;
+ }
+
+ for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
+ switch (data[i]) {
+ case SMUX_MAGIC_WORD1:
+ /* possible packet start - wait for second magic byte */
+ smux.rx_state = SMUX_RX_MAGIC;
+ break;
+ case SMUX_WAKEUP_REQ:
+ smux_handle_wakeup_req();
+ break;
+ case SMUX_WAKEUP_ACK:
+ smux_handle_wakeup_ack();
+ break;
+ default:
+ /* unexpected character */
+ if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
+ smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
+ smux_byte_loopback);
+ pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
+ (unsigned)data[i]);
+ break;
+ }
+ }
+
+ *used = i;
+}
+
+/**
+ * RX State machine - Header Magic state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * The first magic byte was already seen in IDLE; this state waits for the
+ * second magic byte to confirm a packet start, otherwise drops to IDLE.
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_magic(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int i;
+
+ if (flag) {
+ pr_err("%s: TTY RX error %d\n", __func__, flag);
+ smux_enter_reset();
+ smux.rx_state = SMUX_RX_FAILURE;
+ ++*used;
+ return;
+ }
+
+ for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
+ /* wait for completion of the magic */
+ if (data[i] == SMUX_MAGIC_WORD2) {
+ /* packet confirmed - start accumulating the header */
+ smux.recv_len = 0;
+ smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
+ smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
+ smux.rx_state = SMUX_RX_HDR;
+ } else {
+ /* unexpected / trash character */
+ pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
+ __func__, data[i], *used, len);
+ smux.rx_state = SMUX_RX_IDLE;
+ }
+ }
+
+ *used = i;
+}
+
+/**
+ * RX State machine - Packet Header state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Accumulates header bytes into recv_buf; once complete, the remaining
+ * packet size (payload + padding) is derived from the header.
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_hdr(const unsigned char *data,
+		int len, int *used, int flag)
+{
+	int i;
+	struct smux_hdr_t *hdr;
+
+	if (flag) {
+		pr_err("%s: TTY RX error %d\n", __func__, flag);
+		smux_enter_reset();
+		smux.rx_state = SMUX_RX_FAILURE;
+		++*used;
+		return;
+	}
+
+	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
+		smux.recv_buf[smux.recv_len++] = data[i];
+
+		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
+			/* complete header received */
+			hdr = (struct smux_hdr_t *)smux.recv_buf;
+			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
+
+			/*
+			 * Bound the packet size before the payload state
+			 * copies into recv_buf - a corrupt or malicious
+			 * header could otherwise overflow the receive
+			 * buffer (assumes recv_buf holds SMUX_MAX_PKT_SIZE
+			 * bytes, matching the TX-side limit).
+			 */
+			if (smux.recv_len + smux.pkt_remain
+					> SMUX_MAX_PKT_SIZE) {
+				pr_err("%s: payload size %d too large\n",
+						__func__, smux.pkt_remain);
+				smux.rx_state = SMUX_RX_IDLE;
+			} else {
+				smux.rx_state = SMUX_RX_PAYLOAD;
+			}
+		}
+	}
+	*used = i;
+}
+
+/**
+ * RX State machine - Packet Payload state processing.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Copies payload/padding bytes into recv_buf until pkt_remain reaches
+ * zero, then hands the complete packet to smux_deserialize().
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_pkt_payload(const unsigned char *data,
+ int len, int *used, int flag)
+{
+ int remaining;
+
+ if (flag) {
+ pr_err("%s: TTY RX error %d\n", __func__, flag);
+ smux_enter_reset();
+ smux.rx_state = SMUX_RX_FAILURE;
+ ++*used;
+ return;
+ }
+
+ /* copy data into rx buffer */
+ /* NOTE(review): no bounds check of recv_len + remaining against the
+ * size of recv_buf here; relies on the header stage to bound
+ * pkt_remain - confirm the header stage enforces this */
+ if (smux.pkt_remain < (len - *used))
+ remaining = smux.pkt_remain;
+ else
+ remaining = len - *used;
+
+ memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
+ smux.recv_len += remaining;
+ smux.pkt_remain -= remaining;
+ *used += remaining;
+
+ if (smux.pkt_remain == 0) {
+ /* complete packet received */
+ smux_deserialize(smux.recv_buf, smux.recv_len);
+ smux.rx_state = SMUX_RX_IDLE;
+ }
+}
+
+/**
+ * Feed data to the receive state machine.
+ *
+ * @data Pointer to data block
+ * @len Length of data
+ * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
+ *
+ * Runs the per-state handlers until all input is consumed and no handler
+ * makes a state transition (each handler updates @used and may change
+ * rx_state, so the loop repeats while either is still advancing).
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+void smux_rx_state_machine(const unsigned char *data,
+ int len, int flag)
+{
+ unsigned long flags;
+ int used;
+ int initial_rx_state;
+
+
+ SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
+ spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+ used = 0;
+ /* mark RX activity so the inactivity worker keeps the link up */
+ smux.rx_activity_flag = 1;
+ do {
+ SMUX_DBG("%s: state %d; %d of %d\n",
+ __func__, smux.rx_state, used, len);
+ initial_rx_state = smux.rx_state;
+
+ switch (smux.rx_state) {
+ case SMUX_RX_IDLE:
+ smux_rx_handle_idle(data, len, &used, flag);
+ break;
+ case SMUX_RX_MAGIC:
+ smux_rx_handle_magic(data, len, &used, flag);
+ break;
+ case SMUX_RX_HDR:
+ smux_rx_handle_hdr(data, len, &used, flag);
+ break;
+ case SMUX_RX_PAYLOAD:
+ smux_rx_handle_pkt_payload(data, len, &used, flag);
+ break;
+ default:
+ SMUX_DBG("%s: invalid state %d\n",
+ __func__, smux.rx_state);
+ smux.rx_state = SMUX_RX_IDLE;
+ break;
+ }
+ } while (used < len || smux.rx_state != initial_rx_state);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+}
+
+/**
+ * Place a channel on the transmit-ready list (if not already there) and
+ * kick the transmit worker.
+ *
+ * @ch Channel to add
+ */
+static void list_channel(struct smux_lch_t *ch)
+{
+	unsigned long irq_flags;
+
+	SMUX_DBG("%s: listing channel %d\n",
+			__func__, ch->lcid);
+
+	/* lock order: global TX lock, then per-channel TX lock */
+	spin_lock_irqsave(&smux.tx_lock_lha2, irq_flags);
+	spin_lock(&ch->tx_lock_lhb2);
+	smux.tx_activity_flag = 1;
+	if (list_empty(&ch->tx_ready_list))
+		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
+	spin_unlock(&ch->tx_lock_lhb2);
+	spin_unlock_irqrestore(&smux.tx_lock_lha2, irq_flags);
+
+	queue_work(smux_tx_wq, &smux_tx_work);
+}
+
+/**
+ * Transmit packet on the correct transport and then perform client
+ * notification.
+ *
+ * @ch Channel to transmit on
+ * @pkt Packet to transmit
+ */
+static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
+{
+	union notifier_metadata meta_write;
+	int ret;
+
+	if (!ch || !pkt)
+		return;
+
+	SMUX_LOG_PKT_TX(pkt);
+	if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
+		ret = smux_tx_loopback(pkt);
+	else
+		ret = smux_tx_tty(pkt);
+
+	/* only data packets generate write notifications */
+	if (pkt->hdr.cmd != SMUX_CMD_DATA)
+		return;
+
+	meta_write.write.pkt_priv = pkt->priv;
+	meta_write.write.buffer = pkt->payload;
+	meta_write.write.len = pkt->hdr.payload_len;
+	if (ret >= 0) {
+		SMUX_DBG("%s: PKT write done", __func__);
+		schedule_notify(ch->lcid, SMUX_WRITE_DONE,
+				&meta_write);
+	} else {
+		pr_err("%s: failed to write pkt %d\n",
+				__func__, ret);
+		schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
+				&meta_write);
+	}
+}
+
+/**
+ * Power-up the UART by requesting its clock on.
+ */
+static void smux_uart_power_on(void)
+{
+	struct uart_state *uart;
+
+	if (!smux.tty || !smux.tty->driver_data) {
+		pr_err("%s: unable to find UART port for tty %p\n",
+				__func__, smux.tty);
+		return;
+	}
+	uart = smux.tty->driver_data;
+	msm_hs_request_clock_on(uart->uart_port);
+}
+
+/**
+ * Power down the UART by requesting its clock off.
+ */
+static void smux_uart_power_off(void)
+{
+	struct uart_state *uart;
+
+	if (!smux.tty || !smux.tty->driver_data) {
+		pr_err("%s: unable to find UART port for tty %p\n",
+				__func__, smux.tty);
+		return;
+	}
+	uart = smux.tty->driver_data;
+	msm_hs_request_clock_off(uart->uart_port);
+}
+
+/**
+ * TX Wakeup Worker
+ *
+ * @work Not used
+ *
+ * Do an exponential back-off wakeup sequence with a maximum period
+ * of approximately 1 second (1 << 20 microseconds).  Each pass sends a
+ * SMUX_WAKEUP_REQ and either busy-sleeps (short delays) or reschedules
+ * itself as delayed work (long delays) until the link reports power on.
+ */
+static void smux_wakeup_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ unsigned wakeup_delay;
+ int complete = 0;
+
+ for (;;) {
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* wakeup complete */
+ complete = 1;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ } else {
+ /* retry with doubled delay, capped at the maximum */
+ wakeup_delay = smux.pwr_wakeup_delay_us;
+ smux.pwr_wakeup_delay_us <<= 1;
+ if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+ smux.pwr_wakeup_delay_us =
+ SMUX_WAKEUP_DELAY_MAX;
+ }
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ SMUX_DBG("%s: triggering wakeup\n", __func__);
+ smux_send_byte(SMUX_WAKEUP_REQ);
+
+ if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+ /* short delay - sleep inline and loop again */
+ SMUX_DBG("%s: sleeping for %u us\n", __func__,
+ wakeup_delay);
+ usleep_range(wakeup_delay, 2*wakeup_delay);
+ } else {
+ /* schedule delayed work */
+ SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+ __func__, wakeup_delay / 1000);
+ queue_delayed_work(smux_tx_wq,
+ &smux_wakeup_delayed_work,
+ msecs_to_jiffies(wakeup_delay / 1000));
+ break;
+ }
+ }
+
+ if (complete) {
+ SMUX_DBG("%s: wakeup complete\n", __func__);
+ /*
+ * Cancel any pending retry. This avoids a race condition with
+ * a new power-up request because:
+ * 1) this worker doesn't modify the state
+ * 2) this worker is processed on the same single-threaded
+ * workqueue as new TX wakeup requests
+ */
+ cancel_delayed_work(&smux_wakeup_delayed_work);
+ }
+}
+
+
+/**
+ * Inactivity timeout worker. Periodically scheduled when link is active.
+ * When it detects inactivity, it will power-down the UART link.
+ *
+ * @work Work structure (not used)
+ */
+static void smux_inactivity_worker(struct work_struct *work)
+{
+ int tx_ready = 0;
+ struct smux_pkt_t *pkt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+ spin_lock(&smux.tx_lock_lha2);
+
+ if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
+ /* no activity */
+ if (smux.powerdown_enabled) {
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* start power-down sequence */
+ pkt = smux_alloc_pkt();
+ if (pkt) {
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_OFF);
+ smux.power_state = SMUX_PWR_TURNING_OFF;
+
+ /* send power-down request */
+ pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+ pkt->hdr.flags = 0;
+ pkt->hdr.lcid = 0;
+ smux_tx_queue(pkt,
+ &smux_lch[SMUX_TEST_LCID],
+ 0);
+ tx_ready = 1;
+ }
+ }
+ } else {
+ SMUX_DBG("%s: link inactive, but powerdown disabled\n",
+ __func__);
+ }
+ }
+ smux.tx_activity_flag = 0;
+ smux.rx_activity_flag = 0;
+
+ spin_unlock(&smux.tx_lock_lha2);
+ spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+
+ if (tx_ready)
+ list_channel(&smux_lch[SMUX_TEST_LCID]);
+
+ if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
+ (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
+ /* ready to power-down the UART */
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_OFF);
+ smux_uart_power_off();
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ smux.power_state = SMUX_PWR_OFF;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ }
+
+ /* reschedule inactivity worker */
+ if (smux.power_state != SMUX_PWR_OFF)
+ queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+ msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+}
+
+/**
+ * Transmit worker handles serializing and transmitting packets onto the
+ * underlying transport.
+ *
+ * @work Work structure (not used)
+ *
+ * Lock order used here (outer to inner, as visible in this function):
+ * smux.tx_lock_lha2 -> ch->state_lock_lhb1 -> ch->tx_lock_lhb2.
+ */
+static void smux_tx_worker(struct work_struct *work)
+{
+ struct smux_pkt_t *pkt;
+ struct smux_lch_t *ch;
+ unsigned low_wm_notif;
+ unsigned lcid;
+ unsigned long flags;
+
+
+ /*
+ * Transmit packets in round-robin fashion based upon ready
+ * channels.
+ *
+ * To eliminate the need to hold a lock for the entire
+ * iteration through the channel ready list, the head of the
+ * ready-channel list is always the next channel to be
+ * processed. To send a packet, the first valid packet in
+ * the head channel is removed and the head channel is then
+ * rescheduled at the end of the queue by removing it and
+ * inserting after the tail. The locks can then be released
+ * while the packet is processed.
+ */
+ for (;;) {
+ pkt = NULL;
+ low_wm_notif = 0;
+
+ /* get the next ready channel */
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (list_empty(&smux.lch_tx_ready_list)) {
+ /* no ready channels */
+ SMUX_DBG("%s: no more ready channels, exiting\n",
+ __func__);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ break;
+ }
+ smux.tx_activity_flag = 1;
+
+ if (smux.power_state != SMUX_PWR_ON
+ && smux.power_state != SMUX_PWR_TURNING_OFF
+ && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
+ /* Link isn't ready to transmit */
+ if (smux.power_state == SMUX_PWR_OFF) {
+ /* link is off, trigger wakeup */
+ smux.pwr_wakeup_delay_us = 1;
+ SMUX_DBG("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_ON);
+ smux.power_state = SMUX_PWR_TURNING_ON;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2,
+ flags);
+ smux_uart_power_on();
+ queue_work(smux_tx_wq, &smux_wakeup_work);
+ } else {
+ SMUX_DBG("%s: can not tx with power state %d\n",
+ __func__,
+ smux.power_state);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2,
+ flags);
+ }
+ /* worker will be requeued once power-up completes */
+ break;
+ }
+
+ /* get the next packet to send and rotate channel list */
+ ch = list_first_entry(&smux.lch_tx_ready_list,
+ struct smux_lch_t,
+ tx_ready_list);
+
+ spin_lock(&ch->state_lock_lhb1);
+ spin_lock(&ch->tx_lock_lhb2);
+ if (!list_empty(&ch->tx_queue)) {
+ /*
+ * If remote TX flow control is enabled or
+ * the channel is not fully opened, then only
+ * send command packets.
+ */
+ if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
+ struct smux_pkt_t *curr;
+ list_for_each_entry(curr, &ch->tx_queue, list) {
+ if (curr->hdr.cmd != SMUX_CMD_DATA) {
+ pkt = curr;
+ break;
+ }
+ }
+ } else {
+ /* get next cmd/data packet to send */
+ pkt = list_first_entry(&ch->tx_queue,
+ struct smux_pkt_t, list);
+ }
+ }
+
+ if (pkt) {
+ list_del(&pkt->list);
+
+ /* update packet stats */
+ if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+ --ch->tx_pending_data_cnt;
+ if (ch->notify_lwm &&
+ ch->tx_pending_data_cnt
+ <= SMUX_WM_LOW) {
+ ch->notify_lwm = 0;
+ low_wm_notif = 1;
+ }
+ }
+
+ /* advance to the next ready channel */
+ list_rotate_left(&smux.lch_tx_ready_list);
+ } else {
+ /* no data in channel to send, remove from ready list */
+ list_del(&ch->tx_ready_list);
+ INIT_LIST_HEAD(&ch->tx_ready_list);
+ }
+ lcid = ch->lcid;
+ spin_unlock(&ch->tx_lock_lhb2);
+ spin_unlock(&ch->state_lock_lhb1);
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+ if (low_wm_notif)
+ schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
+
+ /* send the packet */
+ /*
+ * NOTE(review): pkt may still be NULL here (empty-queue branch
+ * above) -- confirm smux_tx_pkt()/smux_free_pkt() tolerate NULL.
+ */
+ smux_tx_pkt(ch, pkt);
+ smux_free_pkt(pkt);
+ }
+}
+
+
+/**********************************************************************/
+/* Kernel API */
+/**********************************************************************/
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ *
+ * If an option appears in both @set and @clear, the clear action is
+ * applied last and wins (each branch below is evaluated in order).
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ unsigned long flags;
+ struct smux_lch_t *ch;
+ int tx_ready = 0;
+ int ret = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ /* Local loopback mode */
+ if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
+
+ if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+ /* Remote loopback mode (also stored in local_mode) */
+ if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+
+ if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+ /* Flow control */
+ if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+ ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+
+ /* NOTE: if both set and clear request TX_STOP, ret reflects only
+ * the result of this second status command. */
+ if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+ ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either not previously opened or
+ * msm_smux_close() has been called and the SMUX_DISCONNECTED has been
+ * received).
+ *
+ * Once the remote side is opened, the client will receive a SMUX_CONNECTED
+ * event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+ int size))
+{
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt;
+ int tx_ready = 0;
+ unsigned long flags;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ /* a close is still in flight; caller should retry later */
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
+ pr_err("%s: open lcid %d local state %x invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_OPENING);
+
+ ch->local_state = SMUX_LCH_LOCAL_OPENING;
+
+ ch->priv = priv;
+ ch->notify = notify;
+ ch->get_rx_buffer = get_rx_buffer;
+ ret = 0;
+
+ /* Send Open Command */
+ pkt = smux_alloc_pkt();
+ if (!pkt) {
+ /*
+ * NOTE(review): local_state stays SMUX_LCH_LOCAL_OPENING on
+ * this failure path -- confirm a caller can recover (retry
+ * will fail the CLOSED check above).
+ */
+ ret = -ENOMEM;
+ goto out;
+ }
+ pkt->hdr.magic = SMUX_MAGIC;
+ pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+ if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
+ pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.payload_len = 0;
+ pkt->hdr.pad_len = 0;
+ smux_tx_queue(pkt, ch, 0);
+ tx_ready = 1;
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ if (tx_ready)
+ list_channel(ch);
+ return ret;
+}
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the client
+ * will receive a SMUX_DISCONNECTED notification.
+ */
+int msm_smux_close(uint8_t lcid)
+{
+ int ret = 0;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt;
+ int tx_ready = 0;
+ unsigned long flags;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ ch->local_tiocm = 0x0;
+ ch->remote_tiocm = 0x0;
+ ch->tx_pending_data_cnt = 0;
+ ch->notify_lwm = 0;
+
+ /* Purge TX queue, notifying clients of each dropped packet */
+ spin_lock(&ch->tx_lock_lhb2);
+ while (!list_empty(&ch->tx_queue)) {
+ pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
+ list);
+ list_del(&pkt->list);
+
+ if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
+ /* Open was never sent, just force to closed state */
+ union notifier_metadata meta_disconnected;
+
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ meta_disconnected.disconnected.is_ssr = 0;
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+ /* Notify client of failed write */
+ union notifier_metadata meta_write;
+
+ meta_write.write.pkt_priv = pkt->priv;
+ meta_write.write.buffer = pkt->payload;
+ meta_write.write.len = pkt->hdr.payload_len;
+ schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
+ }
+ smux_free_pkt(pkt);
+ }
+ spin_unlock(&ch->tx_lock_lhb2);
+
+ /* Send Close Command (only if a local open was actually sent) */
+ if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
+ ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_CLOSING);
+
+ ch->local_state = SMUX_LCH_LOCAL_CLOSING;
+ pkt = smux_alloc_pkt();
+ if (pkt) {
+ pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ pkt->hdr.flags = 0;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.payload_len = 0;
+ pkt->hdr.pad_len = 0;
+ smux_tx_queue(pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: pkt allocation failed\n", __func__);
+ ret = -ENOMEM;
+ }
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ * SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called,
+ * but the data will wait in the transmit queue until the channel has
+ * been fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ *
+ * Fix: the original code freed @pkt on every error path via a single
+ * "out:" label, including the -EINVAL/-E2BIG/-ENOMEM paths that run
+ * before smux_alloc_pkt() -- freeing an uninitialized pointer. The
+ * pre-allocation errors now bypass the free entirely.
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
+{
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *pkt;
+ int tx_ready = 0;
+ unsigned long flags;
+ int ret;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
+ ch->local_state != SMUX_LCH_LOCAL_OPENING) {
+ pr_err("%s: hdr.invalid local state %d channel %d\n",
+ __func__, ch->local_state, lcid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
+ pr_err("%s: payload %d too large\n",
+ __func__, len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ pkt = smux_alloc_pkt();
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pkt->hdr.cmd = SMUX_CMD_DATA;
+ pkt->hdr.lcid = lcid;
+ pkt->hdr.flags = 0;
+ pkt->hdr.payload_len = len;
+ pkt->payload = (void *)data;
+ pkt->priv = pkt_priv;
+ pkt->hdr.pad_len = 0;
+
+ spin_lock(&ch->tx_lock_lhb2);
+ /* verify high watermark */
+ SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
+
+ if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+ pr_err("%s: ch %d high watermark %d exceeded %d\n",
+ __func__, lcid, SMUX_WM_HIGH,
+ ch->tx_pending_data_cnt);
+ ret = -EAGAIN;
+ goto out_inner;
+ }
+
+ /* queue packet for transmit */
+ if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+ ch->notify_lwm = 1;
+ pr_err("%s: high watermark hit\n", __func__);
+ schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
+ }
+ list_add_tail(&pkt->list, &ch->tx_queue);
+
+ /* add to ready list */
+ if (IS_FULLY_OPENED(ch))
+ tx_ready = 1;
+
+ ret = 0;
+
+out_inner:
+ spin_unlock(&ch->tx_lock_lhb2);
+
+ /* pkt is valid here; free it only if queueing failed */
+ if (ret)
+ smux_free_pkt(pkt);
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Report whether a channel's TX queue has reached the high watermark.
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is not full
+ * 1 if it is full
+ * < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid)
+{
+ struct smux_lch_t *lch;
+ unsigned long irq_flags;
+ int full;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ lch = &smux_lch[lcid];
+
+ spin_lock_irqsave(&lch->tx_lock_lhb2, irq_flags);
+ full = (lch->tx_pending_data_cnt >= SMUX_WM_HIGH) ? 1 : 0;
+ spin_unlock_irqrestore(&lch->tx_lock_lhb2, irq_flags);
+
+ return full;
+}
+
+/**
+ * Report whether a channel's TX queue is at or below the low watermark
+ * (i.e. it has room for more packets).
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is above low watermark
+ * 1 if it's at or below the low watermark
+ * < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid)
+{
+ struct smux_lch_t *lch;
+ unsigned long irq_flags;
+ int low;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ lch = &smux_lch[lcid];
+
+ spin_lock_irqsave(&lch->tx_lock_lhb2, irq_flags);
+ low = (lch->tx_pending_data_cnt <= SMUX_WM_LOW) ? 1 : 0;
+ spin_unlock_irqrestore(&lch->tx_lock_lhb2, irq_flags);
+
+ return low;
+}
+
+/**
+ * Queue a TIOCM status update packet for transmission.
+ *
+ * @ch Channel for update
+ *
+ * @returns 0 for success, <0 for failure
+ *
+ * Channel lock must be held before calling.
+ */
+static int smux_send_status_cmd(struct smux_lch_t *ch)
+{
+ struct smux_pkt_t *status_pkt;
+
+ if (ch == NULL)
+ return -EINVAL;
+
+ status_pkt = smux_alloc_pkt();
+ if (status_pkt == NULL)
+ return -ENOMEM;
+
+ /* flags carry the current local TIOCM bits */
+ status_pkt->hdr.cmd = SMUX_CMD_STATUS;
+ status_pkt->hdr.lcid = ch->lcid;
+ status_pkt->hdr.flags = ch->local_tiocm;
+ status_pkt->hdr.payload_len = 0;
+ status_pkt->hdr.pad_len = 0;
+ smux_tx_queue(status_pkt, ch, 0);
+
+ return 0;
+}
+
+/**
+ * Internal helper for translating SMUX status bits into TIOCM bits.
+ * Caller must already hold state_lock_lhb1.
+ *
+ * @ch Channel pointer
+ *
+ * @returns TIOCM status
+ */
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
+{
+ long bits = 0;
+ uint8_t remote = ch->remote_tiocm;
+ uint8_t local = ch->local_tiocm;
+
+ /* remote-side bits map to the modem-input signals */
+ if (remote & SMUX_CMD_STATUS_RTC)
+ bits |= TIOCM_DSR;
+ if (remote & SMUX_CMD_STATUS_RTR)
+ bits |= TIOCM_CTS;
+ if (remote & SMUX_CMD_STATUS_RI)
+ bits |= TIOCM_RI;
+ if (remote & SMUX_CMD_STATUS_DCD)
+ bits |= TIOCM_CD;
+
+ /* local-side bits map to the modem-output signals */
+ if (local & SMUX_CMD_STATUS_RTC)
+ bits |= TIOCM_DTR;
+ if (local & SMUX_CMD_STATUS_RTR)
+ bits |= TIOCM_RTS;
+
+ return bits;
+}
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ * < 0 Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid)
+{
+ struct smux_lch_t *lch;
+ unsigned long irq_flags;
+ long bits;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ lch = &smux_lch[lcid];
+
+ /* snapshot the status under the channel state lock */
+ spin_lock_irqsave(&lch->state_lock_lhb1, irq_flags);
+ bits = msm_smux_tiocm_get_atomic(lch);
+ spin_unlock_irqrestore(&lch->state_lock_lhb1, irq_flags);
+
+ return bits;
+}
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ uint8_t old_status;
+ uint8_t status_set = 0x0;
+ uint8_t status_clear = 0x0;
+ int tx_ready = 0;
+ int ret = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ /* translate TIOCM bits into SMUX status bits */
+ status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ /* clear applied after set, so clear dominates on overlap */
+ old_status = ch->local_tiocm;
+ ch->local_tiocm |= status_set;
+ ch->local_tiocm &= ~status_clear;
+
+ /* only send a status command if something actually changed */
+ if (ch->local_tiocm != old_status) {
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**********************************************************************/
+/* Line Discipline Interface */
+/**********************************************************************/
+
+/**
+ * Line discipline open: binds SMUX to @tty. Only a single instance
+ * is supported.
+ *
+ * Fix: the write-op sanity check now runs before ld_open_count is
+ * incremented. The original incremented first and never rolled back
+ * on the -EINVAL path, so every later open failed with -EEXIST.
+ */
+static int smuxld_open(struct tty_struct *tty)
+{
+ int i;
+ int tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!smux.is_initialized)
+ return -ENODEV;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count) {
+ pr_err("%s: %p multiple instances not supported\n",
+ __func__, tty);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /* validate the tty before claiming the single open slot */
+ if (tty->ops->write == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ++smux.ld_open_count;
+
+ /* connect to TTY */
+ smux.tty = tty;
+ tty->disc_data = &smux;
+ tty->receive_room = TTY_RECEIVE_ROOM;
+ tty_driver_flush_buffer(tty);
+
+ /* power-down the UART if we are idle */
+ spin_lock(&smux.tx_lock_lha2);
+ if (smux.power_state == SMUX_PWR_OFF) {
+ SMUX_DBG("%s: powering off uart\n", __func__);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ spin_unlock(&smux.tx_lock_lha2);
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ } else {
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ /* register platform devices */
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
+ tmp = platform_device_register(&smux_devs[i]);
+ if (tmp)
+ pr_err("%s: error %d registering device %s\n",
+ __func__, tmp, smux_devs[i].name);
+ }
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+ return ret;
+}
+
+/* Line discipline close: unregisters platform devices and releases the
+ * single open slot.
+ *
+ * NOTE(review): smux.tty and tty->disc_data are not cleared here, so
+ * stale pointers to the departing tty remain -- confirm this is
+ * intentional / handled elsewhere.
+ */
+static void smuxld_close(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count <= 0) {
+ pr_err("%s: invalid ld count %d\n", __func__,
+ smux.ld_open_count);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
+ platform_device_unregister(&smux_devs[i]);
+
+ --smux.ld_open_count;
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+}
+
+/**
+ * Receive data from TTY Line Discipline.
+ *
+ * @tty TTY structure
+ * @cp Character data
+ * @fp Flag data (may be NULL, meaning all bytes are TTY_NORMAL)
+ * @count Size of character and flag data
+ *
+ * Fix: the flag buffer @fp was dereferenced unconditionally; tty
+ * drivers are allowed to pass NULL when every byte is valid, which
+ * would have crashed the error-scan loop. The scan is now skipped
+ * when no flag buffer is provided.
+ */
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ int i;
+ int last_idx = 0;
+ const char *tty_name = NULL;
+ char *f;
+
+ if (smux_debug_mask & MSM_SMUX_DEBUG)
+ print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
+ 16, 1, cp, count, true);
+
+ /* verify error flags (NULL fp means the whole buffer is clean) */
+ if (fp) {
+ for (i = 0, f = fp; i < count; ++i, ++f) {
+ if (*f != TTY_NORMAL) {
+ if (tty)
+ tty_name = tty->name;
+ pr_err("%s: TTY %s Error %d (%s)\n", __func__,
+ tty_name, *f, tty_flag_to_str(*f));
+
+ /* feed all previous valid data to the parser */
+ smux_rx_state_machine(cp + last_idx,
+ i - last_idx, TTY_NORMAL);
+
+ /* feed bad data to parser */
+ smux_rx_state_machine(cp + i, 1, *f);
+ last_idx = i + 1;
+ }
+ }
+ }
+
+ /* feed remaining data to RX state machine */
+ smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
+}
+
+/* The remaining line-discipline entry points are intentionally
+ * unsupported: SMUX clients use the msm_smux_* kernel API rather than
+ * the tty file interface. Each stub logs and (where applicable)
+ * returns -ENODEV. */
+static void smuxld_flush_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *tbl)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static void smuxld_write_wakeup(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+/* N_SMUX line discipline operations; only open/close/receive_buf carry
+ * real behavior, the rest are "not supported" stubs. */
+static struct tty_ldisc_ops smux_ldisc_ops = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_smux",
+ .open = smuxld_open,
+ .close = smuxld_close,
+ .flush_buffer = smuxld_flush_buffer,
+ .chars_in_buffer = smuxld_chars_in_buffer,
+ .read = smuxld_read,
+ .write = smuxld_write,
+ .ioctl = smuxld_ioctl,
+ .poll = smuxld_poll,
+ .receive_buf = smuxld_receive_buf,
+ .write_wakeup = smuxld_write_wakeup
+};
+
+/**
+ * Module init: initializes global SMUX state, registers the N_SMUX
+ * line discipline, and initializes the logical channels.
+ *
+ * Fix: if lch_init() fails, the already-registered line discipline is
+ * now unregistered so a failed load does not leave a dangling ldisc.
+ */
+static int __init smux_init(void)
+{
+ int ret;
+
+ spin_lock_init(&smux.lock_lha0);
+
+ spin_lock_init(&smux.rx_lock_lha1);
+ smux.rx_state = SMUX_RX_IDLE;
+ smux.power_state = SMUX_PWR_OFF;
+ smux.pwr_wakeup_delay_us = 1;
+ smux.powerdown_enabled = 0;
+ smux.rx_activity_flag = 0;
+ smux.tx_activity_flag = 0;
+ smux.recv_len = 0;
+ smux.tty = NULL;
+ smux.ld_open_count = 0;
+ smux.in_reset = 0;
+ smux.is_initialized = 1;
+ smux_byte_loopback = 0;
+
+ spin_lock_init(&smux.tx_lock_lha2);
+ INIT_LIST_HEAD(&smux.lch_tx_ready_list);
+
+ ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
+ if (ret != 0) {
+ pr_err("%s: error %d registering line discipline\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = lch_init();
+ if (ret != 0) {
+ pr_err("%s: lch_init failed\n", __func__);
+ /* undo the ldisc registration on failure */
+ tty_unregister_ldisc(N_SMUX);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Module exit: tears down the N_SMUX line discipline, logging on
+ * failure (nothing else can be done at exit time). */
+static void __exit smux_exit(void)
+{
+ int rc;
+
+ rc = tty_unregister_ldisc(N_SMUX);
+ if (rc != 0)
+ pr_err("%s error %d unregistering line discipline\n",
+ __func__, rc);
+}
+
+module_init(smux_init);
+module_exit(smux_exit);
+
+MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_LDISC(N_SMUX);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index d310381..d0b8323 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1964,12 +1964,25 @@
INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
mutex_init(&msm_uport->clk_mutex);
+ clk_prepare_enable(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_prepare_enable(msm_uport->pclk);
+
ret = uartdm_init_port(uport);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ clk_disable_unprepare(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_disable_unprepare(msm_uport->pclk);
return ret;
+ }
/* configure the CR Protection to Enable */
msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
+
+ clk_disable_unprepare(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_disable_unprepare(msm_uport->pclk);
+
/*
* Enable Command register protection before going ahead as this hw
* configuration makes sure that issued cmd to CR register gets complete
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
new file mode 100644
index 0000000..26a49a0
--- /dev/null
+++ b/drivers/tty/smux_ctl.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Serial Mux Control Driver -- Provides a binary serial muxed control
+ * port interface.
+ */
+
+#define DEBUG
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include <asm/ioctls.h>
+
+#define MAX_WRITE_RETRY 5
+#define MAGIC_NO_V1 0x33FC
+#define DEVICE_NAME "smuxctl"
+#define SMUX_CTL_MAX_BUF_SIZE 2048
+#define SMUX_CTL_MODULE_NAME "smux_ctl"
+#define DEBUG
+#define DEBUG_LOOPBACK
+
+/* Debug verbosity for this driver; writable via module parameter. */
+static int msm_smux_ctl_debug_mask;
+module_param_named(debug_mask, msm_smux_ctl_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* SMUX logical channel IDs exposed to user space as smuxctl devices.
+ * NOTE(review): range checks elsewhere compare against the last entry,
+ * which assumes this table is sorted ascending -- confirm. */
+static uint32_t smux_ctl_ch_id[] = {
+ SMUX_DATA_CTL_0,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_CSVT_CTL_0
+};
+
+#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
+
+/* Per-channel character-device state for one smuxctl node. */
+struct smux_ctl_dev {
+ int id; /* SMUX logical channel ID */
+ char name[10];
+ struct cdev cdev;
+ struct device *devicep;
+ struct mutex dev_lock; /* guards state/flags/stats below */
+ atomic_t ref_count; /* open count; first open connects SMUX */
+ int state; /* SMUX_CONNECTED / SMUX_DISCONNECTED */
+ int is_channel_reset; /* set when disconnect was due to SSR */
+ int is_high_wm; /* TX high watermark currently hit */
+ int write_pending;
+
+ struct mutex rx_lock; /* guards read_avail and rx_list */
+ uint32_t read_avail; /* total bytes queued in rx_list */
+ struct list_head rx_list;
+
+ wait_queue_head_t read_wait_queue;
+ wait_queue_head_t write_wait_queue;
+
+ /* cumulative per-channel counters */
+ struct {
+ uint32_t bytes_tx;
+ uint32_t bytes_rx;
+ uint32_t pkts_tx;
+ uint32_t pkts_rx;
+ uint32_t cnt_ssr;
+ uint32_t cnt_read_fail;
+ uint32_t cnt_write_fail;
+ uint32_t cnt_high_wm_hit;
+ } stats;
+
+} *smux_ctl_devp[SMUX_CTL_NUM_CHANNELS];
+
+/* One received SMUX packet (buffer owned by the list element). */
+struct smux_ctl_pkt {
+ int data_size;
+ void *data;
+};
+
+/* rx_list node wrapping a received packet. */
+struct smux_ctl_list_elem {
+ struct list_head list;
+ struct smux_ctl_pkt ctl_pkt;
+};
+
+struct class *smux_ctl_classp;
+static dev_t smux_ctl_number;
+static uint32_t smux_ctl_inited;
+
+/* bits for msm_smux_ctl_debug_mask */
+enum {
+ MSM_SMUX_CTL_DEBUG = 1U << 0,
+ MSM_SMUX_CTL_DUMP_BUFFER = 1U << 1,
+};
+
+/* Debug helpers: compiled in only when DEBUG is defined (it is, above). */
+#if defined(DEBUG)
+
+/* Indexed by SMUX event type for log readability. */
+static const char *smux_ctl_event_str[] = {
+ "SMUX_CONNECTED",
+ "SMUX_DISCONNECTED",
+ "SMUX_READ_DONE",
+ "SMUX_READ_FAIL",
+ "SMUX_WRITE_DONE",
+ "SMUX_WRITE_FAIL",
+ "SMUX_TIOCM_UPDATE",
+ "SMUX_LOW_WM_HIT",
+ "SMUX_HIGH_WM_HIT",
+};
+
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DUMP_BUFFER) { \
+ int i; \
+ pr_err("%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ pr_err("%.2x", buf[i]); \
+ pr_err("\n"); \
+ } \
+} while (0)
+
+#define SMUXCTL_DBG(x...) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DEBUG) \
+ pr_err(x); \
+} while (0)
+
+
+#else
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#define SMUXCTL_DBG(x...) do {} while (0)
+#endif
+
+/* When DEBUG_LOOPBACK is set, opened channels are put into local
+ * loopback mode for testing without a remote endpoint. */
+#if defined(DEBUG_LOOPBACK)
+#define SMUXCTL_SET_LOOPBACK(lcid) \
+ msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0)
+#else
+#define SMUXCTL_SET_LOOPBACK(lcid) do {} while (0)
+#endif
+
+/* Map an SMUX channel ID to its index in smux_ctl_ch_id[], or -ENODEV
+ * if the channel is not exported to user space. */
+static int get_ctl_dev_index(int id)
+{
+ int idx;
+
+ for (idx = 0; idx < SMUX_CTL_NUM_CHANNELS; idx++)
+ if (id == smux_ctl_ch_id[idx])
+ return idx;
+
+ return -ENODEV;
+}
+
+/* SMUX callback: allocate a receive buffer of @size bytes for channel
+ * @priv. Returns 0 with *buffer/*pkt_priv filled in, or a negative
+ * errno. The buffer is freed later by the READ_DONE/READ_FAIL paths. */
+static int smux_ctl_get_rx_buf_cb(void *priv, void **pkt_priv,
+ void **buffer, int size)
+{
+ void *buf = NULL;
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ int dev_index;
+
+ /* NOTE(review): upper-bound check assumes smux_ctl_ch_id[] is
+ * sorted ascending -- confirm */
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ if (!buffer || 0 >= size)
+ return -EINVAL;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not "
+ "exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Allocating Rx buf size %d "
+ "for ch%d\n",
+ __func__, size, smux_ctl_devp[dev_index]->id);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: buffer allocation failed: "
+ "Ch%d, size %d ", __func__, id, size);
+ return -ENOMEM;
+ }
+
+ *buffer = buf;
+ *pkt_priv = NULL;
+ return 0;
+
+}
+
+/**
+ * SMUX event callback for a control channel: updates device state,
+ * queues received data, frees write buffers, and wakes any readers or
+ * writers blocked on the corresponding wait queues.
+ *
+ * Fix: the debug trace indexed smux_ctl_event_str[] with an unchecked
+ * event_type; the switch's default case shows out-of-range events are
+ * expected, so that was an out-of-bounds read. The index is now
+ * bounds-checked.
+ */
+void smux_ctl_notify_cb(void *priv, int event_type, const void *metadata)
+{
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ struct smux_ctl_list_elem *list_elem = NULL;
+ int dev_index;
+ void *data;
+ int len;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not exported "
+ "to user-space\n", __func__, id);
+ return;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Ch%d, Event %d (%s)\n",
+ __func__, smux_ctl_devp[dev_index]->id,
+ event_type,
+ (event_type >= 0 &&
+ event_type < ARRAY_SIZE(smux_ctl_event_str)) ?
+ smux_ctl_event_str[event_type] : "UNKNOWN");
+
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_CONNECTED;
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ smux_ctl_devp[dev_index]->is_channel_reset = 0;
+ smux_ctl_devp[dev_index]->read_avail = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_DISCONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_DISCONNECTED;
+ smux_ctl_devp[dev_index]->is_channel_reset =
+ ((struct smux_meta_disconnected *)metadata)->is_ssr;
+ if (smux_ctl_devp[dev_index]->is_channel_reset)
+ smux_ctl_devp[dev_index]->stats.cnt_ssr++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_FAIL:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_read_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_DONE:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ len = ((struct smux_meta_read *)metadata)->len;
+
+ if (data && len > 0) {
+ list_elem = kmalloc(sizeof(struct smux_ctl_list_elem),
+ GFP_KERNEL);
+ if (list_elem) {
+ list_elem->ctl_pkt.data = data;
+ list_elem->ctl_pkt.data_size = len;
+
+ mutex_lock(&smux_ctl_devp[dev_index]->rx_lock);
+ list_add_tail(&list_elem->list,
+ &smux_ctl_devp[dev_index]->rx_list);
+ smux_ctl_devp[dev_index]->read_avail += len;
+ mutex_unlock(
+ &smux_ctl_devp[dev_index]->rx_lock);
+ } else {
+ /* drop the packet rather than leak it */
+ kfree(data);
+ }
+ }
+
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_WRITE_DONE:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->write_pending = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_WRITE_FAIL:
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_write_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 1;
+ smux_ctl_devp[dev_index]->stats.cnt_high_wm_hit++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ break;
+
+ case SMUX_TIOCM_UPDATE:
+ default:
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Event %d not supported\n",
+ __func__, event_type);
+ break;
+
+ }
+
+}
+
+/* Character-device open. The first opener triggers the SMUX channel
+ * open and blocks (up to 5s) for SMUX_CONNECTED; later openers just
+ * bump the reference count.
+ *
+ * NOTE(review): a second open that races in before the first reaches
+ * SMUX_CONNECTED skips the wait entirely -- confirm that is acceptable.
+ */
+int smux_ctl_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smux_ctl_dev *devp;
+
+ if (!smux_ctl_inited)
+ return -EIO;
+
+ devp = container_of(inode->i_cdev, struct smux_ctl_dev, cdev);
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ if (1 == atomic_add_return(1, &devp->ref_count)) {
+
+ SMUXCTL_SET_LOOPBACK(devp->id);
+ r = msm_smux_open(devp->id,
+ devp,
+ smux_ctl_notify_cb,
+ smux_ctl_get_rx_buf_cb);
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: smux_open failed "
+ "for smuxctl%d with rc %d\n",
+ __func__, devp->id, r);
+ atomic_dec(&devp->ref_count);
+ return r;
+ }
+
+ /* wait for the CONNECTED notification (or 5s timeout) */
+ r = wait_event_interruptible_timeout(
+ devp->write_wait_queue,
+ (devp->state == SMUX_CONNECTED),
+ (5 * HZ));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "SMUX open timed out: %d, LCID %d\n",
+ __func__, r, devp->id);
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+
+ } else if (devp->state != SMUX_CONNECTED) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Invalid open notification\n", __func__);
+ r = -ENODEV;
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+ }
+ }
+
+ file->private_data = devp;
+ return 0;
+}
+
+/* Character-device release. The last closer drains and frees any
+ * queued RX packets and closes the underlying SMUX channel. */
+int smux_ctl_release(struct inode *inode, struct file *file)
+{
+ struct smux_ctl_dev *devp;
+ struct smux_ctl_list_elem *list_elem = NULL;
+
+ devp = file->private_data;
+ if (!devp)
+ return -EINVAL;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ mutex_lock(&devp->dev_lock);
+ if (atomic_dec_and_test(&devp->ref_count)) {
+ /* last reference: free all pending RX data */
+ mutex_lock(&devp->rx_lock);
+ while (!list_empty(&devp->rx_list)) {
+ list_elem = list_first_entry(
+ &devp->rx_list,
+ struct smux_ctl_list_elem,
+ list);
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ }
+ devp->read_avail = 0;
+ mutex_unlock(&devp->rx_lock);
+ msm_smux_close(devp->id);
+ }
+ mutex_unlock(&devp->dev_lock);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Wait-condition helper for reads: returns a negative errno when the read
+ * should abort, 0 when the caller must keep waiting, or the number of
+ * readable bytes.
+ *
+ * Fix: the original set r = -ERESTARTSYS on signal_pending() but then
+ * unconditionally overwrote r in the following un-chained if/else block,
+ * so a pending signal never aborted the wait.  The chain now mirrors
+ * smux_ctl_writeable(), where the checks are correctly linked.
+ */
+static int smux_ctl_readable(int id)
+{
+	int r;
+	int dev_index;
+
+	if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+		return -ENODEV;
+
+	dev_index = get_ctl_dev_index(id);
+	if (dev_index < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d "
+			"is not exported to user-space\n",
+			__func__, id);
+		return -ENODEV;
+	}
+
+	mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+	if (signal_pending(current))
+		r = -ERESTARTSYS;
+	else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+		smux_ctl_devp[dev_index]->is_channel_reset != 0)
+		r = -ENETRESET;
+	else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+		r = -ENODEV;
+	else
+		r = smux_ctl_devp[dev_index]->read_avail;
+
+	mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+	return r;
+}
+
+/*
+ * Blocking read: sleeps until a whole control packet is available, then
+ * copies exactly one packet to user space.  Packets are never split — if
+ * the user buffer is smaller than the packet, -ENOMEM is returned and the
+ * packet stays queued.  Returns the packet size on success or a negative
+ * errno.
+ */
+ssize_t smux_ctl_read(struct file *file,
+			char __user *buf,
+			size_t count,
+			loff_t *ppos)
+{
+	int r = 0, id, bytes_to_read, read_err;
+	struct smux_ctl_dev *devp;
+	struct smux_ctl_list_elem *list_elem = NULL;
+
+	devp = file->private_data;
+
+	if (!devp)
+		return -ENODEV;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: read from ch%d\n",
+		__func__, devp->id);
+
+	id = devp->id;
+	mutex_lock(&devp->rx_lock);
+	/* Drop the lock while sleeping; re-check read_avail after each
+	 * wakeup because another reader may have drained the list. */
+	while (devp->read_avail <= 0) {
+		mutex_unlock(&devp->rx_lock);
+		/* read_err captures the readable() verdict: <0 aborts,
+		 * >0 means data is available. */
+		r = wait_event_interruptible(devp->read_wait_queue,
+			0 != (read_err = smux_ctl_readable(id)));
+
+		if (r < 0) {
+			pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+				"wait_event_interruptible "
+				"ret %i\n", __func__, r);
+			return r;
+		}
+
+		if (read_err < 0) {
+			pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+				" Read block failed for Ch%d, err %d\n",
+				__func__, devp->id, read_err);
+			return read_err;
+		}
+
+		mutex_lock(&devp->rx_lock);
+	}
+
+	if (list_empty(&devp->rx_list)) {
+		/* read_avail said data existed but the list is empty —
+		 * treat as a transient race and let the caller retry. */
+		mutex_unlock(&devp->rx_lock);
+		SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+			"Nothing in ch%d's rx_list\n", __func__,
+			devp->id);
+		return -EAGAIN;
+	}
+
+	list_elem = list_first_entry(&devp->rx_list,
+			struct smux_ctl_list_elem, list);
+	bytes_to_read = (uint32_t)(list_elem->ctl_pkt.data_size);
+	/* NOTE(review): count is size_t but printed with %d below —
+	 * harmless on 32-bit, a format mismatch on 64-bit. */
+	if (bytes_to_read > count) {
+		mutex_unlock(&devp->rx_lock);
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"Packet size %d > buf size %d\n", __func__,
+			bytes_to_read, count);
+		return -ENOMEM;
+	}
+
+	if (copy_to_user(buf, list_elem->ctl_pkt.data, bytes_to_read)) {
+		mutex_unlock(&devp->rx_lock);
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"copy_to_user failed for ch%d\n", __func__,
+			devp->id);
+		return -EFAULT;
+	}
+
+	/* Consume the packet: account, unlink, and free it. */
+	devp->read_avail -= bytes_to_read;
+	list_del(&list_elem->list);
+	kfree(list_elem->ctl_pkt.data);
+	kfree(list_elem);
+	devp->stats.pkts_rx++;
+	devp->stats.bytes_rx += bytes_to_read;
+	mutex_unlock(&devp->rx_lock);
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+		"Returning %d bytes to ch%d\n", __func__,
+		bytes_to_read, devp->id);
+	return bytes_to_read;
+}
+
+/*
+ * Wait-condition helper for writes: returns a negative errno when the
+ * write should abort, 0 when the caller must keep waiting (high watermark
+ * hit or a write already in flight), or the writeable byte budget.
+ */
+static int smux_ctl_writeable(int id)
+{
+	int r;
+	int dev_index;
+
+	if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+		return -ENODEV;
+
+	dev_index = get_ctl_dev_index(id);
+	if (dev_index < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"Ch%d is not exported to user-space\n",
+			__func__, id);
+		return -ENODEV;
+	}
+
+	mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+	/* Signal check must stay first in the chain so a pending signal
+	 * aborts the wait instead of being overwritten. */
+	if (signal_pending(current))
+		r = -ERESTARTSYS;
+	else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+		smux_ctl_devp[dev_index]->is_channel_reset != 0)
+		r = -ENETRESET;
+	else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+		r = -ENODEV;
+	else if (smux_ctl_devp[dev_index]->is_high_wm ||
+		smux_ctl_devp[dev_index]->write_pending)
+		r = 0;
+	else
+		r = SMUX_CTL_MAX_BUF_SIZE;
+
+	mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+	return r;
+
+}
+
+/*
+ * Blocking write: waits for the channel to be writeable, copies the user
+ * buffer into a kernel buffer, hands it to SMUX, then waits again until
+ * the asynchronous write completes (write_pending is cleared by the SMUX
+ * callback, which also wakes write_wait_queue).  Returns count on success
+ * or a negative errno.
+ */
+ssize_t smux_ctl_write(struct file *file,
+		const char __user *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	int r = 0, id, write_err;
+	char *temp_buf;
+	struct smux_ctl_dev *devp;
+
+	/* count is unsigned, so this only rejects count == 0. */
+	if (count <= 0)
+		return -EINVAL;
+
+	devp = file->private_data;
+	if (!devp)
+		return -ENODEV;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: writing %i bytes on ch%d\n",
+		__func__, count, devp->id);
+
+	id = devp->id;
+	/* First wait: channel connected and no write already in flight. */
+	r = wait_event_interruptible(devp->write_wait_queue,
+			0 != (write_err = smux_ctl_writeable(id)));
+
+	if (r < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME
+			": %s: wait_event_interruptible "
+			"ret %i\n", __func__, r);
+		return r;
+	}
+
+	if (write_err < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+			"Write block failed for Ch%d, err %d\n",
+			__func__, devp->id, write_err);
+		return write_err;
+	}
+
+	temp_buf = kmalloc(count, GFP_KERNEL);
+	if (!temp_buf) {
+		pr_err(SMUX_CTL_MODULE_NAME
+			": %s: temp_buf alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(temp_buf, buf, count)) {
+		pr_err(SMUX_CTL_MODULE_NAME
+			": %s: copy_from_user failed\n", __func__);
+		kfree(temp_buf);
+		return -EFAULT;
+	}
+
+	/* Mark the write in flight so smux_ctl_writeable() returns 0
+	 * until the completion callback clears the flag. */
+	mutex_lock(&devp->dev_lock);
+	devp->write_pending = 1;
+	mutex_unlock(&devp->dev_lock);
+
+	/* temp_buf ownership passes to SMUX on success — TODO confirm
+	 * the write-done callback frees it. */
+	r = msm_smux_write(id, NULL, (void *)temp_buf, count);
+	if (r < 0) {
+		/* NOTE(review): log text is missing a space ("Ch%dfailed"). */
+		pr_err(SMUX_CTL_MODULE_NAME
+			": %s: smux_write on Ch%dfailed, err %d\n",
+			__func__, id, r);
+		mutex_lock(&devp->dev_lock);
+		devp->write_pending = 0;
+		mutex_unlock(&devp->dev_lock);
+		return r;
+	}
+
+	/* Second wait: block until the async write has been drained. */
+	r = wait_event_interruptible(devp->write_wait_queue,
+			0 != (write_err = smux_ctl_writeable(id)));
+	if (r < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
+			"ret %i\n", __func__, r);
+		mutex_lock(&devp->dev_lock);
+		devp->write_pending = 0;
+		mutex_unlock(&devp->dev_lock);
+		return r;
+	}
+
+	mutex_lock(&devp->dev_lock);
+	devp->write_pending = 0;
+	devp->stats.pkts_tx++;
+	devp->stats.bytes_tx += count;
+	mutex_unlock(&devp->dev_lock);
+	return count;
+}
+
+/*
+ * ioctl handler: supports only the TIOCM get/set modem-signal operations,
+ * which are forwarded directly to the SMUX core.
+ */
+static long smux_ctl_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	long status;
+	struct smux_ctl_dev *dev_ptr = file->private_data;
+
+	if (!dev_ptr)
+		return -ENODEV;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+		__func__, dev_ptr->id);
+
+	if (cmd == TIOCMGET)
+		status = msm_smux_tiocm_get(dev_ptr->id);
+	else if (cmd == TIOCMSET)
+		status = msm_smux_tiocm_set(dev_ptr->id, arg, ~arg);
+	else
+		status = -EINVAL;
+
+	return status;
+}
+
+/* File operations for the exported smux_ctl character devices. */
+static const struct file_operations smux_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = smux_ctl_open,
+	.release = smux_ctl_release,
+	.read = smux_ctl_read,
+	.write = smux_ctl_write,
+	.unlocked_ioctl = smux_ctl_ioctl,
+};
+
+/*
+ * Platform probe: allocate per-channel state, then register the chrdev
+ * region, device class, and one cdev + device node per control channel.
+ *
+ * Fixes over the original:
+ *  - kzalloc() returns NULL on failure (never an ERR_PTR), so the result
+ *    is checked with !ptr instead of IS_ERR(), which could never fire.
+ *  - A cdev_add()/device_create() failure no longer kfree()s the current
+ *    entry at the failure site AND again in the error0 loop after i is
+ *    reset to SMUX_CTL_NUM_CHANNELS (double free) — all frees now happen
+ *    exactly once in error0.
+ *  - error2 destroys devices with the same dev_t used at creation time
+ *    (smux_ctl_number + i); MKDEV(MAJOR(...), i) dropped the base minor.
+ *  - unregister_chrdev_region() takes a dev_t, not a bare major number.
+ */
+static int smux_ctl_probe(struct platform_device *pdev)
+{
+	int i;
+	int r;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+		smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
+					GFP_KERNEL);
+		if (!smux_ctl_devp[i]) {
+			pr_err(SMUX_CTL_MODULE_NAME
+				": %s kmalloc() ENOMEM\n", __func__);
+			r = -ENOMEM;
+			goto error0;
+		}
+
+		smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
+		atomic_set(&smux_ctl_devp[i]->ref_count, 0);
+		smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
+		/* All other flags, counters, and stats fields start at
+		 * zero courtesy of kzalloc(). */
+
+		mutex_init(&smux_ctl_devp[i]->dev_lock);
+		init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
+		init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
+		mutex_init(&smux_ctl_devp[i]->rx_lock);
+		INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+	}
+
+	r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
+			DEVICE_NAME);
+	if (IS_ERR_VALUE(r)) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"alloc_chrdev_region() ret %i.\n",
+			__func__, r);
+		goto error0;
+	}
+
+	smux_ctl_classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(smux_ctl_classp)) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+			"class_create() ENOMEM\n", __func__);
+		r = -ENOMEM;
+		goto error1;
+	}
+
+	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+		cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
+		smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
+
+		r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+		if (IS_ERR_VALUE(r)) {
+			pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+				"cdev_add() ret %i\n", __func__, r);
+			goto error2;
+		}
+
+		smux_ctl_devp[i]->devicep =
+			device_create(smux_ctl_classp, NULL,
+				(smux_ctl_number + i), NULL,
+				DEVICE_NAME "%d", smux_ctl_ch_id[i]);
+		if (IS_ERR(smux_ctl_devp[i]->devicep)) {
+			pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+				"device_create() ENOMEM\n", __func__);
+			r = -ENOMEM;
+			cdev_del(&smux_ctl_devp[i]->cdev);
+			goto error2;
+		}
+	}
+
+	smux_ctl_inited = 1;
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+		"SMUX Control Port Driver Initialized.\n", __func__);
+	return 0;
+
+error2:
+	/* Tear down the fully-registered entries below index i. */
+	while (--i >= 0) {
+		device_destroy(smux_ctl_classp, smux_ctl_number + i);
+		cdev_del(&smux_ctl_devp[i]->cdev);
+	}
+	class_destroy(smux_ctl_classp);
+	i = SMUX_CTL_NUM_CHANNELS;	/* error0 frees every allocation */
+error1:
+	unregister_chrdev_region(smux_ctl_number,
+			SMUX_CTL_NUM_CHANNELS);
+error0:
+	while (--i >= 0)
+		kfree(smux_ctl_devp[i]);
+	return r;
+}
+
+/*
+ * Platform remove: unwind everything probe registered.
+ *
+ * Fixes over the original: the device node is destroyed before its
+ * backing state is freed (the original kfree()d first); device_destroy()
+ * is called with the same dev_t used by device_create() instead of
+ * MKDEV(MAJOR(...), i), which dropped the base minor; and
+ * unregister_chrdev_region() is given the dev_t, not a bare major.
+ */
+static int smux_ctl_remove(struct platform_device *pdev)
+{
+	int i;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+		device_destroy(smux_ctl_classp, smux_ctl_number + i);
+		cdev_del(&smux_ctl_devp[i]->cdev);
+		kfree(smux_ctl_devp[i]);
+	}
+	class_destroy(smux_ctl_classp);
+	unregister_chrdev_region(smux_ctl_number,
+			SMUX_CTL_NUM_CHANNELS);
+
+	return 0;
+}
+
+/* Platform driver glue; bound by name against the "SMUX_CTL" device. */
+static struct platform_driver smux_ctl_driver = {
+	.probe = smux_ctl_probe,
+	.remove = smux_ctl_remove,
+	.driver = {
+		.name = "SMUX_CTL",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module entry point: enable debug output and register the driver. */
+static int __init smux_ctl_init(void)
+{
+	int rc;
+
+	/* Turn on all control-port debug categories by default. */
+	msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+	rc = platform_driver_register(&smux_ctl_driver);
+	return rc;
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+/*
+ * debugfs read: render per-channel state and statistics into a static
+ * buffer and hand it back with simple_read_from_buffer().
+ *
+ * Fixes over the original format strings: the statistics line had no
+ * separator between "PktsRx=%04d" and "CntSSR" (adjacent string literals
+ * concatenate), and "CntReadFail%02d" was missing its '='.
+ *
+ * NOTE(review): debug_buffer is a shared static with no lock, so two
+ * concurrent readers can interleave output — acceptable for debugfs.
+ */
+static ssize_t debug_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	int i;
+	if (!smux_ctl_inited) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s: SMUX_CTL not yet inited\n",
+			__func__);
+		return -EIO;
+	}
+
+	bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+		"SMUX_CTL Channel States:\n");
+
+	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+		bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+			"Ch%02d %s RefCnt=%01d State=%02d "
+			"SSR=%02d HighWM=%02d ReadAvail=%04d WritePending=%02d\n",
+			smux_ctl_devp[i]->id,
+			smux_ctl_devp[i]->name,
+			atomic_read(&smux_ctl_devp[i]->ref_count),
+			smux_ctl_devp[i]->state,
+			smux_ctl_devp[i]->is_channel_reset,
+			smux_ctl_devp[i]->is_high_wm,
+			smux_ctl_devp[i]->read_avail,
+			smux_ctl_devp[i]->write_pending);
+	}
+
+	bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+		"\nSMUX_CTL Channel Statistics:\n");
+	for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+		bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+			"Ch%02d %s BytesTX=%08d "
+			"BytesRx=%08d PktsTx=%04d PktsRx=%04d "
+			"CntSSR=%02d CntHighWM=%02d "
+			"CntReadFail=%02d CntWriteFailed=%02d\n",
+			smux_ctl_devp[i]->id,
+			smux_ctl_devp[i]->name,
+			smux_ctl_devp[i]->stats.bytes_tx,
+			smux_ctl_devp[i]->stats.bytes_rx,
+			smux_ctl_devp[i]->stats.pkts_tx,
+			smux_ctl_devp[i]->stats.pkts_rx,
+			smux_ctl_devp[i]->stats.cnt_ssr,
+			smux_ctl_devp[i]->stats.cnt_high_wm_hit,
+			smux_ctl_devp[i]->stats.cnt_read_fail,
+			smux_ctl_devp[i]->stats.cnt_write_fail);
+	}
+
+	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+/* debugfs open: stash the inode's private pointer on the struct file. */
+static int debug_open(struct inode *in, struct file *filp)
+{
+	filp->private_data = in->i_private;
+	return 0;
+}
+
+/* Read-only fops for the smux_ctl_state debugfs file. */
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+/*
+ * Create the smux_ctl debugfs directory and its state file.
+ *
+ * NOTE(review): on some kernels debugfs_create_dir() returns NULL (not an
+ * ERR_PTR) when debugfs is disabled, in which case !IS_ERR(NULL) is true
+ * and debugfs_create_file() gets a NULL parent — confirm against this
+ * tree's debugfs behavior.
+ */
+static int __init smux_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smux_ctl", 0);
+	if (!IS_ERR(dent))
+		debugfs_create_file("smux_ctl_state", 0444, dent,
+			NULL, &debug_ops);
+
+	return 0;
+}
+
+late_initcall(smux_debugfs_init);
+#endif
+
+module_init(smux_ctl_init);
+MODULE_DESCRIPTION("MSM SMUX Control Port");
+MODULE_LICENSE("GPL v2");
+
+
diff --git a/drivers/tty/smux_loopback.c b/drivers/tty/smux_loopback.c
new file mode 100644
index 0000000..52ce17f
--- /dev/null
+++ b/drivers/tty/smux_loopback.c
@@ -0,0 +1,289 @@
+/* drivers/tty/smux_loopback.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define SMUX_LOOP_FIFO_SIZE 128
+
+static void smux_loopback_rx_worker(struct work_struct *work);
+static struct workqueue_struct *smux_loopback_wq;
+static DECLARE_WORK(smux_loopback_work, smux_loopback_rx_worker);
+static struct kfifo smux_loop_pkt_fifo;
+static DEFINE_SPINLOCK(hw_fn_lock);
+
+/**
+ * Initialize loopback framework (called by n_smux.c).
+ */
+/*
+ * Initialize loopback framework (called by n_smux.c).
+ *
+ * Fixes over the original: create_singlethread_workqueue() returns NULL
+ * on failure — never an ERR_PTR — so the IS_ERR() check could never
+ * detect it; and a kfifo_alloc() failure no longer leaks the workqueue.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int smux_loopback_init(void)
+{
+	int ret;
+
+	spin_lock_init(&hw_fn_lock);
+	smux_loopback_wq = create_singlethread_workqueue("smux_loopback_wq");
+	if (!smux_loopback_wq) {
+		pr_err("%s: failed to create workqueue\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = kfifo_alloc(&smux_loop_pkt_fifo,
+			SMUX_LOOP_FIFO_SIZE * sizeof(struct smux_pkt_t *),
+			GFP_KERNEL);
+	if (ret) {
+		pr_err("%s: failed to allocate loopback fifo\n", __func__);
+		destroy_workqueue(smux_loopback_wq);
+		smux_loopback_wq = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * Simulate a write to the TTY hardware by duplicating
+ * the TX packet and putting it into the RX queue.
+ *
+ * @pkt Packet to write
+ *
+ * @returns 0 on success
+ */
+/*
+ * Simulate a write to the TTY hardware by duplicating the TX packet and
+ * queuing the copy as pseudo-RX data for the loopback worker.
+ *
+ * Fixes over the original: the duplicated packet was leaked on every
+ * error path (payload alloc failure, full FIFO, kfifo_in failure); it is
+ * now released with smux_free_pkt() whenever it was not queued.  The
+ * smux_alloc_pkt() result is also NULL-checked defensively.
+ *
+ * @pkt_ptr Packet to write
+ *
+ * @returns 0 on success, negative errno on failure
+ */
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+	struct smux_pkt_t *send_pkt;
+	unsigned long flags;
+	int i;
+	int ret;
+
+	/* duplicate packet */
+	send_pkt = smux_alloc_pkt();
+	if (!send_pkt)
+		return -ENOMEM;
+	send_pkt->hdr = pkt_ptr->hdr;
+	if (pkt_ptr->hdr.payload_len) {
+		ret = smux_alloc_pkt_payload(send_pkt);
+		if (ret) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memcpy(send_pkt->payload, pkt_ptr->payload,
+				pkt_ptr->hdr.payload_len);
+	}
+
+	/* queue duplicate as pseudo-RX data */
+	spin_lock_irqsave(&hw_fn_lock, flags);
+	i = kfifo_avail(&smux_loop_pkt_fifo);
+	if (i < sizeof(struct smux_pkt_t *)) {
+		pr_err("%s: no space in fifo\n", __func__);
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	i = kfifo_in(&smux_loop_pkt_fifo,
+			&send_pkt,
+			sizeof(struct smux_pkt_t *));
+	if (i < 0) {
+		pr_err("%s: fifo error\n", __func__);
+		ret = -ENOMEM;
+		goto unlock;
+	}
+	queue_work(smux_loopback_wq, &smux_loopback_work);
+	ret = 0;
+
+unlock:
+	spin_unlock_irqrestore(&hw_fn_lock, flags);
+out:
+	/* On any failure the duplicate never reached the FIFO: free it. */
+	if (ret)
+		smux_free_pkt(send_pkt);
+	return ret;
+}
+
+/**
+ * Receive loopback byte processor.
+ *
+ * @pkt Incoming packet
+ */
+/*
+ * Receive loopback byte processor.
+ *
+ * Simulates the remote side's wakeup handshake: the first
+ * smux_simulate_wakeup_delay - 1 wakeup requests are deliberately
+ * dropped to exercise the retry path; the final one is ACKed by feeding
+ * SMUX_WAKEUP_ACK back through the RX state machine.
+ *
+ * @pkt Incoming packet (single byte carried in hdr.flags)
+ */
+static void smux_loopback_rx_byte(struct smux_pkt_t *pkt)
+{
+	/* persists across calls so the drop count spans retries */
+	static int simulated_retry_cnt;
+	const char ack = SMUX_WAKEUP_ACK;
+
+	switch (pkt->hdr.flags) {
+	case SMUX_WAKEUP_REQ:
+		/* reply with ACK after appropriate delays */
+		++simulated_retry_cnt;
+		if (simulated_retry_cnt >= smux_simulate_wakeup_delay) {
+			pr_err("%s: completed %d of %d\n",
+				__func__, simulated_retry_cnt,
+				smux_simulate_wakeup_delay);
+			pr_err("%s: simulated wakeup\n", __func__);
+			simulated_retry_cnt = 0;
+			smux_rx_state_machine(&ack, 1, 0);
+		} else {
+			/* force retry */
+			pr_err("%s: dropping wakeup request %d of %d\n",
+				__func__, simulated_retry_cnt,
+				smux_simulate_wakeup_delay);
+		}
+		break;
+	case SMUX_WAKEUP_ACK:
+		/* this shouldn't happen since we don't send requests */
+		pr_err("%s: wakeup ACK unexpected\n", __func__);
+		break;
+
+	default:
+		/* invalid character */
+		pr_err("%s: invalid character 0x%x\n",
+			__func__, (unsigned)pkt->hdr.flags);
+		break;
+	}
+}
+
+/**
+ * Simulated remote hardware used for local loopback testing.
+ *
+ * @work Not used
+ */
+/*
+ * Simulated remote hardware used for local loopback testing: drains the
+ * packet FIFO and feeds synthesized replies back through the RX state
+ * machine.  The spinlock is held only around FIFO accesses and dropped
+ * while each packet is processed.
+ *
+ * Fixes over the original:
+ *  - the kzalloc() result is checked (NULL deref otherwise);
+ *  - a packet with a bad magic or channel id no longer aborts the whole
+ *    worker with a bare return, which leaked the serialization buffer and
+ *    the packet and stranded everything still queued — the bad packet is
+ *    freed and processing continues with the next one.
+ *
+ * @work Not used
+ */
+static void smux_loopback_rx_worker(struct work_struct *work)
+{
+	struct smux_pkt_t *pkt;
+	struct smux_pkt_t reply_pkt;
+	char *data;
+	int len;
+	int lcid;
+	int i;
+	unsigned long flags;
+
+	data = kzalloc(SMUX_MAX_PKT_SIZE, GFP_ATOMIC);
+	if (!data) {
+		pr_err("%s: serialization buffer allocation failed\n",
+				__func__);
+		return;
+	}
+
+	spin_lock_irqsave(&hw_fn_lock, flags);
+	while (kfifo_len(&smux_loop_pkt_fifo) >= sizeof(struct smux_pkt_t *)) {
+		i = kfifo_out(&smux_loop_pkt_fifo, &pkt,
+				sizeof(struct smux_pkt_t *));
+		spin_unlock_irqrestore(&hw_fn_lock, flags);
+
+		if (pkt->hdr.magic != SMUX_MAGIC) {
+			pr_err("%s: invalid magic %x\n", __func__,
+					pkt->hdr.magic);
+			smux_free_pkt(pkt);
+			spin_lock_irqsave(&hw_fn_lock, flags);
+			continue;
+		}
+
+		lcid = pkt->hdr.lcid;
+		if (smux_assert_lch_id(lcid)) {
+			pr_err("%s: invalid channel id %d\n", __func__, lcid);
+			smux_free_pkt(pkt);
+			spin_lock_irqsave(&hw_fn_lock, flags);
+			continue;
+		}
+
+		switch (pkt->hdr.cmd) {
+		case SMUX_CMD_OPEN_LCH:
+			if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+				break;
+
+			/* Reply with Open ACK */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+			reply_pkt.hdr.flags = SMUX_CMD_OPEN_ACK
+				| SMUX_CMD_OPEN_POWER_COLLAPSE;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.hdr.pad_len = 0;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+
+			/* Send Remote Open */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+			reply_pkt.hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.hdr.pad_len = 0;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+			break;
+
+		case SMUX_CMD_CLOSE_LCH:
+			if (pkt->hdr.flags == SMUX_CMD_CLOSE_ACK)
+				break;
+
+			/* Reply with Close ACK */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+			reply_pkt.hdr.flags = SMUX_CMD_CLOSE_ACK;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.hdr.pad_len = 0;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+
+			/* Send Remote Close */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+			reply_pkt.hdr.flags = 0;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.hdr.pad_len = 0;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+			break;
+
+		case SMUX_CMD_DATA:
+			/* Echo back received data */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_DATA;
+			reply_pkt.hdr.flags = 0;
+			reply_pkt.hdr.payload_len = pkt->hdr.payload_len;
+			reply_pkt.payload = pkt->payload;
+			reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+			break;
+
+		case SMUX_CMD_STATUS:
+			/* Echo back received status */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_STATUS;
+			reply_pkt.hdr.flags = pkt->hdr.flags;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.payload = NULL;
+			reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+			break;
+
+		case SMUX_CMD_PWR_CTL:
+			/* reply with ack */
+			smux_init_pkt(&reply_pkt);
+			reply_pkt.hdr.lcid = lcid;
+			reply_pkt.hdr.cmd = SMUX_CMD_PWR_CTL;
+			reply_pkt.hdr.flags = SMUX_CMD_PWR_CTL_SLEEP_REQ
+				| SMUX_CMD_PWR_CTL_ACK;
+			reply_pkt.hdr.payload_len = 0;
+			reply_pkt.payload = NULL;
+			reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+			smux_serialize(&reply_pkt, data, &len);
+			smux_rx_state_machine(data, len, 0);
+			break;
+
+		case SMUX_CMD_BYTE:
+			smux_loopback_rx_byte(pkt);
+			break;
+
+		default:
+			pr_err("%s: unknown command %d\n",
+					__func__, pkt->hdr.cmd);
+			break;
+		}
+
+		smux_free_pkt(pkt);
+		spin_lock_irqsave(&hw_fn_lock, flags);
+	}
+	spin_unlock_irqrestore(&hw_fn_lock, flags);
+	kfree(data);
+}
diff --git a/drivers/tty/smux_loopback.h b/drivers/tty/smux_loopback.h
new file mode 100644
index 0000000..85c6c23
--- /dev/null
+++ b/drivers/tty/smux_loopback.h
@@ -0,0 +1,39 @@
+/* drivers/tty/smux_loopback.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_LOOPBACK_H
+#define SMUX_LOOPBACK_H
+
+#include "smux_private.h"
+
+#ifdef CONFIG_N_SMUX_LOOPBACK
+
+int smux_loopback_init(void);
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr);
+
+#else
+static inline int smux_loopback_init(void)
+{
+ return 0;
+}
+
+static inline int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+ return -ENODEV;
+}
+
+
+#endif /* CONFIG_N_SMUX_LOOPBACK */
+#endif /* SMUX_LOOPBACK_H */
+
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
new file mode 100644
index 0000000..5ce8fb8
--- /dev/null
+++ b/drivers/tty/smux_private.h
@@ -0,0 +1,115 @@
+/* drivers/tty/smux_private.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_PRIVATE_H
+#define SMUX_PRIVATE_H
+
+#define SMUX_MAX_PKT_SIZE 8192
+
+/* SMUX Protocol Characters */
+#define SMUX_MAGIC 0x33FC
+#define SMUX_MAGIC_WORD1 0xFC
+#define SMUX_MAGIC_WORD2 0x33
+#define SMUX_WAKEUP_REQ 0xFD
+#define SMUX_WAKEUP_ACK 0xFE
+
+/* Unit testing characters */
+#define SMUX_UT_ECHO_REQ 0xF0
+#define SMUX_UT_ECHO_ACK_OK 0xF1
+#define SMUX_UT_ECHO_ACK_FAIL 0xF2
+
+struct tty_struct;
+
+/* Wire-format packet header, exchanged with the remote side. */
+struct smux_hdr_t {
+	uint16_t magic;		/* SMUX_MAGIC frame marker */
+	uint8_t flags;		/* command-specific flag bits */
+	uint8_t cmd;		/* SMUX_CMD_* opcode */
+	uint8_t pad_len;	/* padding bytes following the payload */
+	uint8_t lcid;		/* logical channel id */
+	uint16_t payload_len;	/* payload size in bytes */
+};
+
+/* Internal packet bookkeeping wrapped around the wire header. */
+struct smux_pkt_t {
+	struct smux_hdr_t hdr;
+	int allocated;		/* presumably set by smux_alloc_pkt() — TODO confirm */
+	unsigned char *payload;
+	int free_payload;	/* presumably: free payload with the packet — TODO confirm */
+	struct list_head list;
+	void *priv;
+};
+
+/* SMUX Packet Commands */
+enum {
+ SMUX_CMD_DATA = 0x0,
+ SMUX_CMD_OPEN_LCH = 0x1,
+ SMUX_CMD_CLOSE_LCH = 0x2,
+ SMUX_CMD_STATUS = 0x3,
+ SMUX_CMD_PWR_CTL = 0x4,
+
+ SMUX_CMD_BYTE, /* for internal usage */
+ SMUX_NUM_COMMANDS
+};
+
+/* Open command flags */
+enum {
+ SMUX_CMD_OPEN_ACK = 1 << 0,
+ SMUX_CMD_OPEN_POWER_COLLAPSE = 1 << 1,
+ SMUX_CMD_OPEN_REMOTE_LOOPBACK = 1 << 2,
+};
+
+/* Close command flags */
+enum {
+ SMUX_CMD_CLOSE_ACK = 1 << 0,
+};
+
+/* Power command flags */
+enum {
+ SMUX_CMD_PWR_CTL_ACK = 1 << 0,
+ SMUX_CMD_PWR_CTL_SLEEP_REQ = 1 << 1,
+};
+
+/* Local logical channel states */
+enum {
+ SMUX_LCH_LOCAL_CLOSED,
+ SMUX_LCH_LOCAL_OPENING,
+ SMUX_LCH_LOCAL_OPENED,
+ SMUX_LCH_LOCAL_CLOSING,
+};
+
+/* Remote logical channel states */
+enum {
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED,
+};
+
+
+/* Packet helpers implemented in n_smux.c. */
+int smux_assert_lch_id(uint32_t lcid);
+void smux_init_pkt(struct smux_pkt_t *pkt);
+struct smux_pkt_t *smux_alloc_pkt(void);
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt);
+void smux_free_pkt(struct smux_pkt_t *pkt);
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+			unsigned int *out_len);
+
+/* RX entry points driven by the TTY line discipline / loopback code. */
+void smux_rx_state_machine(const unsigned char *data, int len, int flag);
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			char *fp, int count);
+
+/* testing parameters */
+extern int smux_byte_loopback;
+extern int smux_simulate_wakeup_delay;
+
+#endif /* SMUX_PRIVATE_H */
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
new file mode 100644
index 0000000..242c66e
--- /dev/null
+++ b/drivers/tty/smux_test.c
@@ -0,0 +1,1222 @@
+/* drivers/tty/smux_test.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define DEBUG_BUFMAX 4096
+
+/**
+ * Unit test assertion for logging test cases.
+ *
+ * @a lval
+ * @b rval
+ * @cmp comparison operator
+ *
+ * Assertion fails if (@a cmp @b) is not true which then
+ * logs the function and line number where the error occurred
+ * along with the values of @a and @b.
+ *
+ * Assumes that the following local variables exist:
+ * @buf - buffer to write failure message to
+ * @i - number of bytes written to buffer
+ * @max - maximum size of the buffer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_PTR(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_UINT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
+ 89, 144, 233};
+
+/* Used for mapping local to remote TIOCM signals */
+struct tiocm_test_vector {
+ uint32_t input;
+ uint32_t set_old;
+ uint32_t set_new;
+ uint32_t clr_old;
+};
+
+/**
+ * Allocates a new buffer for SMUX for every call.
+ */
+/*
+ * RX-buffer callback for SMUX: allocates a fresh buffer for every call.
+ *
+ * Fix: the kmalloc() result was unchecked, so an allocation failure
+ * returned success with *buffer == NULL.  It now reports -ENOMEM
+ * (presumably the SMUX core treats a nonzero return as "no buffer" —
+ * TODO confirm against the callback contract in smux.h).
+ */
+int get_rx_buffer(void *priv, void **pkt_priv, void **buffer, int size)
+{
+	void *rx_buf;
+
+	rx_buf = kmalloc(size, GFP_ATOMIC);
+	if (!rx_buf)
+		return -ENOMEM;
+
+	*pkt_priv = (void *)0x1234;	/* marker checked by the tests */
+	*buffer = rx_buf;
+
+	return 0;
+}
+
+/* Test vector for packet tests. */
+struct test_vector {
+ const char *data;
+ const unsigned len;
+};
+
+/* Mock object metadata for SMUX_READ_DONE event */
+struct mock_read_event {
+ struct list_head list;
+ struct smux_meta_read meta;
+};
+
+/* Mock object metadata for SMUX_WRITE_DONE event */
+struct mock_write_event {
+ struct list_head list;
+ struct smux_meta_write meta;
+};
+
+/* Mock object for all SMUX callback events */
+struct smux_mock_callback {
+ int cb_count;
+ struct completion cb_completion;
+ spinlock_t lock;
+
+ /* status changes */
+ int event_connected;
+ int event_disconnected;
+ int event_disconnected_ssr;
+ int event_low_wm;
+ int event_high_wm;
+
+ /* TIOCM changes */
+ int event_tiocm;
+ struct smux_meta_tiocm tiocm_meta;
+
+ /* read event data */
+ int event_read_done;
+ int event_read_failed;
+ struct list_head read_events;
+
+ /* write event data */
+ int event_write_done;
+ int event_write_failed;
+ struct list_head write_events;
+};
+
+/**
+ * Initialize mock callback data. Only call once.
+ *
+ * @cb Mock callback data
+ */
+/*
+ * One-time setup of the mock callback object: event lists, completion,
+ * and the protecting spinlock.  Call exactly once per object.
+ */
+void mock_cb_data_init(struct smux_mock_callback *cb)
+{
+	INIT_LIST_HEAD(&cb->read_events);
+	INIT_LIST_HEAD(&cb->write_events);
+	spin_lock_init(&cb->lock);
+	init_completion(&cb->cb_completion);
+}
+
+/**
+ * Reset mock callback data to default values.
+ *
+ * @cb Mock callback data
+ *
+ * All packets are freed and counters reset to zero.
+ */
+/*
+ * Reset mock callback data to default values: zero every counter, rearm
+ * the completion, and free all queued read/write event records (read
+ * events also own their data buffer).
+ */
+void mock_cb_data_reset(struct smux_mock_callback *cb)
+{
+	struct mock_read_event *rev, *rev_tmp;
+	struct mock_write_event *wev, *wev_tmp;
+
+	cb->cb_count = 0;
+	INIT_COMPLETION(cb->cb_completion);
+	cb->event_connected = 0;
+	cb->event_disconnected = 0;
+	cb->event_disconnected_ssr = 0;
+	cb->event_low_wm = 0;
+	cb->event_high_wm = 0;
+	cb->event_tiocm = 0;
+	cb->tiocm_meta.tiocm_old = 0;
+	cb->tiocm_meta.tiocm_new = 0;
+
+	cb->event_read_done = 0;
+	cb->event_read_failed = 0;
+	list_for_each_entry_safe(rev, rev_tmp, &cb->read_events, list) {
+		list_del(&rev->list);
+		kfree(rev->meta.buffer);
+		kfree(rev);
+	}
+
+	cb->event_write_done = 0;
+	cb->event_write_failed = 0;
+	list_for_each_entry_safe(wev, wev_tmp, &cb->write_events, list) {
+		list_del(&wev->list);
+		kfree(wev);
+	}
+}
+
+/**
+ * Dump the values of the mock callback data for debug purposes.
+ *
+ * @cb Mock callback data
+ * @buf Print buffer
+ * @max Maximum number of characters to print
+ *
+ * @returns Number of characters added to buffer
+ */
+/*
+ * Dump the values of the mock callback data for debug purposes.
+ *
+ * Fix: the "write_events" line printed list_empty(&cb->write_events) —
+ * i.e. 1 when the list was EMPTY — while "read_events" printed
+ * !list_empty(...).  Both now report 1 when events are pending.
+ *
+ * @cb Mock callback data
+ * @buf Print buffer
+ * @max Maximum number of characters to print
+ *
+ * @returns Number of characters added to buffer
+ */
+static int mock_cb_data_print(const struct smux_mock_callback *cb,
+		char *buf, int max)
+{
+	int i = 0;
+
+	i += scnprintf(buf + i, max - i,
+		"\tcb_count=%d\n"
+		"\tcb_completion.done=%d\n"
+		"\tevent_connected=%d\n"
+		"\tevent_disconnected=%d\n"
+		"\tevent_disconnected_ssr=%d\n"
+		"\tevent_low_wm=%d\n"
+		"\tevent_high_wm=%d\n"
+		"\tevent_tiocm=%d\n"
+		"\tevent_read_done=%d\n"
+		"\tevent_read_failed=%d\n"
+		"\tread_events=%d\n"
+		"\tevent_write_done=%d\n"
+		"\tevent_write_failed=%d\n"
+		"\twrite_events=%d\n",
+		cb->cb_count,
+		cb->cb_completion.done,
+		cb->event_connected,
+		cb->event_disconnected,
+		cb->event_disconnected_ssr,
+		cb->event_low_wm,
+		cb->event_high_wm,
+		cb->event_tiocm,
+		cb->event_read_done,
+		cb->event_read_failed,
+		!list_empty(&cb->read_events),
+		cb->event_write_done,
+		cb->event_write_failed,
+		!list_empty(&cb->write_events)
+		);
+
+	return i;
+}
+
+/**
+ * Mock object event callback. Used to log events for analysis in the unit
+ * tests.
+ */
+void smux_mock_cb(void *priv, int event, const void *metadata)
+{
+	struct smux_mock_callback *cb_data_ptr;
+	struct mock_write_event *write_event_meta;
+	struct mock_read_event *read_event_meta;
+	unsigned long flags;
+
+	cb_data_ptr = (struct smux_mock_callback *)priv;
+	if (cb_data_ptr == NULL) {
+		pr_err("%s: invalid private data\n", __func__);
+		return;
+	}
+
+	/* All counter/list updates happen under cb->lock; the completion
+	 * is signalled once per callback so tests can count events. */
+	spin_lock_irqsave(&cb_data_ptr->lock, flags);
+	switch (event) {
+	case SMUX_CONNECTED:
+		++cb_data_ptr->event_connected;
+		break;
+
+	case SMUX_DISCONNECTED:
+		++cb_data_ptr->event_disconnected;
+		cb_data_ptr->event_disconnected_ssr =
+			((struct smux_meta_disconnected *)metadata)->is_ssr;
+		break;
+
+	/* READ_DONE and READ_FAIL both record a copy of the read metadata
+	 * on read_events; a failed kmalloc silently drops the record. */
+	case SMUX_READ_DONE:
+		++cb_data_ptr->event_read_done;
+		read_event_meta = kmalloc(sizeof(struct mock_read_event),
+						GFP_ATOMIC);
+		if (read_event_meta) {
+			read_event_meta->meta =
+				*(struct smux_meta_read *)metadata;
+			list_add_tail(&read_event_meta->list,
+					&cb_data_ptr->read_events);
+		}
+		break;
+
+	case SMUX_READ_FAIL:
+		++cb_data_ptr->event_read_failed;
+		read_event_meta = kmalloc(sizeof(struct mock_read_event),
+						GFP_ATOMIC);
+		if (read_event_meta) {
+			read_event_meta->meta =
+				*(struct smux_meta_read *)metadata;
+			list_add_tail(&read_event_meta->list,
+					&cb_data_ptr->read_events);
+		}
+		break;
+
+	/* WRITE_DONE and WRITE_FAIL mirror the read handling above. */
+	case SMUX_WRITE_DONE:
+		++cb_data_ptr->event_write_done;
+		write_event_meta = kmalloc(sizeof(struct mock_write_event),
+						GFP_ATOMIC);
+		if (write_event_meta) {
+			write_event_meta->meta =
+				*(struct smux_meta_write *)metadata;
+			list_add_tail(&write_event_meta->list,
+					&cb_data_ptr->write_events);
+		}
+		break;
+
+	case SMUX_WRITE_FAIL:
+		++cb_data_ptr->event_write_failed;
+		write_event_meta = kmalloc(sizeof(struct mock_write_event),
+						GFP_ATOMIC);
+		if (write_event_meta) {
+			write_event_meta->meta =
+				*(struct smux_meta_write *)metadata;
+			list_add_tail(&write_event_meta->list,
+					&cb_data_ptr->write_events);
+		}
+		break;
+
+	case SMUX_LOW_WM_HIT:
+		++cb_data_ptr->event_low_wm;
+		break;
+
+	case SMUX_HIGH_WM_HIT:
+		++cb_data_ptr->event_high_wm;
+		break;
+
+	case SMUX_TIOCM_UPDATE:
+		++cb_data_ptr->event_tiocm;
+		cb_data_ptr->tiocm_meta = *(struct smux_meta_tiocm *)metadata;
+		break;
+
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+	};
+
+	++cb_data_ptr->cb_count;
+	complete(&cb_data_ptr->cb_completion);
+	spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+}
+
+/**
+ * Test Read/write usage.
+ *
+ * @buf Output buffer for failure/status messages
+ * @max Size of @buf
+ * @vectors Test vector data (must end with NULL item)
+ * @name Name of the test case for failure messages
+ *
+ * Perform a sanity test consisting of opening a port, writing test packet(s),
+ * reading the response(s), and closing the port.
+ *
+ * The port should already be configured to use either local or remote
+ * loopback.
+ */
+static int smux_ut_basic_core(char *buf, int max,
+			const struct test_vector *vectors,
+			const char *name)
+{
+	int i = 0;
+	int failed = 0;
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int ret;
+
+	/* one-time init of the mock callback object (lock, lists, ...) */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	while (!failed) {
+		struct mock_write_event *write_event;
+		struct mock_read_event *read_event;
+
+		/* open port */
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* write, read, and verify the test vector data */
+		for (; vectors->data != NULL; ++vectors) {
+			const char *test_data = vectors->data;
+			const unsigned test_len = vectors->len;
+
+			i += scnprintf(buf + i, max - i,
+					"Writing vector %p len %d\n",
+					test_data, test_len);
+
+			/* write data (capture ret so the assert below is
+			 * checking this call, not the earlier open) */
+			ret = msm_smux_write(SMUX_TEST_LCID,
+					(void *)0xCAFEFACE,
+					test_data, test_len);
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+
+			/* wait for write and echo'd read to complete */
+			INIT_COMPLETION(cb_data.cb_completion);
+			if (cb_data.cb_count < 2)
+				UT_ASSERT_INT(
+					(int)wait_for_completion_timeout(
+						&cb_data.cb_completion, HZ),
+					>, 0);
+
+			UT_ASSERT_INT(cb_data.cb_count, >=, 1);
+			UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
+			UT_ASSERT_INT(list_empty(&cb_data.write_events), ==, 0);
+
+			write_event = list_first_entry(&cb_data.write_events,
+					struct mock_write_event, list);
+			UT_ASSERT_PTR(write_event->meta.pkt_priv, ==,
+					(void *)0xCAFEFACE);
+			UT_ASSERT_PTR(write_event->meta.buffer, ==,
+					(void *)test_data);
+			UT_ASSERT_INT(write_event->meta.len, ==, test_len);
+
+			/* verify read event */
+			UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
+			UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
+			read_event = list_first_entry(&cb_data.read_events,
+					struct mock_read_event, list);
+			UT_ASSERT_PTR(read_event->meta.pkt_priv, ==,
+					(void *)0x1234);
+			UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
+
+			if (read_event->meta.len != test_len ||
+				memcmp(read_event->meta.buffer,
+						test_data, test_len)) {
+				/* data mismatch */
+				char linebuff[80];
+
+				hex_dump_to_buffer(test_data, test_len,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(buf + i, max - i,
+						"Expected:\n%s\n\n", linebuff);
+
+				hex_dump_to_buffer(read_event->meta.buffer,
+					read_event->meta.len,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(buf + i, max - i,
+						"Actual:\n%s\n", linebuff);
+				failed = 1;
+				break;
+			}
+			mock_cb_data_reset(&cb_data);
+		}
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", name);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/**
+ * Verify Basic Local Loopback Support
+ *
+ * Sanity test: put the test channel into local loopback mode, then run
+ * the common open/write/read-echo/close sequence.
+ */
+static int smux_ut_basic(char *buf, int max)
+{
+	const struct test_vector test_data[] = {
+		{"hello\0world\n", sizeof("hello\0world\n")},
+		{0, 0},
+	};
+	int failed = 0;
+	int ret;
+	int i = 0;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	do {
+		/* enable loopback mode */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+	}
+	return i;
+}
+
+/**
+ * Verify Basic Remote Loopback Support
+ *
+ * Sanity test: put the test channel into remote loopback mode, then run
+ * the common open/write/read-echo/close sequence.
+ */
+static int smux_ut_remote_basic(char *buf, int max)
+{
+	const struct test_vector test_data[] = {
+		{"hello\0world\n", sizeof("hello\0world\n")},
+		{0, 0},
+	};
+	int failed = 0;
+	int ret;
+	int i = 0;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	do {
+		/* enable remote mode */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+	}
+	return i;
+}
+
+/**
+ * Fill test pattern into provided buffer including an optional
+ * redzone 16 bytes before and 16 bytes after the buffer.
+ *
+ * buf ---------
+ *      redzone
+ *     --------- <- returned pointer
+ *       data
+ *     --------- <- returned pointer + len
+ *      redzone
+ *     ---------
+ *
+ * @buf Pointer to the buffer of size len or len+32 (redzone)
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, adds redzone data
+ *
+ * @returns pointer to buffer (buf + 16 if redzone enabled)
+ */
+uint8_t *test_pattern_fill(char *buf, int len, int redzone)
+{
+	uint8_t *ret;
+	uint8_t *p;
+	uint8_t ch;
+
+	ret = (uint8_t *)buf;
+	if (redzone) {
+		/* redzones are the first and last 16 bytes of @buf;
+		 * the postfix redzone starts after the data region */
+		memset(buf, 0xAB, 16);
+		memset(buf + 16 + len, 0xBA, 16);
+		ret += 16;
+	}
+
+	/* fill the data region (between the redzones) with test pattern */
+	for (p = ret, ch = 0; len > 0; --len, ++ch)
+		*p++ = ch;
+
+	return ret;
+}
+
+/**
+ * Verify test pattern previously written by test_pattern_fill.
+ *
+ * @buf_ptr Pointer to the data-buffer pointer; moved back to the buffer
+ *          base (data - 16) when @redzone is true so the caller can free it
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, verifies redzone and adjusts *buf_ptr
+ * @errmsg Buffer for error message
+ * @errmsg_max Size of error message buffer
+ *
+ * @returns 0 for success; length of error message otherwise
+ */
+unsigned test_pattern_verify(char **buf_ptr, int len, int redzone,
+					char *errmsg, int errmsg_max)
+{
+	int n;
+	int i = 0;
+	char linebuff[80];
+	uint8_t *p;
+
+	if (redzone) {
+		*buf_ptr -= 16;
+		/* compare as unsigned bytes; plain char may be signed and
+		 * would never equal 0xAB/0xBA */
+		p = (uint8_t *)*buf_ptr;
+
+		/* verify prefix redzone (first 16 bytes of the buffer) */
+		for (n = 0; n < 16; ++n) {
+			if (p[n] != 0xAB) {
+				hex_dump_to_buffer(p, 16,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(errmsg + i, errmsg_max - i,
+					"Redzone violation: %s\n", linebuff);
+				break;
+			}
+		}
+
+		/* verify postfix redzone (follows the data region) */
+		for (n = 0; n < 16; ++n) {
+			if (p[16 + len + n] != 0xBA) {
+				hex_dump_to_buffer(&p[16 + len], 16,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(errmsg + i, errmsg_max - i,
+					"Redzone violation: %s\n", linebuff);
+				break;
+			}
+		}
+	}
+	return i;
+}
+
+/**
+ * Write multiple packets in ascending size and verify each packet is
+ * received correctly.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Requires that the port already be opened and loopback mode is
+ * configured correctly (if required).
+ */
+static int smux_ut_loopback_big_pkt(char *buf, int max, const char *name)
+{
+	struct test_vector test_data[] = {
+		{0, 64},
+		{0, 128},
+		{0, 256},
+		{0, 512},
+		{0, 1024},
+		{0, 2048},
+		{0, 4096},
+		{0, 0},
+	};
+	int i = 0;
+	int failed = 0;
+	struct test_vector *tv;
+
+	/* generate test data (data region surrounded by 16-byte redzones) */
+	for (tv = test_data; tv->len > 0; ++tv) {
+		tv->data = kmalloc(tv->len + 32, GFP_KERNEL);
+		if (!tv->data) {
+			i += scnprintf(buf + i, max - i,
+					"%s: Unable to allocate %d bytes\n",
+					__func__, tv->len);
+			failed = 1;
+			goto out;
+		}
+		pr_err("%s: allocating %p len %d\n",
+				__func__, tv->data, tv->len);
+		/* point tv->data past the prefix redzone so the test only
+		 * transmits the pattern bytes */
+		tv->data = (char *)test_pattern_fill(tv->data, tv->len, 1);
+	}
+
+	/* run test */
+	i += scnprintf(buf + i, max - i, "Running %s\n", name);
+	while (!failed) {
+		i += smux_ut_basic_core(buf + i, max - i, test_data, name);
+		break;
+	}
+
+out:
+	if (failed) {
+		pr_err("%s: Failed\n", name);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+	}
+
+	for (tv = test_data; tv->len > 0; ++tv) {
+		if (tv->data) {
+			/* verify redzones; moves tv->data back to the
+			 * allocation base so kfree gets the right pointer */
+			i += test_pattern_verify((char **)&tv->data,
+						tv->len, 1, buf + i, max - i);
+			pr_err("%s: freeing %p len %d\n", __func__,
+							tv->data, tv->len);
+			kfree(tv->data);
+		}
+	}
+
+	return i;
+}
+
+/**
+ * Verify Large-packet Local Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Puts the channel into local (byte) loopback mode and runs the
+ * ascending-size big-packet test.
+ */
+static int smux_ut_local_big_pkt(char *buf, int max)
+{
+	int i = 0;
+
+	if (msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_LOCAL_LOOPBACK, 0) != 0) {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+		return i;
+	}
+
+	smux_byte_loopback = SMUX_TEST_LCID;
+	i += smux_ut_loopback_big_pkt(buf, max, __func__);
+	smux_byte_loopback = 0;
+
+	return i;
+}
+
+/**
+ * Verify Large-packet Remote Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Puts the channel into remote loopback mode and runs the
+ * ascending-size big-packet test.
+ */
+static int smux_ut_remote_big_pkt(char *buf, int max)
+{
+	int i = 0;
+
+	if (msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_REMOTE_LOOPBACK, 0) != 0) {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+		return i;
+	}
+
+	return i + smux_ut_loopback_big_pkt(buf, max, __func__);
+}
+
+/**
+ * Verify set and get operations for each TIOCM bit.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_tiocm(char *buf, int max, const char *name)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	static const struct tiocm_test_vector tiocm_vectors[] = {
+		/* bit to set, set old, set new, clear old */
+		{TIOCM_DTR, TIOCM_DTR, TIOCM_DTR | TIOCM_DSR, TIOCM_DSR},
+		{TIOCM_RTS, TIOCM_RTS, TIOCM_RTS | TIOCM_CTS, TIOCM_CTS},
+		{TIOCM_RI, 0x0, TIOCM_RI, TIOCM_RI},
+		{TIOCM_CD, 0x0, TIOCM_CD, TIOCM_CD},
+	};
+	int i = 0;
+	int failed = 0;
+	int n;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", name);
+
+	/* one-time init of the mock callback object */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	while (!failed) {
+		/* open port */
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* set and clear each TIOCM bit */
+		for (n = 0; n < ARRAY_SIZE(tiocm_vectors) && !failed; ++n) {
+			/* set signal and verify */
+			ret = msm_smux_tiocm_set(SMUX_TEST_LCID,
+						tiocm_vectors[n].input, 0x0);
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+					tiocm_vectors[n].set_old);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==,
+					tiocm_vectors[n].set_new);
+			mock_cb_data_reset(&cb_data);
+
+			/* clear signal and verify */
+			ret = msm_smux_tiocm_set(SMUX_TEST_LCID, 0x0,
+						tiocm_vectors[n].input);
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+					tiocm_vectors[n].clr_old);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==, 0x0);
+			mock_cb_data_reset(&cb_data);
+		}
+		if (failed)
+			break;
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", name);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/**
+ * Verify TIOCM Status Bits for local loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_tiocm(char *buf, int max)
+{
+	int i = 0;
+
+	if (msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_LOCAL_LOOPBACK, 0) != 0) {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+		return i;
+	}
+
+	smux_byte_loopback = SMUX_TEST_LCID;
+	i += smux_ut_tiocm(buf, max, __func__);
+	smux_byte_loopback = 0;
+
+	return i;
+}
+
+/**
+ * Verify TIOCM Status Bits for remote loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_remote_tiocm(char *buf, int max)
+{
+	int i = 0;
+
+	if (msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_REMOTE_LOOPBACK, 0) != 0) {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+		return i;
+	}
+
+	return i + smux_ut_tiocm(buf, max, __func__);
+}
+
+/**
+ * Verify High/Low Watermark notifications.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_wm(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	/* one-time init of the mock callback object */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback with TX disabled */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK
+				| SMUX_CH_OPTION_REMOTE_TX_STOP,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* transmit 4 packets and verify high-watermark notification
+		 * fires on the 4th (no notification for the first 3) */
+		ret = 0;
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)1,
+					test_array, sizeof(test_array));
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)2,
+					test_array, sizeof(test_array));
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)3,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)4,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 1);
+		UT_ASSERT_INT(cb_data.event_low_wm, ==, 0);
+		mock_cb_data_reset(&cb_data);
+
+		/* exceed watermark and verify failure return value */
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)5,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, -EAGAIN);
+
+		/* re-enable TX and verify low-watermark notification */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
+		UT_ASSERT_INT(ret, ==, 0);
+		while (cb_data.cb_count < 9) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+		UT_ASSERT_INT(cb_data.event_low_wm, ==, 1);
+		UT_ASSERT_INT(cb_data.event_write_done, ==, 4);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/**
+ * Verify smuxld_receive_buf regular and error processing.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_smuxld_receive_buf(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	struct mock_read_event *meta;
+	int i = 0;
+	int failed = 0;
+	int ret;
+	char data[] = {SMUX_UT_ECHO_REQ,
+		SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ,
+	};
+	/* middle request is flagged as an error byte */
+	char flags[] = {0x0, 0x1, 0x0,};
+
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+
+	/* one-time init of the mock callback object */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/*
+		 * Verify RX error processing by sending 3 echo requests:
+		 *     one OK, one fail, and a final OK
+		 *
+		 * The parsing framework should process the requests
+		 * and send us three BYTE command packets with
+		 * ECHO ACK FAIL and ECHO ACK OK characters.
+		 */
+		smuxld_receive_buf(0, data, flags, sizeof(data));
+
+		/* verify response characters */
+		do {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		} while (cb_data.cb_count < 3);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
+		UT_ASSERT_INT(cb_data.event_read_done, ==, 3);
+
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_OK);
+		list_del(&meta->list);
+
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_FAIL);
+		list_del(&meta->list);
+
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_OK);
+		list_del(&meta->list);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/* scratch buffer filled by a test's "fill" function on each debugfs read */
+static char debug_buffer[DEBUG_BUFMAX];
+
+/*
+ * debugfs read handler: runs the "fill" function stored in private_data
+ * (i.e. runs the unit test) to regenerate the report at offset 0, then
+ * copies the result to user space; reads at non-zero offsets return EOF.
+ */
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	int (*fill)(char *buf, int max) = file->private_data;
+	int bsize;
+
+	if (*ppos != 0)
+		return 0;
+
+	bsize = fill(debug_buffer, DEBUG_BUFMAX);
+	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+/* stash the per-file "fill" function (set at debug_create time) */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* read-only debugfs entries; each read re-runs the associated test */
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+/*
+ * Create one debugfs file whose read handler runs @fill to produce its
+ * contents; @fill is passed through inode->i_private (see debug_open).
+ */
+static void debug_create(const char *name, mode_t mode,
+			 struct dentry *dent,
+			 int (*fill)(char *buf, int max))
+{
+	debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+/*
+ * Register the SMUX unit-test debugfs directory and entries.
+ *
+ * Returns 0 on success or a negative errno if the directory could not be
+ * created (debugfs_create_dir returns NULL on failure when debugfs is
+ * enabled, and an ERR_PTR when it is compiled out).
+ */
+static int __init smux_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("n_smux", 0);
+	if (IS_ERR_OR_NULL(dent))
+		return dent ? PTR_ERR(dent) : -ENOMEM;
+
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * And if particular tests fail, you can then repeatedly run the failing
+	 * tests as you debug and resolve the failing test.
+	 */
+	debug_create("ut_local_basic", 0444, dent, smux_ut_basic);
+	debug_create("ut_remote_basic", 0444,	dent, smux_ut_remote_basic);
+	debug_create("ut_local_big_pkt", 0444, dent, smux_ut_local_big_pkt);
+	debug_create("ut_remote_big_pkt", 0444, dent, smux_ut_remote_big_pkt);
+	debug_create("ut_local_tiocm", 0444, dent, smux_ut_local_tiocm);
+	debug_create("ut_remote_tiocm", 0444, dent, smux_ut_remote_tiocm);
+	debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
+	debug_create("ut_local_smuxld_receive_buf", 0444, dent,
+			smux_ut_local_smuxld_receive_buf);
+
+	return 0;
+}
+
+late_initcall(smux_debugfs_init);
+
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 863143b..7fd120f 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -16,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/usb/ulpi.h>
+#include <mach/gpio.h>
#include "ci13xxx_udc.c"
@@ -24,7 +25,11 @@
struct ci13xxx_udc_context {
int irq;
void __iomem *regs;
+ int wake_gpio;
+ int wake_irq;
+ bool wake_irq_state;
};
+
static struct ci13xxx_udc_context _udc_ctxt;
static irqreturn_t msm_udc_irq(int irq, void *data)
@@ -32,22 +37,71 @@
return udc_irq();
}
+/*
+ * Arm the USB_RESUME wake GPIO interrupt when the controller suspends so
+ * remote wakeup can be detected while the main USB IRQ path is idle.
+ * No-op when no wake GPIO was installed or the IRQ is already armed.
+ */
+static void ci13xxx_msm_suspend(void)
+{
+	struct device *dev = _udc->gadget.dev.parent;
+	dev_dbg(dev, "ci13xxx_msm_suspend\n");
+
+	if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
+		enable_irq_wake(_udc_ctxt.wake_irq);
+		enable_irq(_udc_ctxt.wake_irq);
+		_udc_ctxt.wake_irq_state = true;
+	}
+}
+
+/*
+ * Disarm the USB_RESUME wake GPIO interrupt on resume/disconnect; inverse
+ * of ci13xxx_msm_suspend().  No-op when the IRQ is not currently armed.
+ */
+static void ci13xxx_msm_resume(void)
+{
+	struct device *dev = _udc->gadget.dev.parent;
+	dev_dbg(dev, "ci13xxx_msm_resume\n");
+
+	if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
+		disable_irq_wake(_udc_ctxt.wake_irq);
+		disable_irq(_udc_ctxt.wake_irq);
+		_udc_ctxt.wake_irq_state = false;
+	}
+}
+
static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
{
struct device *dev = udc->gadget.dev.parent;
switch (event) {
case CI13XXX_CONTROLLER_RESET_EVENT:
- dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+ dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
writel(0, USB_AHBBURST);
writel_relaxed(0x08, USB_AHBMODE);
break;
+ case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+ case CI13XXX_CONTROLLER_SUSPEND_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
+ ci13xxx_msm_suspend();
+ break;
+ case CI13XXX_CONTROLLER_RESUME_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+
default:
dev_dbg(dev, "unknown ci13xxx_udc event\n");
break;
}
}
+/*
+ * USB_RESUME wake GPIO interrupt handler: kicks the transceiver out of
+ * low-power mode when resume signalling arrives during bus suspend.
+ */
+static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
+{
+	struct ci13xxx *udc = _udc;
+
+	if (udc->transceiver && udc->vbus_active && udc->suspended)
+		otg_set_suspend(udc->transceiver, 0);
+	else if (!udc->suspended)
+		/* NOTE(review): this disarms the wake IRQ when the bus is
+		 * already resumed; confirm the !suspended condition is the
+		 * intended guard here. */
+		ci13xxx_msm_resume();
+
+	return IRQ_HANDLED;
+}
+
static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
.name = "ci13xxx_msm",
.flags = CI13XXX_REGS_SHARED |
@@ -60,6 +114,52 @@
.notify_event = ci13xxx_msm_notify_event,
};
+/*
+ * Claim the optional USB_RESUME wake GPIO described by @res and hook up
+ * its (initially disabled) interrupt; armed later by ci13xxx_msm_suspend().
+ *
+ * Returns 0 on success or a negative errno; on failure no GPIO or IRQ
+ * resources remain claimed.
+ */
+static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
+				struct resource *res)
+{
+	int wake_irq;
+	int ret;
+
+	dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
+
+	_udc_ctxt.wake_gpio = res->start;
+	ret = gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not request USB_RESUME GPIO.\n");
+		_udc_ctxt.wake_gpio = 0;
+		return ret;
+	}
+	gpio_direction_input(_udc_ctxt.wake_gpio);
+	wake_irq = MSM_GPIO_TO_INT(_udc_ctxt.wake_gpio);
+	if (wake_irq < 0) {
+		dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
+		ret = -ENXIO;
+		goto gpio_free;
+	}
+
+	dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
+			_udc_ctxt.wake_gpio, wake_irq);
+	ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
+		IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "usb resume", NULL);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
+		goto gpio_free;
+	}
+	/* keep disabled until the controller actually suspends */
+	disable_irq(wake_irq);
+	_udc_ctxt.wake_irq = wake_irq;
+
+	return 0;
+
+gpio_free:
+	gpio_free(_udc_ctxt.wake_gpio);
+	_udc_ctxt.wake_gpio = 0;
+	return ret;
+}
+
+/*
+ * Release the USB_RESUME wake IRQ and GPIO claimed by
+ * ci13xxx_msm_install_wake_gpio().  The IRQ must be freed before the GPIO
+ * so the handler cannot run against a released GPIO.  Safe to call when
+ * nothing was installed.
+ */
+static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
+
+	if (_udc_ctxt.wake_irq) {
+		free_irq(_udc_ctxt.wake_irq, NULL);
+		_udc_ctxt.wake_irq = 0;
+	}
+	if (_udc_ctxt.wake_gpio) {
+		gpio_free(_udc_ctxt.wake_gpio);
+		_udc_ctxt.wake_gpio = 0;
+	}
+}
+
static int ci13xxx_msm_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -92,11 +192,20 @@
goto udc_remove;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
+ if (res) {
+ ret = ci13xxx_msm_install_wake_gpio(pdev, res);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio irq install failed\n");
+ goto udc_remove;
+ }
+ }
+
ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
pdev);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto udc_remove;
+ goto gpio_uninstall;
}
pm_runtime_no_callbacks(&pdev->dev);
@@ -104,6 +213,8 @@
return 0;
+gpio_uninstall:
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
udc_remove:
udc_remove();
iounmap:
@@ -116,6 +227,7 @@
{
pm_runtime_disable(&pdev->dev);
free_irq(_udc_ctxt.irq, pdev);
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
udc_remove();
iounmap(_udc_ctxt.regs);
return 0;
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index b612a0b..b29ef82 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2005,6 +2005,51 @@
}
/**
+ * isr_resume_handler: USB PCI (port change) interrupt handler
+ * @udc: UDC device
+ *
+ * Updates the gadget speed from the port state and, if the bus was
+ * suspended, notifies the controller driver, transceiver, and gadget
+ * driver of the resume.
+ */
+static void isr_resume_handler(struct ci13xxx *udc)
+{
+	udc->gadget.speed = hw_port_is_high_speed() ?
+		USB_SPEED_HIGH : USB_SPEED_FULL;
+	if (udc->suspended) {
+		/* drop the UDC lock around the external callouts */
+		spin_unlock(udc->lock);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			otg_set_suspend(udc->transceiver, 0);
+		/* NOTE(review): resume callback invoked unconditionally;
+		 * confirm gadget drivers always provide ->resume here. */
+		udc->driver->resume(&udc->gadget);
+		spin_lock(udc->lock);
+		udc->suspended = 0;
+	}
+}
+
+/**
+ * isr_suspend_handler: USB SLI (suspend) interrupt handler
+ * @udc: UDC device
+ *
+ * Notifies the gadget driver, controller driver, and transceiver of a bus
+ * suspend when the bus is active (speed known, VBUS present) and not
+ * already marked suspended.
+ */
+static void isr_suspend_handler(struct ci13xxx *udc)
+{
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+		udc->vbus_active) {
+		if (udc->suspended == 0) {
+			/* drop the UDC lock around the external callouts */
+			spin_unlock(udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_SUSPEND_EVENT);
+			if (udc->transceiver)
+				otg_set_suspend(udc->transceiver, 1);
+			spin_lock(udc->lock);
+			udc->suspended = 1;
+		}
+	}
+}
+
+/**
* isr_get_status_complete: get_status request complete function
* @ep: endpoint
* @req: request handled
@@ -2865,6 +2910,9 @@
} else {
hw_device_state(0);
_gadget_stop_activity(&udc->gadget);
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_DISCONNECT_EVENT);
pm_runtime_put_sync(&_gadget->dev);
}
}
@@ -3174,14 +3222,7 @@
}
if (USBi_PCI & intr) {
isr_statistics.pci++;
- udc->gadget.speed = hw_port_is_high_speed() ?
- USB_SPEED_HIGH : USB_SPEED_FULL;
- if (udc->suspended) {
- spin_unlock(udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(udc->lock);
- udc->suspended = 0;
- }
+ isr_resume_handler(udc);
}
if (USBi_UEI & intr)
isr_statistics.uei++;
@@ -3190,15 +3231,7 @@
isr_tr_complete_handler(udc);
}
if (USBi_SLI & intr) {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
- udc->suspended = 1;
- spin_unlock(udc->lock);
- udc->driver->suspend(&udc->gadget);
- if (udc->udc_driver->notify_event)
- udc->udc_driver->notify_event(udc,
- CI13XXX_CONTROLLER_SUSPEND_EVENT);
- spin_lock(udc->lock);
- }
+ isr_suspend_handler(udc);
isr_statistics.sli++;
}
retval = IRQ_HANDLED;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 8e2b093..8cb62da 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -124,6 +124,8 @@
#define CI13XXX_CONTROLLER_CONNECT_EVENT 1
#define CI13XXX_CONTROLLER_SUSPEND_EVENT 2
#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT 3
+#define CI13XXX_CONTROLLER_RESUME_EVENT 4
+#define CI13XXX_CONTROLLER_DISCONNECT_EVENT 5
void (*notify_event) (struct ci13xxx *udc, unsigned event);
};
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 0ff0a48..5b05c5b 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -673,8 +673,7 @@
{
struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
- if (aca_enabled() || (test_bit(ID, &motg->inputs) &&
- !test_bit(ID_A, &motg->inputs)))
+ if (aca_enabled())
return 0;
if (suspend) {
@@ -688,6 +687,14 @@
clear_bit(A_BUS_REQ, &motg->inputs);
queue_work(system_nrt_wq, &motg->sm_work);
break;
+ case OTG_STATE_B_PERIPHERAL:
+ pr_debug("peripheral bus suspend\n");
+ if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+ break;
+ set_bit(A_BUS_SUSPEND, &motg->inputs);
+ queue_work(system_nrt_wq, &motg->sm_work);
+ break;
+
default:
break;
}
@@ -701,6 +708,13 @@
/* ensure hardware is not in low power mode */
pm_runtime_resume(otg->dev);
break;
+ case OTG_STATE_B_PERIPHERAL:
+ pr_debug("peripheral bus resume\n");
+ if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+ break;
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ queue_work(system_nrt_wq, &motg->sm_work);
+ break;
default:
break;
}
@@ -718,7 +732,7 @@
struct usb_bus *bus = otg->host;
struct msm_otg_platform_data *pdata = motg->pdata;
int cnt = 0;
- bool host_bus_suspend, dcp;
+ bool host_bus_suspend, device_bus_suspend, dcp;
u32 phy_ctrl_val = 0, cmd_val;
unsigned ret;
u32 portsc;
@@ -728,6 +742,9 @@
disable_irq(motg->irq);
host_bus_suspend = otg->host && !test_bit(ID, &motg->inputs);
+ device_bus_suspend = otg->gadget && test_bit(ID, &motg->inputs) &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
dcp = motg->chg_type == USB_DCP_CHARGER;
/*
* Chipidea 45-nm PHY suspend sequence:
@@ -791,8 +808,8 @@
* PMIC notifications are unavailable.
*/
cmd_val = readl_relaxed(USB_USBCMD);
- if (host_bus_suspend || (motg->pdata->otg_control == OTG_PHY_CONTROL &&
- dcp))
+ if (host_bus_suspend || device_bus_suspend ||
+ (motg->pdata->otg_control == OTG_PHY_CONTROL && dcp))
cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
else
cmd_val |= ULPI_STP_CTRL;
@@ -802,7 +819,8 @@
* BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
* PHY retention and collapse can not happen with VDP_SRC enabled.
*/
- if (motg->caps & ALLOW_PHY_RETENTION && !host_bus_suspend && !dcp) {
+ if (motg->caps & ALLOW_PHY_RETENTION && !host_bus_suspend &&
+ !device_bus_suspend && !dcp) {
phy_ctrl_val = readl_relaxed(USB_PHY_CTRL);
if (motg->pdata->otg_control == OTG_PHY_CONTROL)
/* Enable PHY HV interrupts to wake MPM/Link */
@@ -2133,6 +2151,13 @@
*/
otg->host->is_b_host = 1;
msm_otg_start_host(otg, 1);
+ } else if (test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ test_bit(B_SESS_VLD, &motg->inputs)) {
+ pr_debug("a_bus_suspend && b_sess_vld\n");
+ if (motg->caps & ALLOW_LPM_ON_DEV_SUSPEND) {
+ pm_runtime_put_noidle(otg->dev);
+ pm_runtime_suspend(otg->dev);
+ }
} else if (test_bit(ID_C, &motg->inputs)) {
msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX);
}
@@ -2569,6 +2594,8 @@
} else {
pr_debug("BSV clear\n");
clear_bit(B_SESS_VLD, &motg->inputs);
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+
msm_chg_check_aca_intr(motg);
}
work = 1;
@@ -3389,6 +3416,9 @@
motg->caps = ALLOW_PHY_RETENTION;
}
+ if (motg->pdata->enable_lpm_on_dev_suspend)
+ motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND;
+
wake_lock(&motg->wlock);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index 7f603dd..0212142 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -26,6 +26,8 @@
#include "external_common.h"
#include "mhl_api.h"
+#include "mdp.h"
+
struct external_common_state_type *external_common_state;
EXPORT_SYMBOL(external_common_state);
DEFINE_MUTEX(external_common_state_hpd_mutex);
@@ -77,6 +79,23 @@
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF};
#endif /* DEBUG_EDID */
+#define DMA_E_BASE 0xB0000
+void mdp_vid_quant_set(void)
+{
+ if ((external_common_state->video_resolution == \
+ HDMI_VFRMT_720x480p60_4_3) || \
+ (external_common_state->video_resolution == \
+ HDMI_VFRMT_720x480p60_16_9)) {
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x70, 0x00EB0010);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x74, 0x00EB0010);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x78, 0x00EB0010);
+ } else {
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x70, 0x00FF0000);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x74, 0x00FF0000);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x78, 0x00FF0000);
+ }
+}
+
const char *video_format_2string(uint32 format)
{
switch (format) {
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 7f6585c..a372016 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -3641,8 +3641,8 @@
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10}, /*00*/
{0x18, 0x18, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x18, 0x28, 0x18}, /*01*/
- {0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
- 0x04, 0x04, 0x04, 0x04, 0x88, 0x04, 0x04}, /*02*/
+ {0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x88, 0x00, 0x04}, /*02*/
{0x02, 0x06, 0x11, 0x15, 0x04, 0x13, 0x10, 0x05, 0x1F,
0x14, 0x20, 0x22, 0x21, 0x01, 0x03, 0x11}, /*03*/
{0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/video/msm/logo.c b/drivers/video/msm/logo.c
index 1b5d7c5..57d754e 100644
--- a/drivers/video/msm/logo.c
+++ b/drivers/video/msm/logo.c
@@ -37,7 +37,7 @@
}
/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */
-int load_565rle_image(char *filename)
+int load_565rle_image(char *filename, bool bf_supported)
{
struct fb_info *info;
int fd, count, err = 0;
@@ -76,7 +76,7 @@
max = fb_width(info) * fb_height(info);
ptr = data;
- if (info->node == 1 || info->node == 2) {
+ if (bf_supported && (info->node == 1 || info->node == 2)) {
err = -EPERM;
pr_err("%s:%d no info->creen_base on fb%d!\n",
__func__, __LINE__, info->node);
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 6224dba..40f62b9 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -836,4 +836,5 @@
unsigned long srcp0_addr, unsigned long srcp0_size,
unsigned long srcp1_addr, unsigned long srcp1_size);
+void mdp_vid_quant_set(void);
#endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 860209f..de254d0 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -766,6 +766,7 @@
int mdp4_igc_lut_config(struct mdp_igc_lut_data *cfg);
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe);
void mdp4_iommu_attach(void);
+void mdp4_iommu_detach(void);
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
struct mdp4_overlay_pipe **ppipe);
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 2e69ea6..034d6b6 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -287,9 +287,7 @@
MDP_OUTP(MDP_BASE + 0xb3014, 0x1000080);
MDP_OUTP(MDP_BASE + 0xb4004, 0x67686970);
} else {
- MDP_OUTP(MDP_BASE + 0xb0070, 0xff0000);
- MDP_OUTP(MDP_BASE + 0xb0074, 0xff0000);
- MDP_OUTP(MDP_BASE + 0xb0078, 0xff0000);
+ mdp_vid_quant_set();
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -670,12 +668,14 @@
case MDP_BGR_565:
case MDP_XRGB_8888:
case MDP_RGB_888:
+ case MDP_YCBCR_H1V1:
+ case MDP_YCRCB_H1V1:
*luma_off = pipe->src_x * pipe->bpp;
break;
default:
- pr_err("Source format %u not supported for x offset adjustment\n",
- pipe->src_format);
+ pr_err("%s: fmt %u not supported for adjustment\n",
+ __func__, pipe->src_format);
break;
}
}
@@ -1552,15 +1552,14 @@
mixer, data, flush_bits);
outpdw(MDP_BASE + off, data); /* LAYERMIXER_IN_CFG */
- if (pull_mode)
+ if (pull_mode) {
outpdw(MDP_BASE + 0x18000, flush_bits);
+ /* wait for vsync on both pull mode interfaces */
+ msleep(20);
+ }
}
if (ctrl->mixer_cfg[MDP4_MIXER2] != cfg[MDP4_MIXER2]) {
- /* wait for vsync on both pull mode interfaces */
- if (pull_mode)
- msleep(20);
-
off = 0x100F0;
ctrl->mixer_cfg[MDP4_MIXER2] = cfg[MDP4_MIXER2];
data = cfg[MDP4_MIXER2];
@@ -1700,7 +1699,8 @@
/*
* If solid fill is enabled, flip and scale
* have to be disabled. otherwise, h/w
- * underruns.
+ * underruns. Also flush the pipe inorder
+ * to take solid fill into effect.
*/
op_mode = inpdw(rgb_base + 0x0058);
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
@@ -1708,6 +1708,7 @@
outpdw(rgb_base + 0x0058, op_mode);
outpdw(rgb_base + 0x50, rgb_src_format);
outpdw(rgb_base + 0x1008, constant_color);
+ mdp4_overlay_reg_flush(bg_pipe, 0);
}
} else if (fg_alpha) {
blend_op = (MDP4_BLEND_BG_ALPHA_FG_PIXEL |
@@ -2983,33 +2984,34 @@
} msm_iommu_ctx_names[] = {
/* Display */
{
- .name = "mdp_vg1",
+ .name = "mdp_port0_cb0",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_vg2",
+ .name = "mdp_port0_cb1",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_rgb1",
+ .name = "mdp_port1_cb0",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_rgb2",
+ .name = "mdp_port1_cb1",
.domain = DISPLAY_DOMAIN,
},
};
+static int iommu_enabled;
+
void mdp4_iommu_attach(void)
{
- static int done;
struct iommu_domain *domain;
int i;
- if (!done) {
+ if (!iommu_enabled) {
for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) {
int domain_idx;
struct device *ctx = msm_iommu_get_ctx(
@@ -3032,7 +3034,35 @@
continue;
}
}
- done = 1;
+ pr_debug("Attached MDP IOMMU device\n");
+ iommu_enabled = 1;
+ }
+}
+
+void mdp4_iommu_detach(void)
+{
+ struct iommu_domain *domain;
+ int i;
+
+ if (iommu_enabled) {
+ for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) {
+ int domain_idx;
+ struct device *ctx = msm_iommu_get_ctx(
+ msm_iommu_ctx_names[i].name);
+
+ if (!ctx)
+ continue;
+
+ domain_idx = msm_iommu_ctx_names[i].domain;
+
+ domain = msm_get_iommu_domain(domain_idx);
+ if (!domain)
+ continue;
+
+ iommu_detach_device(domain, ctx);
+ }
+ pr_debug("Detached MDP IOMMU device\n");
+ iommu_enabled = 0;
}
}
@@ -3121,10 +3151,12 @@
else
mdp4_overlay_rgb_setup(pipe);
+ if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- mdp4_overlay_reg_flush(pipe, 1);
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
} else {
#ifdef CONFIG_FB_MSM_MIPI_DSI
diff --git a/drivers/video/msm/mdp4_overlay_atv.c b/drivers/video/msm/mdp4_overlay_atv.c
index dd827aa..753ff23 100644
--- a/drivers/video/msm/mdp4_overlay_atv.c
+++ b/drivers/video/msm/mdp4_overlay_atv.c
@@ -113,11 +113,10 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
if (ret == 0)
mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -185,8 +184,8 @@
pipe->srcp0_addr = (uint32)(buf + buf_offset);
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
(int)pipe, pipe->pipe_ndx);
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 8ab12590..8bed42d 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -197,8 +197,6 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
/*
@@ -275,6 +273,7 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
mdp_histogram_ctrl_all(TRUE);
@@ -311,6 +310,7 @@
mdp4_iommu_unmap(dsi_pipe);
}
+ mdp4_iommu_detach();
return ret;
}
@@ -386,6 +386,8 @@
mdp4_overlay_dmap_cfg(mfd, 1);
+ mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
mb();
@@ -705,8 +707,8 @@
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_dsi_video_start();
mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index e3917e6..8692b09 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -343,8 +343,8 @@
mdp4_overlay_rgb_setup(pipe);
}
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
dtv_pipe = pipe; /* keep it */
}
@@ -693,8 +693,8 @@
pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
mdp4_overlay_rgb_setup(pipe);
}
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_dtv_start();
mdp4_overlay_dtv_ov_done_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 3f90380..72722ef 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -159,8 +159,6 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
/*
@@ -243,6 +241,7 @@
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
#ifdef CONFIG_MSM_BUS_SCALING
mdp_bus_scale_update_request(2);
@@ -291,6 +290,7 @@
mdp_bus_scale_update_request(0);
#endif
+ mdp4_iommu_detach();
return ret;
}
@@ -597,8 +597,8 @@
pipe->srcp0_addr = (uint32)(buf + buf_offset);
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_lcdc_start();
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 2fba83d..342f565 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -273,6 +273,7 @@
if (node) {
list_del(&(node->active_entry));
node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
}
mutex_unlock(&mfd->writeback_mutex);
@@ -295,6 +296,7 @@
mutex_lock(&mfd->writeback_mutex);
list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
mutex_unlock(&mfd->writeback_mutex);
+ mfd->writeback_active_cnt--;
mutex_unlock(&mfd->unregister_mutex);
wake_up(&mfd->wait_q);
}
@@ -323,6 +325,7 @@
if (node) {
list_del(&(node->active_entry));
node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
}
mutex_unlock(&mfd->writeback_mutex);
@@ -367,6 +370,7 @@
mutex_lock(&mfd->writeback_mutex);
list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
+ mfd->writeback_active_cnt--;
mutex_unlock(&mfd->writeback_mutex);
wake_up(&mfd->wait_q);
fail_no_blt_addr:
@@ -523,13 +527,26 @@
return rc;
}
+static bool is_writeback_inactive(struct msm_fb_data_type *mfd)
+{
+ bool active;
+ mutex_lock(&mfd->writeback_mutex);
+ active = !mfd->writeback_active_cnt;
+ mutex_unlock(&mfd->writeback_mutex);
+ return active;
+}
int mdp4_writeback_stop(struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
mutex_lock(&mfd->writeback_mutex);
mfd->writeback_state = WB_STOPING;
mutex_unlock(&mfd->writeback_mutex);
+ /* Wait for all pending writebacks to finish */
+ wait_event_interruptible(mfd->wait_q, is_writeback_inactive(mfd));
+
+ /* Wake up dequeue thread in case of no UI update*/
wake_up(&mfd->wait_q);
+
return 0;
}
int mdp4_writeback_init(struct fb_info *info)
@@ -549,8 +566,19 @@
struct list_head *ptr, *next;
struct msmfb_writeback_data_list *temp;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ int rc = 0;
+
mutex_lock(&mfd->unregister_mutex);
mutex_lock(&mfd->writeback_mutex);
+
+ if (mfd->writeback_state != WB_STOPING &&
+ mfd->writeback_state != WB_STOP) {
+ pr_err("%s called without stopping\n", __func__);
+ rc = -EPERM;
+ goto terminate_err;
+
+ }
+
if (!list_empty(&mfd->writeback_register_queue)) {
list_for_each_safe(ptr, next,
&mfd->writeback_register_queue) {
@@ -564,7 +592,10 @@
INIT_LIST_HEAD(&mfd->writeback_register_queue);
INIT_LIST_HEAD(&mfd->writeback_busy_queue);
INIT_LIST_HEAD(&mfd->writeback_free_queue);
+
+
+terminate_err:
mutex_unlock(&mfd->writeback_mutex);
mutex_unlock(&mfd->unregister_mutex);
- return 0;
+ return rc;
}
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index ce8744b..ff08548 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -48,11 +48,6 @@
#include "mdp.h"
#include "mdp4.h"
-#ifdef CONFIG_FB_MSM_LOGO
-#define INIT_IMAGE_FILE "/initlogo.rle"
-extern int load_565rle_image(char *filename);
-#endif
-
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MSM_FB_NUM 3
#endif
@@ -60,6 +55,7 @@
static unsigned char *fbram;
static unsigned char *fbram_phys;
static int fbram_size;
+static boolean bf_supported;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -375,6 +371,9 @@
#ifdef CONFIG_FB_MSM_OVERLAY
mfd->overlay_play_enable = 1;
#endif
+
+ bf_supported = mdp4_overlay_borderfill_supported();
+
rc = msm_fb_register(mfd);
if (rc)
return rc;
@@ -1183,7 +1182,7 @@
* calculate smem_len based on max size of two supplied modes.
* Only fb0 has mem. fb1 and fb2 don't have mem.
*/
- if (mfd->index == 0)
+ if (!bf_supported || mfd->index == 0)
fix->smem_len = MAX((msm_fb_line_length(mfd->index,
panel_info->xres,
bpp) *
@@ -1302,7 +1301,7 @@
fbram_phys += fbram_offset;
fbram_size -= fbram_offset;
- if (mfd->index == 0)
+ if (!bf_supported || mfd->index == 0)
if (fbram_size < fix->smem_len) {
pr_err("error: no more framebuffer memory!\n");
return -ENOMEM;
@@ -1320,7 +1319,7 @@
fbi->fix.smem_start, mfd->map_buffer->iova[0],
mfd->map_buffer->iova[1]);
}
- if (mfd->index == 0)
+ if (!bf_supported || mfd->index == 0)
memset(fbi->screen_base, 0x0, fix->smem_len);
mfd->op_enable = TRUE;
@@ -1366,7 +1365,9 @@
mfd->index, fbi->var.xres, fbi->var.yres, fbi->fix.smem_len);
#ifdef CONFIG_FB_MSM_LOGO
- if (!load_565rle_image(INIT_IMAGE_FILE)) ; /* Flip buffer */
+ /* Flip buffer */
+ if (!load_565rle_image(INIT_IMAGE_FILE, bf_supported))
+ ;
#endif
ret = 0;
@@ -1526,9 +1527,10 @@
}
if (!mfd->ref_cnt) {
- if ((info->node != 1) && (info->node != 2)) {
+ if (!bf_supported ||
+ (info->node != 1 && info->node != 2))
mdp_set_dma_pan_info(info, NULL, TRUE);
- } else
+ else
pr_debug("%s:%d no mdp_set_dma_pan_info %d\n",
__func__, __LINE__, info->node);
@@ -1581,7 +1583,8 @@
/*
* If framebuffer is 1 or 2, io pen display is not allowed.
*/
- if (info->node == 1 || info->node == 2) {
+ if (bf_supported &&
+ (info->node == 1 || info->node == 2)) {
pr_err("%s: no pan display for fb%d!",
__func__, info->node);
return -EPERM;
@@ -1761,7 +1764,8 @@
if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
return -EINVAL;
- if ((info->node != 1) && (info->node != 2))
+ if (!bf_supported ||
+ (info->node != 1 && info->node != 2))
if (info->fix.smem_len <
(var->xres_virtual*
var->yres_virtual*
@@ -2619,7 +2623,8 @@
struct mdp_blit_req_list req_list_header;
int count, i, req_list_count;
- if (info->node == 1 || info->node == 2) {
+ if (bf_supported &&
+ (info->node == 1 || info->node == 2)) {
pr_err("%s: no pan display for fb%d.",
__func__, info->node);
return -EPERM;
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index b63c022..87753b2 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -184,6 +184,7 @@
u32 use_ov0_blt, ov0_blt_state;
u32 use_ov1_blt, ov1_blt_state;
u32 writeback_state;
+ u32 writeback_active_cnt;
int cont_splash_done;
};
@@ -214,4 +215,9 @@
int msm_fb_check_frame_rate(struct msm_fb_data_type *mfd,
struct fb_info *info);
+#ifdef CONFIG_FB_MSM_LOGO
+#define INIT_IMAGE_FILE "/initlogo.rle"
+int load_565rle_image(char *filename, bool bf_supported);
+#endif
+
#endif /* MSM_FB_H */
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 86f282e..ac1ff24 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -469,6 +469,8 @@
u32 ddl_check_reconfig(struct ddl_client_context *ddl);
void ddl_handle_reconfig(u32 res_change, struct ddl_client_context *ddl);
void ddl_fill_dec_desc_buffer(struct ddl_client_context *ddl);
+void ddl_set_vidc_timeout(struct ddl_client_context *ddl);
+
#ifdef DDL_BUF_LOG
void ddl_list_buffers(struct ddl_client_context *ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
index 41604b0..50c3696 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -55,6 +55,13 @@
#define DDL_HW_TIMEOUT_IN_MS 1000
#define DDL_STREAMBUF_ALIGN_GUARD_BYTES 0x7FF
+#define DDL_VIDC_1080P_48MHZ (48000000)
+#define DDL_VIDC_1080P_133MHZ (133330000)
+#define DDL_VIDC_1080P_200MHZ (200000000)
+#define DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE (0xCB8)
+#define DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE (0x2355)
+#define DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE (0x3500)
+
#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
#define DDL_ENC_MIN_DPB_BUFFERS 2
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 6d3a05a..b480b42 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -744,11 +744,11 @@
memset(dec_bufs->desc.align_virtual_addr,
0, buf_size.sz_desc);
msm_ion_do_cache_op(
- ddl_context->video_ion_client,
- dec_bufs->desc.alloc_handle,
- dec_bufs->desc.alloc_handle,
- dec_bufs->desc.buffer_size,
- ION_IOC_CLEAN_INV_CACHES);
+ ddl_context->video_ion_client,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.virtual_base_addr,
+ dec_bufs->desc.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
}
}
@@ -1061,3 +1061,24 @@
ip_bitstream->desc_buf,
ip_bitstream->desc_size);
}
+
+void ddl_set_vidc_timeout(struct ddl_client_context *ddl)
+{
+ unsigned long core_clk_rate;
+ u32 vidc_time_out = 0;
+ if (ddl->codec_data.decoder.idr_only_decoding) {
+ vidc_time_out = 2 * DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ } else {
+ res_trk_get_clk_rate(&core_clk_rate);
+ if (core_clk_rate == DDL_VIDC_1080P_48MHZ)
+ vidc_time_out = DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE;
+ else if (core_clk_rate == DDL_VIDC_1080P_133MHZ)
+ vidc_time_out = DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE;
+ else
+ vidc_time_out = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ }
DDL_MSG_HIGH("%s Video core time out value = 0x%x",
__func__, vidc_time_out);
+ vidc_sm_set_video_core_timeout_value(
+ &ddl->shared_mem[ddl->command_channel], vidc_time_out);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index ac81916..878db62 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -243,6 +243,10 @@
#define VIDC_SM_NUM_STUFF_BYTES_CONSUME_ADDR 0X01ac
+#define VIDC_SM_TIMEOUT_VALUE_ADDR 0x0158
+#define VIDC_SM_TIMEOUT_VALUE_BMSK 0xffffffff
+#define VIDC_SM_TIMEOUT_VALUE_SHFT 0
+
#define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_BMSK 0x40
#define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_SHFT 6
@@ -862,3 +866,11 @@
*output_buffer_size = DDL_MEM_READ_32(shared_mem,
VIDC_SM_BATCH_OUTPUT_SIZE_ADDR);
}
+
+void vidc_sm_set_video_core_timeout_value(struct ddl_buf_addr *shared_mem,
+ u32 timeout)
+{
+ DDL_MEM_WRITE_32(shared_mem, VIDC_SM_TIMEOUT_VALUE_ADDR,
+ timeout);
+}
+
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 7d9896f..6cd75595 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -191,4 +191,6 @@
u32 output_buffer_size);
void vidc_sm_get_encoder_batch_output_size(struct ddl_buf_addr *shared_mem,
u32 *output_buffer_size);
+void vidc_sm_set_video_core_timeout_value(struct ddl_buf_addr *shared_mem,
+ u32 timeout);
#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 97c8d0d..d0cf4e8 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -977,6 +977,7 @@
DDL_MSG_ERROR("STATE-CRITICAL");
return VCD_ERR_FAIL;
}
+ ddl_set_vidc_timeout(ddl);
ddl_vidc_decode_set_metadata_output(decoder);
if (decoder->dp_buf.no_of_dec_pic_buf <
decoder->client_output_buf_req.actual_count)
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index e71259a..c8365ce 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -382,7 +382,7 @@
return status;
}
-static u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
+u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
{
u32 status = true;
mutex_lock(&resource_context.lock);
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
index 99b123c..2ae2512 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
@@ -49,4 +49,5 @@
int res_trk_disable_footswitch(void);
void res_trk_release_fw_addr(void);
u32 res_trk_estimate_perf_level(u32 pn_perf_lvl);
+u32 res_trk_get_clk_rate(unsigned long *phclk_rate);
#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6395692..47b1fe3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -151,6 +151,7 @@
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_SANITIZE, /* sanitize */
__REQ_NR_BITS, /* stops here */
};
@@ -161,13 +162,14 @@
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_SANITIZE (1 << __REQ_SANITIZE)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | REQ_SANITIZE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b13021..4dc4b3e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -403,6 +403,7 @@
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SANITIZE 19 /* supports SANITIZE */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -485,6 +486,7 @@
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_sanitize(q) test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
@@ -922,6 +924,7 @@
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 7f963e6..5abadb6 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -99,16 +99,40 @@
} dmx_filter_t;
+/* Filter flags */
+#define DMX_CHECK_CRC 0x01
+#define DMX_ONESHOT 0x02
+#define DMX_IMMEDIATE_START 0x04
+#define DMX_ENABLE_INDEXING 0x08
+#define DMX_KERNEL_CLIENT 0x8000
+
struct dmx_sct_filter_params
{
__u16 pid;
dmx_filter_t filter;
__u32 timeout;
__u32 flags;
-#define DMX_CHECK_CRC 1
-#define DMX_ONESHOT 2
-#define DMX_IMMEDIATE_START 4
-#define DMX_KERNEL_CLIENT 0x8000
+};
+
+
+/* Indexing: supported video standards */
+enum dmx_indexing_video_standard {
+ DMX_INDEXING_MPEG2,
+ DMX_INDEXING_H264,
+ DMX_INDEXING_VC1
+};
+
+/* Indexing: Supported video profiles */
+enum dmx_indexing_video_profile {
+ DMX_INDEXING_MPEG2_ANY,
+ DMX_INDEXING_H264_ANY,
+ DMX_INDEXING_VC1_ANY
+};
+
+/* Indexing: video configuration parameters */
+struct dmx_indexing_video_params {
+ enum dmx_indexing_video_standard standard;
+ enum dmx_indexing_video_profile profile;
};
@@ -119,6 +143,8 @@
dmx_output_t output;
dmx_pes_type_t pes_type;
__u32 flags;
+
+ struct dmx_indexing_video_params video_params;
};
struct dmx_buffer_status {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35e4edf..1c91125 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -317,6 +317,7 @@
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
+#define BLKSANITIZE _IO(0x12, 126)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
diff --git a/include/linux/input.h b/include/linux/input.h
index 6e7d6d9..191f7d7 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -817,7 +817,8 @@
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
#define SW_HPHL_OVERCURRENT 0x0d /* set = over current on left hph */
#define SW_HPHR_OVERCURRENT 0x0e /* set = over current on right hph */
-#define SW_MAX 0x0f
+#define SW_UNSUPPORT_INSERT 0x0f /* set = unsupported device inserted */
+#define SW_MAX 0x10
#define SW_CNT (SW_MAX+1)
/*
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index fca9a94..7917d24 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -30,6 +30,13 @@
(((ver == TABLA_VERSION_1_0) || (ver == TABLA_VERSION_1_1)) ? 1 : 0)
#define TABLA_IS_2_0(ver) ((ver == TABLA_VERSION_2_0) ? 1 : 0)
+#define SITAR_VERSION_1P0 0
+#define SITAR_VERSION_1P1 1
+#define SITAR_IS_1P0(ver) \
+ ((ver == SITAR_VERSION_1P0) ? 1 : 0)
+#define SITAR_IS_1P1(ver) \
+ ((ver == SITAR_VERSION_1P1) ? 1 : 0)
+
enum {
TABLA_IRQ_SLIMBUS = 0,
TABLA_IRQ_MBHC_REMOVAL,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index aa808dc..14f2d43 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -52,6 +52,9 @@
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
+ u8 max_packed_writes;
+ u8 max_packed_reads;
+ u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c9a17de..2489bb5 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -249,7 +249,11 @@
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
-#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
+#define MMC_CAP2_DETECT_ON_ERR (1 << 7) /* On I/O err check card removal */
+#define MMC_CAP2_PACKED_RD (1 << 10) /* Allow packed read */
+#define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
+#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
+ MMC_CAP2_PACKED_WR) /* Allow packed commands */
mmc_pm_flag_t pm_caps; /* supported pm features */
unsigned int power_notify_type;
#define MMC_HOST_PW_NOTIFY_NONE 0
@@ -344,12 +348,8 @@
#ifdef CONFIG_MMC_PERF_PROFILING
struct {
- unsigned long rbytes_mmcq; /* Rd bytes MMC queue */
- unsigned long wbytes_mmcq; /* Wr bytes MMC queue */
unsigned long rbytes_drv; /* Rd bytes MMC Host */
unsigned long wbytes_drv; /* Wr bytes MMC Host */
- ktime_t rtime_mmcq; /* Rd time MMC queue */
- ktime_t wtime_mmcq; /* Wr time MMC queue */
ktime_t rtime_drv; /* Rd time MMC Host */
ktime_t wtime_drv; /* Wr time MMC Host */
ktime_t start;
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index e124fbe..06539dff 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -140,6 +140,7 @@
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
#define R1_APP_CMD (1 << 5) /* sr, c */
+#define R1_EXP_EVENT (1 << 6) /* sr, a */
#define R1_STATE_IDLE 0
#define R1_STATE_READY 1
@@ -275,6 +276,10 @@
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
+#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
+#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
+#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
+#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -314,6 +319,8 @@
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
+#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
+#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
/*
@@ -426,6 +433,14 @@
#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
+
+#define EXT_CSD_PACKED_EVENT_EN (1 << 3)
+
+#define EXT_CSD_PACKED_FAILURE (1 << 3)
+
+#define EXT_CSD_PACKED_GENERIC_ERROR (1 << 0)
+#define EXT_CSD_PACKED_INDEXED_ERROR (1 << 1)
+
/*
* MMC_SWITCH access modes
*/
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index cb394e8..b4e14d2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -292,7 +292,7 @@
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_SW_MAX 0x10
#define INPUT_DEVICE_ID_MATCH_BUS 1
#define INPUT_DEVICE_ID_MATCH_VENDOR 2
diff --git a/include/linux/smux.h b/include/linux/smux.h
new file mode 100644
index 0000000..64d0ed6
--- /dev/null
+++ b/include/linux/smux.h
@@ -0,0 +1,295 @@
+/* include/linux/smux.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_H
+#define SMUX_H
+
+/**
+ * Logical Channel IDs
+ *
+ * This must be identical between local and remote clients.
+ */
+enum {
+ /* Data Ports */
+ SMUX_DATA_0,
+ SMUX_DATA_1,
+ SMUX_DATA_2,
+ SMUX_DATA_3,
+ SMUX_DATA_4,
+ SMUX_DATA_5,
+ SMUX_DATA_6,
+ SMUX_DATA_7,
+ SMUX_DATA_8,
+ SMUX_DATA_9,
+ SMUX_USB_RMNET_DATA_0,
+ SMUX_USB_DUN_0,
+ SMUX_USB_DIAG_0,
+ SMUX_SYS_MONITOR_0,
+ SMUX_CSVT_0,
+ /* add new data ports here */
+
+ /* Control Ports */
+ SMUX_DATA_CTL_0 = 32,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_DATA_CTL_8,
+ SMUX_DATA_CTL_9,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_USB_DUN_CTL_0_UNUSED,
+ SMUX_USB_DIAG_CTL_0,
+ SMUX_SYS_MONITOR_CTL_0,
+ SMUX_CSVT_CTL_0,
+ /* add new control ports here */
+
+ SMUX_TEST_LCID,
+ SMUX_NUM_LOGICAL_CHANNELS,
+};
+
+/**
+ * Notification events that are passed to the notify() function.
+ *
+ * If the @metadata argument in the notifier is non-null, then it will
+ * point to the associated struct smux_meta_* structure.
+ */
+enum {
+ SMUX_CONNECTED, /* @metadata is null */
+ SMUX_DISCONNECTED,
+ SMUX_READ_DONE,
+ SMUX_READ_FAIL,
+ SMUX_WRITE_DONE,
+ SMUX_WRITE_FAIL,
+ SMUX_TIOCM_UPDATE,
+ SMUX_LOW_WM_HIT, /* @metadata is NULL */
+ SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+};
+
+/**
+ * Channel options used to modify channel behavior.
+ */
+enum {
+ SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
+ SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+};
+
+/**
+ * Metadata for SMUX_DISCONNECTED notification
+ *
+ * @is_ssr: Disconnect caused by subsystem restart
+ */
+struct smux_meta_disconnected {
+ int is_ssr;
+};
+
+/**
+ * Metadata for SMUX_READ_DONE/SMUX_READ_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer:   Buffer pointer returned by get_rx_buffer()
+ * @len:      Buffer length returned by get_rx_buffer()
+ */
+struct smux_meta_read {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_WRITE_DONE/SMUX_WRITE_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer:   Buffer pointer passed into msm_smux_write()
+ * @len:      Buffer length passed into msm_smux_write()
+ */
+struct smux_meta_write {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_TIOCM_UPDATE notification
+ *
+ * @tiocm_old: Previous TIOCM state
+ * @tiocm_new: Current TIOCM state
+ */
+struct smux_meta_tiocm {
+ uint32_t tiocm_old;
+ uint32_t tiocm_new;
+};
+
+
+#ifdef CONFIG_N_SMUX
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either not previously opened, or
+ * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
+ * has been received).
+ *
+ * Once the remote side is opened, the client will receive a SMUX_CONNECTED
+ * event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size));
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the client
+ * will receive a SMUX_DISCONNECTED notification.
+ */
+int msm_smux_close(uint8_t lcid);
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ * SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called, but
+ * the data will wait in the transmit queue until the channel has been
+ * fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len);
+
+/**
+ * Returns true if the TX queue is currently full (high water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is not full; 1 if it is full; < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid);
+
+/**
+ * Returns true if the TX queue has space for more packets (it is at or
+ * below the low water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is above low watermark
+ * 1 if it's at or below the low watermark
+ * < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid);
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ * < 0 Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid);
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear);
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear);
+
+#else /* !CONFIG_N_SMUX: static inline stubs so callers link cleanly */
+static inline int msm_smux_open(uint8_t lcid, void *priv,
+	void (*notify)(void *priv, int event_type, const void *metadata),
+	int (*get_rx_buffer)(void *priv, void **pkt_priv,
+					void **buffer, int size))
+{
+	return -ENODEV;
+}
+
+static inline int msm_smux_close(uint8_t lcid)
+{
+	return -ENODEV;
+}
+
+static inline int msm_smux_write(uint8_t lcid, void *pkt_priv,
+				 const void *data, int len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_full(uint8_t lcid)
+{
+	return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_low(uint8_t lcid)
+{
+	return -ENODEV;
+}
+
+static inline long msm_smux_tiocm_get(uint8_t lcid)
+{
+	return 0;
+}
+
+static inline int msm_smux_tiocm_set(uint8_t lcid, uint32_t set,
+				     uint32_t clear)
+{
+	return -ENODEV;
+}
+
+static inline int msm_smux_set_ch_option(uint8_t lcid, uint32_t set,
+					 uint32_t clear)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_N_SMUX */
+
+#endif /* SMUX_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1ff6b62..818d189 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -52,6 +52,7 @@
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
+#define N_SMUX 25 /* Serial MUX */
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 6e96f85..c68457e 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -177,6 +177,9 @@
* @enable_dcd: Enable Data Contact Detection circuit. if not set
* wait for 600msec before proceeding to primary
* detection.
+ * @enable_lpm_on_suspend: Enable the USB core to go into Low
+ * Power Mode, when USB bus is suspended but cable
+ * is connected.
* @bus_scale_table: parameters for bus bandwidth requirements
*/
struct msm_otg_platform_data {
@@ -192,6 +195,7 @@
bool mhl_enable;
bool disable_reset_on_disconnect;
bool enable_dcd;
+ bool enable_lpm_on_dev_suspend;
struct msm_bus_scale_pdata *bus_scale_table;
};
@@ -322,6 +326,11 @@
* voltage regulator(VDDCX).
*/
#define ALLOW_PHY_RETENTION BIT(1)
+ /*
+ * Allow putting the core in Low Power mode, when
+ * USB bus is suspended but cable is connected.
+ */
+#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2)
unsigned long lpm_flags;
#define PHY_PWR_COLLAPSED BIT(0)
#define PHY_RETENTIONED BIT(1)
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index 1c492f9..d7e65b0 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -35,8 +35,10 @@
const struct dev_pm_ops *pm_ops);
void wcnss_wlan_unregister_pm_ops(struct device *dev,
const struct dev_pm_ops *pm_ops);
-void wcnss_register_thermal_mitigation(void (*tm_notify)(int));
-void wcnss_unregister_thermal_mitigation(void (*tm_notify)(int));
+void wcnss_register_thermal_mitigation(struct device *dev,
+ void (*tm_notify)(struct device *dev, int));
+void wcnss_unregister_thermal_mitigation(
+ void (*tm_notify)(struct device *dev, int));
struct platform_device *wcnss_get_platform_device(void);
struct wcnss_wlan_config *wcnss_get_wlan_config(void);
int wcnss_wlan_power(struct device *dev,
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 4a62bc3..51b45ac 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -14,8 +14,8 @@
#ifndef VCAP_FMT_H
#define VCAP_FMT_H
-#define V4L2_BUF_TYPE_INTERLACED_IN_AFE (V4L2_BUF_TYPE_PRIVATE)
-#define V4L2_BUF_TYPE_INTERLACED_IN_DECODER (V4L2_BUF_TYPE_PRIVATE + 1)
+#define V4L2_BUF_TYPE_INTERLACED_IN_DECODER (V4L2_BUF_TYPE_PRIVATE)
+#define V4L2_BUF_TYPE_VP_OUT (V4L2_BUF_TYPE_PRIVATE + 1)
enum hal_vcap_mode {
HAL_VCAP_MODE_PRO = 0,
@@ -32,87 +32,7 @@
HAL_VCAP_RGB,
};
-enum hal_vcap_vc_fmt {
- /* 1080p */
- HAL_VCAP_YUV_1080p_60_RH = 0,
- HAL_VCAP_YUV_1080p_60_FL,
- HAL_VCAP_RGB_1080p_60_FL,
- HAL_VCAP_YUV_1080p_24_FL,
- HAL_VCAP_YUV_1080p_24_RH,
- HAL_VCAP_YUV_1080p_24_RW,
- HAL_VCAP_YUV_1080p_60_RW,
- HAL_VCAP_YUV_1080p_50_FL,
- HAL_VCAP_YUV_1080p_50_RH,
- HAL_VCAP_YUV_1080p_25_FL,
- HAL_VCAP_YUV_1080p_25_RH,
- HAL_VCAP_YUV_1080p_30_RH,
- HAL_VCAP_RGB_1080p_25_FL,
- HAL_VCAP_RGB_1080p_25_RH,
- /* 1080i */
- HAL_VCAP_YUV_1080i_60_FL,
- HAL_VCAP_YUV_1080i_60_RH,
- HAL_VCAP_YUV_1080i_60_RW,
- HAL_VCAP_YUV_1080i_50_FL,
- HAL_VCAP_YUV_1080i_50_RH,
- HAL_VCAP_YUV_1080i_50_RW,
- HAL_VCAP_RGB_1080i_50_FL,
- HAL_VCAP_RGB_1080i_50_RH,
- /* 480i */
- HAL_VCAP_YUV_480i_60_RH,
- HAL_VCAP_YUV_480i_60_FL,
- HAL_VCAP_YUV_480i_60_RW,
- HAL_VCAP_YUV_2880_480i_60_FL,
- HAL_VCAP_YUV_2880_480i_60_RH,
- /* 480p */
- HAL_VCAP_YUV_480p_60_RH,
- HAL_VCAP_RGB_480p_60_RH,
- HAL_VCAP_RGB_480p_60_FL,
- HAL_VCAP_YUV_480p_60_FL,
- HAL_VCAP_YUV_480p_60_RW,
- HAL_VCAP_YUV_2880_480p_60_FL,
- HAL_VCAP_YUV_2880_480p_60_RH,
- /* 720p */
- HAL_VCAP_YUV_720p_60_FL,
- HAL_VCAP_RGB_720p_60_FL,
- HAL_VCAP_YUV_720p_60_RW,
- HAL_VCAP_YUV_720p_60_RH,
- HAL_VCAP_YUV_720p_50_FL,
- HAL_VCAP_YUV_720p_50_RW,
- HAL_VCAP_YUV_720p_50_RH,
- /* 576p */
- HAL_VCAP_YUV_576p_50_FL,
- HAL_VCAP_RGB_576p_50_FL,
- HAL_VCAP_YUV_576p_50_RW,
- HAL_VCAP_YUV_576p_50_RH,
- HAL_VCAP_YUV_1440_576p_50_RH,
- HAL_VCAP_YUV_2880_576p_50_FL,
- HAL_VCAP_YUV_2880_576p_50_RH,
- /* 576i */
- HAL_VCAP_YUV_576i_50_FL,
- HAL_VCAP_YUV_576i_50_RW,
- HAL_VCAP_YUV_576i_50_RH,
- /* XGA 1024x768 */
- HAL_VCAP_YUV_XGA_FL,
- HAL_VCAP_YUV_XGA_RH,
- HAL_VCAP_YUV_XGA_RB,
- /* SXGA 1280x1024 */
- HAL_VCAP_YUV_SXGA_FL,
- HAL_VCAP_RGB_SXGA_FL,
- HAL_VCAP_YUV_SXGA_RH,
- HAL_VCAP_YUV_SXGA_RB,
- /* UXGA 1600x1200 */
- HAL_VCAP_YUV_UXGA_FL,
- HAL_VCAP_RGB_UXGA_FL,
- HAL_VCAP_YUV_UXGA_RH,
- HAL_VCAP_YUV_UXGA_RB,
- /* test odd height */
- HAL_VCAP_ODD_HEIGHT,
- /* test odd width RGB only */
- HAL_VCAP_ODD_WIDTH,
-};
-
struct v4l2_format_vc_ext {
- enum hal_vcap_vc_fmt format;
enum hal_vcap_mode mode;
enum hal_vcap_polar h_polar;
enum hal_vcap_polar v_polar;
@@ -136,5 +56,22 @@
uint32_t f2_vsync_h_end;
uint32_t f2_vsync_v_start;
uint32_t f2_vsync_v_end;
+ uint32_t sizeimage;
+ uint32_t bytesperline;
+};
+
+enum vcap_type {
+ VC_TYPE,
+ VP_IN_TYPE,
+ VP_OUT_TYPE,
+};
+
+struct vcap_priv_fmt {
+ enum vcap_type type;
+ union {
+ struct v4l2_format_vc_ext timing;
+ struct v4l2_pix_format pix;
+ /* Once VP is created there will be another type in here */
+ } u;
};
#endif
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 57f9703..374e681 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -14,6 +14,7 @@
#ifndef VCAP_V4L2_H
#define VCAP_V4L2_H
+#define TOP_FIELD_FIX
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/videodev2.h>
@@ -28,6 +29,12 @@
#include <media/vcap_fmt.h>
#include <mach/board.h>
+#define writel_iowmb(val, addr) \
+ do { \
+ __iowmb(); \
+ writel_relaxed(val, addr); \
+ } while (0)
+
struct vcap_client_data;
enum rdy_buf {
@@ -37,11 +44,34 @@
VC_BUF1N2 = 0x11 << 1,
};
+enum vp_state {
+ VP_UNKNOWN = 0,
+ VP_FRAME1,
+ VP_FRAME2,
+ VP_FRAME3,
+ VP_NORMAL,
+};
+
+enum nr_buf_pos {
+ BUF_NOT_IN_USE = 0,
+ NRT2_BUF,
+ T1_BUF,
+ T0_BUF,
+ TM1_BUF,
+};
+
struct vcap_buf_info {
unsigned long vaddr;
unsigned long size;
};
+enum vcap_op_mode {
+ UNKNOWN_VCAP_OP = 0,
+ VC_VCAP_OP,
+ VP_VCAP_OP,
+ VC_AND_VP_VCAP_OP,
+};
+
struct vcap_action {
struct list_head active;
@@ -61,13 +91,50 @@
int ini_jiffies;
};
+struct nr_buffer {
+ void *vaddr;
+ unsigned long paddr;
+ enum nr_buf_pos nr_pos;
+};
+
+struct vp_action {
+ struct list_head in_active;
+ struct list_head out_active;
+
+ /* Buffer index */
+ enum vp_state vp_state;
+#ifdef TOP_FIELD_FIX
+ bool top_field;
+#endif
+
+ /* Buffers inside vc */
+ struct vcap_buffer *bufTm1;
+ struct vcap_buffer *bufT0;
+ struct vcap_buffer *bufT1;
+ struct vcap_buffer *bufT2;
+ struct vcap_buffer *bufNRT2;
+
+ struct vcap_buffer *bufOut;
+
+ void *bufMotion;
+ struct nr_buffer bufNR;
+ bool nr_enabled;
+};
+
+struct vp_work_t {
+ struct work_struct work;
+ struct vcap_client_data *cd;
+ uint32_t irq;
+};
+
struct vcap_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd;
struct ion_client *ion_client;
- struct resource *vcapirq;
+ struct resource *vcirq;
+ struct resource *vpirq;
struct resource *vcapmem;
struct resource *vcapio;
@@ -87,15 +154,20 @@
struct vcap_client_data *vp_client;
atomic_t vc_enabled;
+ atomic_t vp_enabled;
+
atomic_t vc_resource;
atomic_t vp_resource;
+
+ struct workqueue_struct *vcap_wq;
+ struct vp_work_t vp_work;
+ struct vp_work_t vc_to_vp_work;
+ struct vp_work_t vp_to_vc_work;
};
struct vp_format_data {
unsigned int width, height;
- unsigned int pixelformat;
- enum v4l2_field field;
-
+ unsigned int pixfmt;
};
struct vcap_buffer {
@@ -107,18 +179,23 @@
};
struct vcap_client_data {
+ bool set_cap, set_decode, set_vp_o;
struct vcap_dev *dev;
struct vb2_queue vc_vidq;
- /*struct vb2_queue vb__vidq;*/
- /*struct vb2_queue vb_cap_vidq;*/
+ struct vb2_queue vp_in_vidq;
+ struct vb2_queue vp_out_vidq;
+
+ enum vcap_op_mode op_mode;
struct v4l2_format_vc_ext vc_format;
enum v4l2_buf_type vp_buf_type_field;
- struct vp_format_data vp_format;
+ struct vp_format_data vp_in_fmt;
+ struct vp_format_data vp_out_fmt;
struct vcap_action vid_vc_action;
+ struct vp_action vid_vp_action;
struct workqueue_struct *vcap_work_q;
struct ion_handle *vc_ion_handle;
@@ -126,7 +203,20 @@
uint32_t hold_vp;
spinlock_t cap_slock;
+ bool streaming;
};
+struct vcap_hacked_vals {
+ uint32_t value;
+ uint32_t offset;
+};
+
+extern struct vcap_hacked_vals hacked_buf[];
+
#endif
+int free_ion_handle(struct vcap_dev *dev, struct vb2_queue *q,
+ struct v4l2_buffer *b);
+
+int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
+ struct v4l2_buffer *b);
#endif
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5749293..47b856c 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -26,7 +26,7 @@
#define __HCI_CORE_H
#include <net/bluetooth/hci.h>
-
+#include <linux/wakelock.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP 0
#define HCI_PROTO_SCO 1
@@ -326,7 +326,7 @@
struct work_struct work_add;
struct work_struct work_del;
-
+ struct wake_lock idle_lock;
struct device dev;
atomic_t devref;
diff --git a/include/sound/jack.h b/include/sound/jack.h
index ccdc341..1b13cbb 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -35,24 +35,26 @@
* sound/core/jack.c.
*/
enum snd_jack_types {
- SND_JACK_HEADPHONE = 0x0001,
- SND_JACK_MICROPHONE = 0x0002,
+ SND_JACK_HEADPHONE = 0x0000001,
+ SND_JACK_MICROPHONE = 0x0000002,
SND_JACK_HEADSET = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE,
- SND_JACK_LINEOUT = 0x0004,
- SND_JACK_MECHANICAL = 0x0008, /* If detected separately */
- SND_JACK_VIDEOOUT = 0x0010,
+ SND_JACK_LINEOUT = 0x0000004,
+ SND_JACK_MECHANICAL = 0x0000008, /* If detected separately */
+ SND_JACK_VIDEOOUT = 0x0000010,
SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
- SND_JACK_OC_HPHL = 0x0020,
- SND_JACK_OC_HPHR = 0x0040,
+ /* */
+ SND_JACK_OC_HPHL = 0x0000020,
+ SND_JACK_OC_HPHR = 0x0000040,
+ SND_JACK_UNSUPPORTED = 0x0000080,
/* Kept separate from switches to facilitate implementation */
- SND_JACK_BTN_0 = 0x4000,
- SND_JACK_BTN_1 = 0x2000,
- SND_JACK_BTN_2 = 0x1000,
- SND_JACK_BTN_3 = 0x0800,
- SND_JACK_BTN_4 = 0x0400,
- SND_JACK_BTN_5 = 0x0200,
- SND_JACK_BTN_6 = 0x0100,
- SND_JACK_BTN_7 = 0x0080,
+ SND_JACK_BTN_0 = 0x4000000,
+ SND_JACK_BTN_1 = 0x2000000,
+ SND_JACK_BTN_2 = 0x1000000,
+ SND_JACK_BTN_3 = 0x0800000,
+ SND_JACK_BTN_4 = 0x0400000,
+ SND_JACK_BTN_5 = 0x0200000,
+ SND_JACK_BTN_6 = 0x0100000,
+ SND_JACK_BTN_7 = 0x0080000,
};
struct snd_jack {
diff --git a/include/sound/msm-dai-q6.h b/include/sound/msm-dai-q6.h
index 6328256..042aa6f 100644
--- a/include/sound/msm-dai-q6.h
+++ b/include/sound/msm-dai-q6.h
@@ -21,8 +21,7 @@
#define MSM_MI2S_CAP_RX 0
#define MSM_MI2S_CAP_TX 1
-struct msm_dai_auxpcm_pdata {
- const char *clk;
+struct msm_dai_auxpcm_config {
u16 mode;
u16 sync;
u16 frame;
@@ -36,4 +35,11 @@
u16 rx_sd_lines;
u16 tx_sd_lines;
};
+
+struct msm_dai_auxpcm_pdata {
+ const char *clk;
+ struct msm_dai_auxpcm_config mode_8k;
+ struct msm_dai_auxpcm_config mode_16k;
+};
+
#endif
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6957aa2..fa1d639 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1792,6 +1792,8 @@
rwbs[i++] = 'W';
else if (rw & REQ_DISCARD)
rwbs[i++] = 'D';
+ else if (rw & REQ_SANITIZE)
+ rwbs[i++] = 'Z';
else if (bytes)
rwbs[i++] = 'R';
else
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25b559b1..02ea082 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -448,6 +448,7 @@
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ wake_lock_init(&conn->idle_lock, WAKE_LOCK_SUSPEND, "bt_idle");
switch (type) {
case ACL_LINK:
@@ -521,6 +522,7 @@
/* Make sure no timers are running */
del_timer(&conn->idle_timer);
+ wake_lock_destroy(&conn->idle_lock);
del_timer(&conn->disc_timer);
del_timer(&conn->smp_timer);
__cancel_delayed_work(&conn->rssi_update_work);
@@ -962,9 +964,11 @@
}
timer:
- if (hdev->idle_timeout > 0)
+ if (hdev->idle_timeout > 0) {
mod_timer(&conn->idle_timer,
jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ wake_lock(&conn->idle_lock);
+ }
}
static inline void hci_conn_stop_rssi_timer(struct hci_conn *conn)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 395a95c..9e545f3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2556,6 +2556,9 @@
else
conn->power_save = 0;
}
+ if (conn->mode == HCI_CM_SNIFF)
+ if (wake_lock_active(&conn->idle_lock))
+ wake_unlock(&conn->idle_lock);
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, ev->status);
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 80453a9..675f45b 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -32,6 +32,7 @@
SW_VIDEOOUT_INSERT,
SW_HPHL_OVERCURRENT,
SW_HPHR_OVERCURRENT,
+ SW_UNSUPPORT_INSERT,
};
static int snd_jack_dev_free(struct snd_device *device)
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 9041bd7..ff83197 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1775,6 +1775,9 @@
{"HEADPHONE", NULL, "HPHL"},
{"HEADPHONE", NULL, "HPHR"},
+ {"HPHL DAC", NULL, "CP"},
+ {"HPHR DAC", NULL, "CP"},
+
{"HPHL", NULL, "HPHL DAC"},
{"HPHL DAC", "NULL", "DAC4 MUX"},
{"HPHR", NULL, "HPHR DAC"},
@@ -1950,9 +1953,11 @@
static void sitar_codec_enable_audio_mode_bandgap(struct snd_soc_codec *codec)
{
+ struct wcd9xxx *sitar_core = dev_get_drvdata(codec->dev->parent);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x0C, 0x04);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x80, 0x80);
+ if (SITAR_IS_1P0(sitar_core->version))
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x80, 0x80);
+
snd_soc_update_bits(codec, SITAR_A_BIAS_CURR_CTL_2, 0x0C, 0x08);
usleep_range(1000, 1000);
snd_soc_write(codec, SITAR_A_BIAS_REF_CTL, 0x1C);
@@ -1971,6 +1976,7 @@
enum sitar_bandgap_type choice)
{
struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx *sitar_core = dev_get_drvdata(codec->dev->parent);
/* TODO lock resources accessed by audio streams and threaded
* interrupt handlers
@@ -2005,7 +2011,9 @@
} else if (choice == SITAR_BANDGAP_OFF) {
snd_soc_update_bits(codec, SITAR_A_BIAS_CURR_CTL_2, 0x0C, 0x00);
snd_soc_write(codec, SITAR_A_BIAS_CENTRAL_BG_CTL, 0x50);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0xFF, 0x65);
+ if (SITAR_IS_1P0(sitar_core->version))
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1,
+ 0xFF, 0x65);
usleep_range(1000, 1000);
} else {
pr_err("%s: Error, Invalid bandgap settings\n", __func__);
@@ -4327,6 +4335,8 @@
}
/* Set voltage level and always use LDO */
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x0C,
+ (pdata->micbias.ldoh_v << 2));
snd_soc_update_bits(codec, SITAR_A_MICB_CFILT_1_VAL, 0xFC,
(k1 << 2));
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index cad9907..03640d4 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -56,7 +56,8 @@
#define NUM_ATTEMPTS_INSERT_DETECT 25
#define NUM_ATTEMPTS_TO_REPORT 5
-#define TABLA_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR)
+#define TABLA_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED)
#define TABLA_I2S_MASTER_MODE_MASK 0x08
@@ -102,6 +103,8 @@
#define TABLA_GPIO_IRQ_DEBOUNCE_TIME_US 5000
+#define TABLA_MBHC_GND_MIC_SWAP_THRESHOLD 2
+
#define TABLA_ACQUIRE_LOCK(x) do { mutex_lock(&x); } while (0)
#define TABLA_RELEASE_LOCK(x) do { mutex_unlock(&x); } while (0)
@@ -214,6 +217,7 @@
PLUG_TYPE_HEADSET,
PLUG_TYPE_HEADPHONE,
PLUG_TYPE_HIGH_HPH,
+ PLUG_TYPE_GND_MIC_SWAP,
};
enum tabla_mbhc_state {
@@ -2457,7 +2461,6 @@
tabla->mbhc_micbias_switched = true;
pr_debug("%s: VDDIO switch enabled\n", __func__);
-
} else if (!vddio_switch && tabla->mbhc_micbias_switched) {
if ((!checkpolling || tabla->mbhc_polling_active) &&
restartpolling)
@@ -3546,6 +3549,10 @@
if (tabla_is_digital_gain_register(reg))
return 1;
+ /* HPH status registers */
+ if (reg == TABLA_A_RX_HPH_L_STATUS || reg == TABLA_A_RX_HPH_R_STATUS)
+ return 1;
+
return 0;
}
@@ -4928,8 +4935,8 @@
tabla->buttons_pressed &=
~TABLA_JACK_BUTTON_MASK;
}
- pr_debug("%s: Reporting removal %d\n", __func__,
- jack_type);
+ pr_debug("%s: Reporting removal %d(%x)\n", __func__,
+ jack_type, tabla->hph_status);
tabla_snd_soc_jack_report(tabla,
tabla->mbhc_cfg.headset_jack,
tabla->hph_status,
@@ -4948,13 +4955,15 @@
if (jack_type == SND_JACK_HEADPHONE)
tabla->current_plug = PLUG_TYPE_HEADPHONE;
+ else if (jack_type == SND_JACK_UNSUPPORTED)
+ tabla->current_plug = PLUG_TYPE_GND_MIC_SWAP;
else if (jack_type == SND_JACK_HEADSET) {
tabla->mbhc_polling_active = true;
tabla->current_plug = PLUG_TYPE_HEADSET;
}
if (tabla->mbhc_cfg.headset_jack) {
- pr_debug("%s: Reporting insertion %d\n", __func__,
- jack_type);
+ pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
+ jack_type, tabla->hph_status);
tabla_snd_soc_jack_report(tabla,
tabla->mbhc_cfg.headset_jack,
tabla->hph_status,
@@ -5874,8 +5883,8 @@
return IRQ_HANDLED;
}
-static bool tabla_is_invalid_insertion_range(struct snd_soc_codec *codec,
- s32 mic_volt, bool highhph)
+static bool tabla_is_inval_ins_range(struct snd_soc_codec *codec,
+ s32 mic_volt, bool highhph, bool *highv)
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
bool invalid = false;
@@ -5885,7 +5894,8 @@
* needs to be considered as invalid
*/
v_hs_max = tabla_get_current_v_hs_max(tabla);
- if (!highhph && (mic_volt > v_hs_max))
+ *highv = mic_volt > v_hs_max;
+ if (!highhph && *highv)
invalid = true;
else if (mic_volt < tabla->mbhc_data.v_inval_ins_high &&
(mic_volt > tabla->mbhc_data.v_inval_ins_low))
@@ -5894,16 +5904,11 @@
return invalid;
}
-static bool tabla_is_inval_insert_delta(struct snd_soc_codec *codec,
- int mic_volt, int mic_volt_prev,
- int threshold)
+static bool tabla_is_inval_ins_delta(struct snd_soc_codec *codec,
+ int mic_volt, int mic_volt_prev,
+ int threshold)
{
- int delta = abs(mic_volt - mic_volt_prev);
- if (delta > threshold) {
- pr_debug("%s: volt delta %dmv\n", __func__, delta);
- return true;
- }
- return false;
+ return abs(mic_volt - mic_volt_prev) > threshold;
}
/* called under codec_resource_lock acquisition */
@@ -5912,13 +5917,21 @@
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
- if (plug_type == PLUG_TYPE_HEADPHONE
- && tabla->current_plug == PLUG_TYPE_NONE) {
+ if (plug_type == PLUG_TYPE_HEADPHONE &&
+ tabla->current_plug == PLUG_TYPE_NONE) {
/* Nothing was reported previously
- * reporte a headphone
+ * report a headphone or unsupported
*/
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
tabla_codec_cleanup_hs_polling(codec);
+ } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ if (tabla->current_plug == PLUG_TYPE_HEADSET)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADSET);
+ else if (tabla->current_plug == PLUG_TYPE_HEADPHONE)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+
+ tabla_codec_report_plug(codec, 1, SND_JACK_UNSUPPORTED);
+ tabla_codec_cleanup_hs_polling(codec);
} else if (plug_type == PLUG_TYPE_HEADSET) {
/* If Headphone was reported previously, this will
* only report the mic line
@@ -5936,6 +5949,9 @@
MBHC_USE_MB_TRIGGER |
MBHC_USE_HPHL_TRIGGER,
false);
+ } else {
+ WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
+ tabla->current_plug, plug_type);
}
}
@@ -5990,66 +6006,86 @@
enum tabla_mbhc_plug_type plug_type[num_det];
s16 mb_v[num_det];
s32 mic_mv[num_det];
- bool inval = false;
+ bool inval;
+ bool highdelta;
+ bool ahighv = false, highv;
/* make sure override is on */
WARN_ON(!(snd_soc_read(codec, TABLA_A_CDC_MBHC_B1_CTL) & 0x04));
+ /* GND and MIC swap detection requires at least 2 rounds of DCE */
+ BUG_ON(num_det < 2);
+
+ plug_type_ptr =
+ TABLA_MBHC_CAL_PLUG_TYPE_PTR(tabla->mbhc_cfg.calibration);
+
+ plug_type[0] = PLUG_TYPE_INVALID;
+
/* performs DCEs for N times
* 1st: check if voltage is in invalid range
* 2nd - N-2nd: check voltage range and delta
* N-1st: check voltage range, delta with HPHR GND switch
* Nth: check voltage range with VDDIO switch if micbias V != vddio V*/
- for (i = 0; i < num_det && !inval; i++) {
+ for (i = 0; i < num_det; i++) {
gndswitch = (i == (num_det - 1 - vddio));
- vddioswitch = (vddio && (i == num_det - 1));
+ vddioswitch = (vddio && ((i == num_det - 1) ||
+ (i == num_det - 2)));
if (i == 0) {
mb_v[i] = tabla_codec_setup_hs_polling(codec);
mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- inval = tabla_is_invalid_insertion_range(codec,
- mic_mv[i],
- highhph);
+ inval = tabla_is_inval_ins_range(codec, mic_mv[i],
+ highhph, &highv);
+ ahighv |= highv;
scaled = mic_mv[i];
- } else if (vddioswitch) {
- __tabla_codec_switch_micbias(tabla->codec, 1, false,
- false);
- mb_v[i] = __tabla_codec_sta_dce(codec, 1, true, true);
- mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- scaled = tabla_scale_v_micb_vddio(tabla, mic_mv[i],
- false);
- inval = (tabla_is_invalid_insertion_range(codec,
- mic_mv[i],
- highhph) ||
- tabla_is_inval_insert_delta(codec, scaled,
- mic_mv[i - 1],
- TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV));
- __tabla_codec_switch_micbias(tabla->codec, 0, false,
- false);
} else {
+ if (vddioswitch)
+ __tabla_codec_switch_micbias(tabla->codec, 1,
+ false, false);
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, true);
mb_v[i] = __tabla_codec_sta_dce(codec, 1, true, true);
mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- inval = (tabla_is_invalid_insertion_range(codec,
+ if (vddioswitch)
+ scaled = tabla_scale_v_micb_vddio(tabla,
mic_mv[i],
- highhph) ||
- tabla_is_inval_insert_delta(codec, mic_mv[i],
- mic_mv[i - 1],
- TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV));
+ false);
+ else
+ scaled = mic_mv[i];
+ /* !gndswitch & vddioswitch means the previous DCE
+ * was done with gndswitch, don't compare with DCE
+ * with gndswitch */
+ highdelta = tabla_is_inval_ins_delta(codec, scaled,
+ mic_mv[i - !gndswitch - vddioswitch],
+ TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV);
+ inval = (tabla_is_inval_ins_range(codec, mic_mv[i],
+ highhph, &highv) ||
+ highdelta);
+ ahighv |= highv;
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, false);
- scaled = mic_mv[i];
+ if (vddioswitch)
+ __tabla_codec_switch_micbias(tabla->codec, 0,
+ false, false);
+ /* claim UNSUPPORTED plug insertion when
+ * good headset is detected but HPHR GND switch makes
+ * delta difference */
+ if (i == (num_det - 2) && highdelta && !ahighv)
+ plug_type[0] = PLUG_TYPE_GND_MIC_SWAP;
+ else if (i == (num_det - 1) && inval)
+ plug_type[0] = PLUG_TYPE_INVALID;
}
pr_debug("%s: DCE #%d, %04x, V %d, scaled V %d, GND %d, "
- "invalid %d\n", __func__,
+ "VDDIO %d, inval %d\n", __func__,
i + 1, mb_v[i] & 0xffff, mic_mv[i], scaled, gndswitch,
- inval);
+ vddioswitch, inval);
+ /* don't need to run further DCEs */
+ if (ahighv && inval)
+ break;
+ mic_mv[i] = scaled;
}
- plug_type_ptr =
- TABLA_MBHC_CAL_PLUG_TYPE_PTR(tabla->mbhc_cfg.calibration);
- plug_type[0] = PLUG_TYPE_INVALID;
- for (i = 0; !inval && i < num_det; i++) {
+ for (i = 0; (plug_type[0] != PLUG_TYPE_GND_MIC_SWAP && !inval) &&
+ i < num_det; i++) {
/*
* If we are here, means none of the all
* measurements are fake, continue plug type detection.
@@ -6079,6 +6115,7 @@
}
}
+ pr_debug("%s: Detected plug type %d\n", __func__, plug_type[0]);
return plug_type[0];
}
@@ -6086,7 +6123,7 @@
{
struct tabla_priv *tabla;
struct snd_soc_codec *codec;
- int retry = 0;
+ int retry = 0, pt_gnd_mic_swap_cnt = 0;
bool correction = false;
enum tabla_mbhc_plug_type plug_type;
unsigned long timeout;
@@ -6137,14 +6174,33 @@
}
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
pr_debug("Good headphone detected, continue polling mic\n");
- if (tabla->current_plug == PLUG_TYPE_NONE) {
+ if (tabla->current_plug == PLUG_TYPE_NONE)
tabla_codec_report_plug(codec, 1,
SND_JACK_HEADPHONE);
- }
} else {
+ if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ pt_gnd_mic_swap_cnt++;
+ if (pt_gnd_mic_swap_cnt <
+ TABLA_MBHC_GND_MIC_SWAP_THRESHOLD)
+ continue;
+ else if (pt_gnd_mic_swap_cnt >
+ TABLA_MBHC_GND_MIC_SWAP_THRESHOLD) {
+ /* GND/MIC swap retries exceeded the threshold,
+ * so the switch didn't work; report unsupported plug */
+ } else if (tabla->mbhc_cfg.swap_gnd_mic) {
+ /* if switch is toggled, check again,
+ * otherwise report unsupported plug */
+ if (tabla->mbhc_cfg.swap_gnd_mic(codec))
+ continue;
+ }
+ } else
+ pt_gnd_mic_swap_cnt = 0;
+
TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
/* Turn off override */
tabla_turn_onoff_override(codec, false);
+ /* Valid plug types reported here also include
+ * PLUG_TYPE_GND_MIC_SWAP */
tabla_find_plug_and_report(codec, plug_type);
TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
pr_debug("Attempt %d found correct plug %d\n", retry,
@@ -6167,8 +6223,8 @@
/* called under codec_resource_lock acquisition */
static void tabla_codec_decide_gpio_plug(struct snd_soc_codec *codec)
{
- struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
enum tabla_mbhc_plug_type plug_type;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: enter\n", __func__);
@@ -6182,7 +6238,8 @@
return;
}
- if (plug_type == PLUG_TYPE_INVALID) {
+ if (plug_type == PLUG_TYPE_INVALID ||
+ plug_type == PLUG_TYPE_GND_MIC_SWAP) {
tabla_schedule_hs_detect_plug(tabla);
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
@@ -6224,18 +6281,21 @@
return;
}
- plug_type = tabla_codec_get_plug_type(codec, tabla->mbhc_cfg.gpio ?
- true : false);
+ plug_type = tabla_codec_get_plug_type(codec, false);
tabla_turn_onoff_override(codec, false);
if (plug_type == PLUG_TYPE_INVALID) {
pr_debug("%s: Invalid plug type detected\n", __func__);
- snd_soc_update_bits(codec, TABLA_A_CDC_MBHC_B1_CTL,
- 0x02, 0x02);
+ snd_soc_update_bits(codec, TABLA_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
tabla_codec_cleanup_hs_polling(codec);
tabla_codec_enable_hs_detect(codec, 1,
MBHC_USE_MB_TRIGGER |
MBHC_USE_HPHL_TRIGGER, false);
+ } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ pr_debug("%s: GND-MIC swapped plug type detected\n", __func__);
+ tabla_codec_report_plug(codec, 1, SND_JACK_UNSUPPORTED);
+ tabla_codec_cleanup_hs_polling(codec);
+ tabla_codec_enable_hs_detect(codec, 0, 0, false);
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
pr_debug("%s: Headphone Detected\n", __func__);
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
@@ -6297,7 +6357,13 @@
* it is possible that micbias will be switched to VDDIO.
*/
tabla_codec_switch_micbias(codec, 0);
- tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+ if (priv->current_plug == PLUG_TYPE_HEADPHONE)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+ else if (priv->current_plug == PLUG_TYPE_GND_MIC_SWAP)
+ tabla_codec_report_plug(codec, 0, SND_JACK_UNSUPPORTED);
+ else
+ WARN(1, "%s: Unexpected current plug type %d\n",
+ __func__, priv->current_plug);
tabla_codec_shutdown_hs_removal_detect(codec);
tabla_codec_enable_hs_detect(codec, 1,
MBHC_USE_MB_TRIGGER |
@@ -6611,6 +6677,9 @@
if (tabla->current_plug == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
is_removed = true;
+ } else if (tabla->current_plug == PLUG_TYPE_GND_MIC_SWAP) {
+ tabla_codec_report_plug(codec, 0, SND_JACK_UNSUPPORTED);
+ is_removed = true;
} else if (tabla->current_plug == PLUG_TYPE_HEADSET) {
tabla_codec_pause_hs_polling(codec);
tabla_codec_cleanup_hs_polling(codec);
@@ -6722,12 +6791,10 @@
} else {
/* Enable Mic Bias pull down and HPH Switch to GND */
snd_soc_update_bits(codec,
- tabla->mbhc_bias_regs.ctl_reg, 0x01,
- 0x01);
- snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01,
- 0x01);
+ tabla->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
+ snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01, 0x01);
INIT_WORK(&tabla->hs_correct_plug_work,
- tabla_hs_correct_gpio_plug);
+ tabla_hs_correct_gpio_plug);
}
}
@@ -6850,7 +6917,6 @@
return IRQ_HANDLED;
}
-
static int tabla_handle_pdata(struct tabla_priv *tabla)
{
struct snd_soc_codec *codec = tabla->codec;
@@ -7261,6 +7327,9 @@
p->v_inval_ins_low);
n += scnprintf(buffer + n, size - n, "v_inval_ins_high = %d\n",
p->v_inval_ins_high);
+ if (tabla->mbhc_cfg.gpio)
+ n += scnprintf(buffer + n, size - n, "GPIO insert = %d\n",
+ tabla_hs_gpio_level_remove(tabla));
buffer[n] = 0;
return simple_read_from_buffer(buf, count, pos, buffer, n);
diff --git a/sound/soc/codecs/wcd9310.h b/sound/soc/codecs/wcd9310.h
index 38ec27c..1cca360 100644
--- a/sound/soc/codecs/wcd9310.h
+++ b/sound/soc/codecs/wcd9310.h
@@ -176,6 +176,8 @@
unsigned int gpio;
unsigned int gpio_irq;
int gpio_level_insert;
+ /* swap_gnd_mic returns true if external GND/MIC swap switch toggled */
+ bool (*swap_gnd_mic) (struct snd_soc_codec *);
};
extern int tabla_hs_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index c8ef419..0c72880 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -58,6 +58,8 @@
#define TABLA_MBHC_DEF_BUTTONS 8
#define TABLA_MBHC_DEF_RLOADS 5
+#define JACK_DETECT_GPIO 38
+
/* Shared channel numbers for Slimbus ports that connect APQ to MDM. */
enum {
SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
@@ -97,6 +99,15 @@
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
+static int apq8064_hs_detect_use_gpio = -1;
+module_param(apq8064_hs_detect_use_gpio, int, 0444);
+MODULE_PARM_DESC(apq8064_hs_detect_use_gpio, "Use GPIO for headset detection");
+
+static bool apq8064_hs_detect_use_firmware;
+module_param(apq8064_hs_detect_use_firmware, bool, 0444);
+MODULE_PARM_DESC(apq8064_hs_detect_use_firmware, "Use firmware for headset "
+ "detection");
+
static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
@@ -108,7 +119,7 @@
.micbias = TABLA_MICBIAS2,
.mclk_cb_fn = msm_enable_codec_ext_clk,
.mclk_rate = TABLA_EXT_CLK_RATE,
- .gpio = 0, /* MBHC GPIO is not configured */
+ .gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
};
@@ -1037,10 +1048,10 @@
return ret;
}
-
static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
int err;
+ uint32_t revision;
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
@@ -1097,6 +1108,48 @@
codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+ /* APQ8064 Rev 1.1 CDP and Liquid have mechanical switch */
+ revision = socinfo_get_version();
+ if (apq8064_hs_detect_use_gpio != -1) {
+ if (apq8064_hs_detect_use_gpio == 1)
+ pr_debug("%s: MBHC mechanical is enabled by request\n",
+ __func__);
+ else if (apq8064_hs_detect_use_gpio == 0)
+ pr_debug("%s: MBHC mechanical is disabled by request\n",
+ __func__);
+ else
+ pr_warn("%s: Invalid hs_detect_use_gpio %d\n", __func__,
+ apq8064_hs_detect_use_gpio);
+ } else if (SOCINFO_VERSION_MAJOR(revision) == 0) {
+ pr_warn("%s: Unknown HW revision detected %d.%d\n", __func__,
+ SOCINFO_VERSION_MAJOR(revision),
+ SOCINFO_VERSION_MINOR(revision));
+ } else if ((SOCINFO_VERSION_MAJOR(revision) == 1 &&
+ SOCINFO_VERSION_MINOR(revision) >= 1 &&
+ (machine_is_apq8064_cdp() ||
+ machine_is_apq8064_liquid())) ||
+ SOCINFO_VERSION_MAJOR(revision) > 1) {
+ pr_debug("%s: MBHC mechanical switch available APQ8064 "
+ "detected\n", __func__);
+ apq8064_hs_detect_use_gpio = 1;
+ }
+
+ if (apq8064_hs_detect_use_gpio == 1) {
+ pr_debug("%s: Using MBHC mechanical switch\n", __func__);
+ mbhc_cfg.gpio = JACK_DETECT_GPIO;
+ mbhc_cfg.gpio_irq = gpio_to_irq(JACK_DETECT_GPIO);
+ err = gpio_request(mbhc_cfg.gpio, "MBHC_HS_DETECT");
+ if (err < 0) {
+ pr_err("%s: gpio_request %d failed %d\n", __func__,
+ mbhc_cfg.gpio, err);
+ return err;
+ }
+ gpio_direction_input(JACK_DETECT_GPIO);
+ } else
+ pr_debug("%s: Not using MBHC mechanical switch\n", __func__);
+
+ mbhc_cfg.read_fw_bin = apq8064_hs_detect_use_firmware;
+
err = tabla_hs_detect(codec, &mbhc_cfg);
return err;
@@ -1119,7 +1172,8 @@
},
};
-static struct snd_soc_dsp_link slimbus0_hl_media = {
+/* bi-directional media definition for hostless PCM device */
+static struct snd_soc_dsp_link bidir_hl_media = {
.playback = true,
.capture = true,
.trigger = {
@@ -1128,9 +1182,8 @@
},
};
-static struct snd_soc_dsp_link int_fm_hl_media = {
+static struct snd_soc_dsp_link hdmi_rx_hl = {
.playback = true,
- .capture = true,
.trigger = {
SND_SOC_DSP_TRIGGER_POST,
SND_SOC_DSP_TRIGGER_POST
@@ -1377,7 +1430,7 @@
.name = "MSM8960 Media2",
.stream_name = "MultiMedia2",
.cpu_dai_name = "MultiMedia2",
- .platform_name = "msm-pcm-dsp",
+ .platform_name = "msm-multi-ch-pcm-dsp",
.dynamic = 1,
.dsp_link = &fe_media,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
@@ -1418,7 +1471,7 @@
.cpu_dai_name = "SLIMBUS0_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
- .dsp_link = &slimbus0_hl_media,
+ .dsp_link = &bidir_hl_media,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* .be_id = do not care */
@@ -1429,7 +1482,7 @@
.cpu_dai_name = "INT_FM_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
- .dsp_link = &int_fm_hl_media,
+ .dsp_link = &bidir_hl_media,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* .be_id = do not care */
@@ -1453,6 +1506,37 @@
.ignore_suspend = 1,
},
{
+ .name = "MSM8960 Compr",
+ .stream_name = "COMPR",
+ .cpu_dai_name = "MultiMedia4",
+ .platform_name = "msm-compr-dsp",
+ .dynamic = 1,
+ .dsp_link = &lpa_fe_media,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ },
+ {
+ .name = "AUXPCM Hostless",
+ .stream_name = "AUXPCM Hostless",
+ .cpu_dai_name = "AUXPCM_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dsp_link = &bidir_hl_media,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ },
+ /* HDMI Hostless */
+ {
+ .name = "HDMI_RX_HOSTLESS",
+ .stream_name = "HDMI_RX_HOSTLESS",
+ .cpu_dai_name = "HDMI_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dsp_link = &hdmi_rx_hl,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .no_codec = 1,
+ .ignore_suspend = 1,
+ },
+ {
.name = "Voice Stub",
.stream_name = "Voice Stub",
.cpu_dai_name = "VOICE_STUB",
@@ -1826,6 +1910,8 @@
}
msm_free_headset_mic_gpios();
platform_device_unregister(msm_snd_device);
+ if (mbhc_cfg.gpio)
+ gpio_free(mbhc_cfg.gpio);
kfree(mbhc_cfg.calibration);
}
module_exit(msm_audio_exit);
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 6c44cba..a050771 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -256,21 +256,21 @@
{
.playback = {
.stream_name = "AUXPCM Hostless Playback",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
.rate_min = 8000,
- .rate_max = 8000,
+ .rate_max = 16000,
},
.capture = {
.stream_name = "AUXPCM Hostless Capture",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 16000,
},
.ops = &msm_fe_dai_ops,
.name = "AUXPCM_HOSTLESS",
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index fb7756c..a62541a 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -532,17 +532,28 @@
}
dai_data->channels = params_channels(params);
- if (params_rate(params) != 8000) {
- dev_err(dai->dev, "AUX PCM supports only 8KHz sampling rate\n");
+ dai_data->rate = params_rate(params);
+ switch (dai_data->rate) {
+ case 8000:
+ dai_data->port_config.pcm.mode = auxpcm_pdata->mode_8k.mode;
+ dai_data->port_config.pcm.sync = auxpcm_pdata->mode_8k.sync;
+ dai_data->port_config.pcm.frame = auxpcm_pdata->mode_8k.frame;
+ dai_data->port_config.pcm.quant = auxpcm_pdata->mode_8k.quant;
+ dai_data->port_config.pcm.slot = auxpcm_pdata->mode_8k.slot;
+ dai_data->port_config.pcm.data = auxpcm_pdata->mode_8k.data;
+ break;
+ case 16000:
+ dai_data->port_config.pcm.mode = auxpcm_pdata->mode_16k.mode;
+ dai_data->port_config.pcm.sync = auxpcm_pdata->mode_16k.sync;
+ dai_data->port_config.pcm.frame = auxpcm_pdata->mode_16k.frame;
+ dai_data->port_config.pcm.quant = auxpcm_pdata->mode_16k.quant;
+ dai_data->port_config.pcm.slot = auxpcm_pdata->mode_16k.slot;
+ dai_data->port_config.pcm.data = auxpcm_pdata->mode_16k.data;
+ break;
+ default:
+ dev_err(dai->dev, "AUX PCM supports only 8kHz and 16kHz sampling rate\n");
return -EINVAL;
}
- dai_data->rate = params_rate(params);
- dai_data->port_config.pcm.mode = auxpcm_pdata->mode;
- dai_data->port_config.pcm.sync = auxpcm_pdata->sync;
- dai_data->port_config.pcm.frame = auxpcm_pdata->frame;
- dai_data->port_config.pcm.quant = auxpcm_pdata->quant;
- dai_data->port_config.pcm.slot = auxpcm_pdata->slot;
- dai_data->port_config.pcm.data = auxpcm_pdata->data;
return 0;
}
@@ -702,9 +713,9 @@
{
struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
int rc = 0;
-
struct msm_dai_auxpcm_pdata *auxpcm_pdata =
(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+ unsigned long pcm_clk_rate;
mutex_lock(&aux_pcm_mutex);
@@ -753,8 +764,17 @@
afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+ if (dai_data->rate == 8000) {
+ pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
+ } else if (dai_data->rate == 16000) {
+ pcm_clk_rate = auxpcm_pdata->mode_16k.pcm_clk_rate;
+ } else {
+ dev_err(dai->dev, "%s: Invalid AUX PCM rate %d\n", __func__,
+ dai_data->rate);
+ return -EINVAL;
+ }
- rc = clk_set_rate(pcm_clk, auxpcm_pdata->pcm_clk_rate);
+ rc = clk_set_rate(pcm_clk, pcm_clk_rate);
if (rc < 0) {
pr_err("%s: clk_set_rate failed\n", __func__);
return rc;
@@ -1377,11 +1397,11 @@
static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_rx_dai = {
.playback = {
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
- .rate_max = 8000,
+ .rate_max = 16000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_auxpcm_ops,
@@ -1391,11 +1411,11 @@
static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_tx_dai = {
.capture = {
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
- .rate_max = 8000,
+ .rate_max = 16000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_auxpcm_ops,
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index b31ed65..2762bd6 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -39,6 +39,7 @@
#define SPK_AMP_POS 0x1
#define SPK_AMP_NEG 0x2
#define SPKR_BOOST_GPIO 15
+#define DEFAULT_PMIC_SPK_GAIN 0x0D
#define SITAR_EXT_CLK_RATE 12288000
#define SITAR_MBHC_DEF_BUTTONS 3
@@ -47,6 +48,7 @@
static int msm8930_spk_control;
static int msm8930_slim_0_rx_ch = 1;
static int msm8930_slim_0_tx_ch = 1;
+static int msm8930_pmic_spk_gain = DEFAULT_PMIC_SPK_GAIN;
static int msm8930_ext_spk_pamp;
static int msm8930_btsco_rate = BTSCO_RATE_8KHZ;
@@ -431,6 +433,39 @@
return 0;
}
+static const char *pmic_spk_gain_text[] = {
+ "NEG_6_DB", "NEG_4_DB", "NEG_2_DB", "ZERO_DB", "POS_2_DB", "POS_4_DB",
+ "POS_6_DB", "POS_8_DB", "POS_10_DB", "POS_12_DB", "POS_14_DB",
+ "POS_16_DB", "POS_18_DB", "POS_20_DB", "POS_22_DB", "POS_24_DB"
+};
+
+static const struct soc_enum msm8960_pmic_spk_gain_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(pmic_spk_gain_text),
+ pmic_spk_gain_text),
+};
+
+static int msm8930_pmic_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm8930_pmic_spk_gain = %d\n", __func__,
+ msm8930_pmic_spk_gain);
+ ucontrol->value.integer.value[0] = msm8930_pmic_spk_gain;
+ return 0;
+}
+
+static int msm8930_pmic_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ msm8930_pmic_spk_gain = ucontrol->value.integer.value[0];
+ ret = pm8xxx_spk_gain(msm8930_pmic_spk_gain);
+ pr_debug("%s: msm8930_pmic_spk_gain = %d"
+ " ucontrol->value.integer.value[0] = %d\n", __func__,
+ msm8930_pmic_spk_gain,
+ (int) ucontrol->value.integer.value[0]);
+ return ret;
+}
+
static const struct snd_kcontrol_new sitar_msm8930_controls[] = {
SOC_ENUM_EXT("Speaker Function", msm8930_enum[0], msm8930_get_spk,
msm8930_set_spk),
@@ -438,6 +473,8 @@
msm8930_slim_0_rx_ch_get, msm8930_slim_0_rx_ch_put),
SOC_ENUM_EXT("SLIM_0_TX Channels", msm8930_enum[2],
msm8930_slim_0_tx_ch_get, msm8930_slim_0_tx_ch_put),
+ SOC_ENUM_EXT("PMIC SPK Gain", msm8960_pmic_spk_gain_enum[0],
+ msm8930_pmic_gain_get, msm8930_pmic_gain_put),
};
static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
@@ -642,6 +679,9 @@
mbhc_cfg.gpio_irq = gpio_to_irq(mbhc_cfg.gpio);
sitar_hs_detect(codec, &mbhc_cfg);
+ /* Initialize default PMIC speaker gain */
+ pm8xxx_spk_gain(DEFAULT_PMIC_SPK_GAIN);
+
return 0;
}
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index f78f58d..8f0fa32 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -41,8 +41,8 @@
#define msm8960_SLIM_0_RX_MAX_CHANNELS 2
#define msm8960_SLIM_0_TX_MAX_CHANNELS 4
-#define BTSCO_RATE_8KHZ 8000
-#define BTSCO_RATE_16KHZ 16000
+#define SAMPLE_RATE_8KHZ 8000
+#define SAMPLE_RATE_16KHZ 16000
#define BOTTOM_SPK_AMP_POS 0x1
#define BOTTOM_SPK_AMP_NEG 0x2
@@ -61,6 +61,7 @@
#define JACK_DETECT_GPIO 38
#define JACK_DETECT_INT PM8921_GPIO_IRQ(PM8921_IRQ_BASE, JACK_DETECT_GPIO)
+#define JACK_US_EURO_SEL_GPIO 35
static u32 top_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(18);
static u32 bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(19);
@@ -70,9 +71,11 @@
static int msm8960_slim_0_rx_ch = 1;
static int msm8960_slim_0_tx_ch = 1;
-static int msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+static int msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
static int msm8960_btsco_ch = 1;
+static int msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+
static struct clk *codec_clk;
static int clk_users;
@@ -91,6 +94,7 @@
static int msm8960_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
+static bool msm8960_swap_gnd_mic(struct snd_soc_codec *codec);
static struct tabla_mbhc_config mbhc_cfg = {
.headset_jack = &hs_jack,
@@ -103,8 +107,11 @@
.gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
+ .swap_gnd_mic = NULL,
};
+static u32 us_euro_sel_gpio = PM8921_GPIO_PM_TO_SYS(JACK_US_EURO_SEL_GPIO);
+
static struct mutex cdc_mclk_mutex;
static void msm8960_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
@@ -372,6 +379,15 @@
return r;
}
+static bool msm8960_swap_gnd_mic(struct snd_soc_codec *codec)
+{
+ int value = gpio_get_value_cansleep(us_euro_sel_gpio);
+ pr_debug("%s: US EURO select switch %d to %d\n", __func__, value,
+ !value);
+ gpio_set_value_cansleep(us_euro_sel_gpio, !value);
+ return true;
+}
+
static int msm8960_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -512,6 +528,11 @@
SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
+static const char *auxpcm_rate_text[] = {"rate_8000", "rate_16000"};
+static const struct soc_enum msm8960_auxpcm_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text),
+};
+
static int msm8960_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -563,19 +584,49 @@
{
switch (ucontrol->value.integer.value[0]) {
case 0:
- msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
break;
case 1:
- msm8960_btsco_rate = BTSCO_RATE_16KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_16KHZ;
break;
default:
- msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
break;
}
pr_debug("%s: msm8960_btsco_rate = %d\n", __func__, msm8960_btsco_rate);
return 0;
}
+static int msm8960_auxpcm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm8960_auxpcm_rate = %d\n", __func__,
+ msm8960_auxpcm_rate);
+ ucontrol->value.integer.value[0] = msm8960_auxpcm_rate;
+ return 0;
+}
+
+static int msm8960_auxpcm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+ break;
+ case 1:
+ msm8960_auxpcm_rate = SAMPLE_RATE_16KHZ;
+ break;
+ default:
+ msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: msm8960_auxpcm_rate = %d"
+ " ucontrol->value.integer.value[0] = %d\n", __func__,
+ msm8960_auxpcm_rate,
+ (int)ucontrol->value.integer.value[0]);
+ return 0;
+}
+
static const struct snd_kcontrol_new tabla_msm8960_controls[] = {
SOC_ENUM_EXT("Speaker Function", msm8960_enum[0], msm8960_get_spk,
msm8960_set_spk),
@@ -590,6 +641,11 @@
msm8960_btsco_rate_get, msm8960_btsco_rate_put),
};
+static const struct snd_kcontrol_new auxpcm_rate_mixer_controls[] = {
+ SOC_ENUM_EXT("AUX PCM SampleRate", msm8960_auxpcm_enum[0],
+ msm8960_auxpcm_rate_get, msm8960_auxpcm_rate_put),
+};
+
static int msm8960_btsco_init(struct snd_soc_pcm_runtime *rtd)
{
int err = 0;
@@ -603,6 +659,19 @@
return 0;
}
+static int msm8960_auxpcm_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int err = 0;
+ struct snd_soc_platform *platform = rtd->platform;
+
+ err = snd_soc_add_platform_controls(platform,
+ auxpcm_rate_mixer_controls,
+ ARRAY_SIZE(auxpcm_rate_mixer_controls));
+ if (err < 0)
+ return err;
+ return 0;
+}
+
static void *def_tabla_mbhc_cal(void)
{
void *tabla_cal;
@@ -795,7 +864,7 @@
err = snd_soc_jack_new(codec, "Headset Jack",
(SND_JACK_HEADSET | SND_JACK_OC_HPHL |
- SND_JACK_OC_HPHR),
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
&hs_jack);
if (err) {
pr_err("failed to create new jack\n");
@@ -811,6 +880,9 @@
codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+ if (machine_is_msm8960_cdp())
+ mbhc_cfg.swap_gnd_mic = msm8960_swap_gnd_mic;
+
if (hs_detect_use_gpio) {
mbhc_cfg.gpio = PM8921_GPIO_PM_TO_SYS(JACK_DETECT_GPIO);
mbhc_cfg.gpio_irq = JACK_DETECT_INT;
@@ -819,8 +891,8 @@
if (mbhc_cfg.gpio) {
err = pm8xxx_gpio_config(mbhc_cfg.gpio, &jack_gpio_cfg);
if (err) {
- pr_err("%s: pm8xxx_gpio_config failed %d\n", __func__,
- err);
+ pr_err("%s: pm8xxx_gpio_config JACK_DETECT failed %d\n",
+ __func__, err);
return err;
}
}
@@ -951,8 +1023,8 @@
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- /* PCM only supports mono output with 8khz sample rate */
- rate->min = rate->max = 8000;
+ rate->min = rate->max = msm8960_auxpcm_rate;
+ /* PCM only supports mono output */
channels->min = channels->max = 1;
return 0;
@@ -1279,6 +1351,7 @@
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
+ .init = &msm8960_auxpcm_init,
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
.be_hw_params_fixup = msm8960_auxpcm_be_params_fixup,
@@ -1468,19 +1541,19 @@
else
gpio_direction_output(PM8921_GPIO_PM_TO_SYS(23), 0);
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(35), "US_EURO_SWITCH");
+ ret = gpio_request(us_euro_sel_gpio, "US_EURO_SWITCH");
if (ret) {
pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
+ us_euro_sel_gpio);
gpio_free(PM8921_GPIO_PM_TO_SYS(23));
return ret;
}
- ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(35), ¶m);
+ ret = pm8xxx_gpio_config(us_euro_sel_gpio, ¶m);
if (ret)
pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
+ us_euro_sel_gpio);
else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(35), 0);
+ gpio_direction_output(us_euro_sel_gpio, 0);
return 0;
}
@@ -1488,7 +1561,7 @@
{
if (msm8960_headset_gpios_configured) {
gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- gpio_free(PM8921_GPIO_PM_TO_SYS(35));
+ gpio_free(us_euro_sel_gpio);
}
}
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index fc08febf..09bfd94 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -526,6 +526,7 @@
pr_err("%s: ION create client"
" for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[cnt].handle = ion_alloc
@@ -536,6 +537,7 @@
pr_err("%s: ION memory"
" allocation for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -548,6 +550,7 @@
pr_err("%s: ION Get Physical"
" for AUDIO failed, rc = %d\n",
__func__, rc);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -558,6 +561,7 @@
buf[cnt].data)) {
pr_err("%s: ION memory"
" mapping for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
memset((void *)buf[cnt].data, 0, bufsz);
@@ -661,6 +665,7 @@
buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)buf[0].client)) {
pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
@@ -668,6 +673,7 @@
if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -676,12 +682,14 @@
if (rc) {
pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0);
if (IS_ERR_OR_NULL((void *) buf[0].data)) {
pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
memset((void *)buf[0].data, 0, (bufsz * bufcnt));
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2284f19..b6ddcc8a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1543,10 +1543,17 @@
trace_snd_soc_dapm_widget_power(w, power);
- if (power)
+ if (power) {
dapm_seq_insert(w, &up_list, true);
- else
+ dev_dbg(w->dapm->dev,
+ "%s(): power up . widget %s\n",
+ __func__, w->name);
+ } else {
dapm_seq_insert(w, &down_list, false);
+ dev_dbg(w->dapm->dev,
+ "%s(): power down . widget %s\n",
+ __func__, w->name);
+ }
w->power = power;
break;