Merge "mdss: dsi: add dsi mode configuration in unblank event"
diff --git a/arch/arm/boot/dts/apq8074-v2-cdp.dts b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
similarity index 92%
rename from arch/arm/boot/dts/apq8074-v2-cdp.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
index 1dc0912..0489b55 100644
--- a/arch/arm/boot/dts/apq8074-v2-cdp.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "msm8974-cdp.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 CDP";
+ model = "Qualcomm APQ 8074v2.0-1 CDP";
compatible = "qcom,apq8074-cdp", "qcom,apq8074", "qcom,cdp";
qcom,msm-id = <184 1 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-dragonboard.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
index 5a6f5f3..128d8bd 100644
--- a/arch/arm/boot/dts/apq8074-v2-dragonboard.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-dragonboard.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "apq8074-dragonboard.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 DRAGONBOARD";
+ model = "Qualcomm APQ 8074v2.0-1 DRAGONBOARD";
compatible = "qcom,apq8074-dragonboard", "qcom,apq8074", "qcom,dragonboard";
qcom,msm-id = <184 10 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
similarity index 89%
rename from arch/arm/boot/dts/apq8074-v2-liquid.dts
rename to arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
index a0ecb50..63c32f3 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/arch/arm/boot/dts/apq8074-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "apq8074-v2.dtsi"
+/include/ "apq8074-v2.0-1.dtsi"
/include/ "msm8974-liquid.dtsi"
/ {
- model = "Qualcomm APQ 8074v2 LIQUID";
+ model = "Qualcomm APQ 8074v2.0-1 LIQUID";
compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
qcom,msm-id = <184 9 0x20000>;
};
diff --git a/arch/arm/boot/dts/apq8074-v2.dtsi b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
similarity index 97%
rename from arch/arm/boot/dts/apq8074-v2.dtsi
rename to arch/arm/boot/dts/apq8074-v2.0-1.dtsi
index c700a5c..8314fab 100644
--- a/arch/arm/boot/dts/apq8074-v2.dtsi
+++ b/arch/arm/boot/dts/apq8074-v2.0-1.dtsi
@@ -16,7 +16,7 @@
* msm8974.dtsi file.
*/
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
&soc {
qcom,qseecom@a700000 {
diff --git a/arch/arm/boot/dts/apq8084.dtsi b/arch/arm/boot/dts/apq8084.dtsi
index b027f7d..943f2a3 100644
--- a/arch/arm/boot/dts/apq8084.dtsi
+++ b/arch/arm/boot/dts/apq8084.dtsi
@@ -344,6 +344,12 @@
qcom,pet-time = <10000>;
qcom,ipi-ping;
};
+
+	qcom,msm-rng@f9bff000 {
+ compatible = "qcom,msm-rng";
+ reg = <0xf9bff000 0x200>;
+ qcom,msm-rng-iface-clk;
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8610-qrd-skuab.dts b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
index c435038..947a312 100644
--- a/arch/arm/boot/dts/msm8610-qrd-skuab.dts
+++ b/arch/arm/boot/dts/msm8610-qrd-skuab.dts
@@ -76,7 +76,7 @@
vdd-supply = <&pm8110_l19>;
vio-supply = <&pm8110_l14>;
fsl,irq-gpio = <&msmgpio 81 0x00>;
- fsl,sensors-position = <5>;
+ fsl,sensors-position = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index 7b801da..86a61cd 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -148,3 +148,20 @@
&usb_otg {
qcom,hsusb-otg-pnoc-errata-fix;
};
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_mdss {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+ qcom,retain-mem;
+ qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-cdp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-cdp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
index f4014aa..875b3fc 100644
--- a/arch/arm/boot/dts/msm8974-v2-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-cdp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-cdp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 CDP";
+ model = "Qualcomm MSM 8974v2.0/1 CDP";
compatible = "qcom,msm8974-cdp", "qcom,msm8974", "qcom,cdp";
qcom,msm-id = <126 1 0x20000>,
<185 1 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-fluid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-fluid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
index 9c9e3c0..236593d 100644
--- a/arch/arm/boot/dts/msm8974-v2-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-fluid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-fluid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 FLUID";
+ model = "Qualcomm MSM 8974v2.0/1 FLUID";
compatible = "qcom,msm8974-fluid", "qcom,msm8974", "qcom,fluid";
qcom,msm-id = <126 3 0x20000>,
<185 3 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-liquid.dts b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
similarity index 90%
rename from arch/arm/boot/dts/msm8974-v2-liquid.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
index ddae6fe..23292f6 100644
--- a/arch/arm/boot/dts/msm8974-v2-liquid.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-liquid.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-liquid.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 LIQUID";
+ model = "Qualcomm MSM 8974v2.0/1 LIQUID";
compatible = "qcom,msm8974-liquid", "qcom,msm8974", "qcom,liquid";
qcom,msm-id = <126 9 0x20000>,
<185 9 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2-mtp.dts b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
similarity index 91%
rename from arch/arm/boot/dts/msm8974-v2-mtp.dts
rename to arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
index 021b626..de9e6a3 100644
--- a/arch/arm/boot/dts/msm8974-v2-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v2.0-1-mtp.dts
@@ -12,11 +12,11 @@
/dts-v1/;
-/include/ "msm8974-v2.dtsi"
+/include/ "msm8974-v2.0-1.dtsi"
/include/ "msm8974-mtp.dtsi"
/ {
- model = "Qualcomm MSM 8974v2 MTP";
+ model = "Qualcomm MSM 8974v2.0/1 MTP";
compatible = "qcom,msm8974-mtp", "qcom,msm8974", "qcom,mtp";
qcom,msm-id = <126 8 0x20000>,
<185 8 0x20000>,
diff --git a/arch/arm/boot/dts/msm8974-v2.0-1.dtsi b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
new file mode 100644
index 0000000..1fad868
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v2.0-1.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. However, device definitions should be placed inside the
+ * msm8974.dtsi file.
+ */
+
+/include/ "msm8974-v2.dtsi"
+
+&gdsc_venus {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_mdss {
+ qcom,skip-logic-collapse;
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
+
+&gdsc_oxili_gx {
+ qcom,retain-mem;
+ qcom,retain-periph;
+};
diff --git a/arch/arm/boot/dts/msm8974-v2.2.dtsi b/arch/arm/boot/dts/msm8974-v2.2.dtsi
index 09455b1..0ca021b 100644
--- a/arch/arm/boot/dts/msm8974-v2.2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.2.dtsi
@@ -103,3 +103,8 @@
};
};
};
+
+&gdsc_mdss {
+ qcom,retain-periph;
+ qcom,retain-mem;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 59e8dac..4360fe0 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -1651,17 +1651,11 @@
&gdsc_venus {
qcom,clock-names = "core_clk";
- qcom,skip-logic-collapse;
- qcom,retain-periph;
- qcom,retain-mem;
status = "ok";
};
&gdsc_mdss {
qcom,clock-names = "core_clk", "lut_clk";
- qcom,skip-logic-collapse;
- qcom,retain-periph;
- qcom,retain-mem;
status = "ok";
};
@@ -1678,8 +1672,6 @@
&gdsc_oxili_gx {
qcom,clock-names = "core_clk";
- qcom,retain-mem;
- qcom,retain-periph;
status = "ok";
};
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index cfd346e..a5f0704 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -71,7 +71,6 @@
CONFIG_MSM_RTB_SEPARATE_CPUS=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
CONFIG_MSM_BOOT_STATS=y
-CONFIG_MSM_XPU_ERR_FATAL=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index b80949c..76bd74c 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -284,6 +284,7 @@
select MSM_ULTRASOUND_B
select MSM_RPM_LOG
select ARCH_WANT_KMAP_ATOMIC_FLUSH
+ select KRAIT_REGULATOR
config ARCH_APQ8084
bool "APQ8084"
@@ -3060,4 +3061,11 @@
Support the wallclk directory in sysfs filesystem to enable the
wall clock simulation and read the current SFN.
+config KRAIT_REGULATOR
+ bool "Support Kraits powered via ganged regulators in the pmic"
+ help
+ Certain MSMs have the Krait CPUs powered via a single supply
+ line from the PMIC. This supply line is powered by multiple
+ regulators running in ganged mode inside the PMIC. Enable
+ this option to support such configurations.
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 8c42b8d..cd71104 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -296,7 +296,7 @@
obj-$(CONFIG_ARCH_MSM8610) += gdsc.o
obj-$(CONFIG_ARCH_MPQ8092) += gdsc.o
obj-$(CONFIG_ARCH_APQ8084) += gdsc.o
-obj-$(CONFIG_ARCH_MSM8974) += krait-regulator.o
+obj-$(CONFIG_KRAIT_REGULATOR) += krait-regulator.o
obj-$(CONFIG_ARCH_MSMKRYPTON) += board-krypton.o board-krypton-gpiomux.o
obj-$(CONFIG_ARCH_MSMSAMARIUM) += board-samarium.o board-samarium-gpiomux.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 3505afe..72472f9 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -53,13 +53,13 @@
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-mtp.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-rumi.dtb
dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v1-sim.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-cdp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-fluid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-liquid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2-mtp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-cdp.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-liquid.dtb
- dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2-dragonboard.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-fluid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-liquid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += msm8974-v2.0-1-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-liquid.dtb
+ dtb-$(CONFIG_ARCH_MSM8974) += apq8074-v2.0-1-dragonboard.dtb
# APQ8084
zreladdr-$(CONFIG_ARCH_APQ8084) := 0x00008000
diff --git a/arch/arm/mach-msm/board-fsm9900-gpiomux.c b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
index dede706..990aefc 100644
--- a/arch/arm/mach-msm/board-fsm9900-gpiomux.c
+++ b/arch/arm/mach-msm/board-fsm9900-gpiomux.c
@@ -17,6 +17,370 @@
#include <mach/board.h>
#include <mach/gpiomux.h>
+static struct gpiomux_setting blsp_uart_no_pull_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting blsp_uart_pull_up_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting blsp_i2c_config = {
+ .func = GPIOMUX_FUNC_3,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_blsp_configs[] __initdata = {
+ {
+ .gpio = 0, /* BLSP UART1 TX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+ },
+ },
+ {
+ .gpio = 1, /* BLSP UART1 RX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+ },
+ },
+ {
+ .gpio = 2, /* BLSP I2C SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 3, /* BLSP I2C SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 6, /* BLSP I2C SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 7, /* BLSP I2C SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 36, /* BLSP UART10 TX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_no_pull_config,
+ },
+ },
+ {
+ .gpio = 37, /* BLSP UART10 RX */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_uart_pull_up_config,
+ },
+ },
+ {
+ .gpio = 38, /* BLSP I2C10 SDA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+ {
+ .gpio = 39, /* BLSP I2C10 SCL */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &blsp_i2c_config,
+ },
+ },
+
+};
+
+static struct gpiomux_setting geni_func4_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting geni_func5_config = {
+ .func = GPIOMUX_FUNC_5,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct msm_gpiomux_config fsm_geni_configs[] __initdata = {
+ {
+ .gpio = 8, /* GENI7 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 9, /* GENI1 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 10, /* GENI2 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 11, /* GENI7 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 20, /* GENI3 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 21, /* GENI4 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 22, /* GENI6 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 23, /* GENI6 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func5_config,
+ },
+ },
+ {
+ .gpio = 30, /* GENI5 DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+ {
+ .gpio = 31, /* GENI5 CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &geni_func4_config,
+ },
+ },
+
+};
+
+static struct gpiomux_setting dan_spi_func4_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting dan_spi_func1_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_dan_spi_configs[] __initdata = {
+ {
+ .gpio = 12, /* BLSP DAN0 SPI_MOSI */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 13, /* BLSP DAN0 SPI_MISO */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 14, /* BLSP DAN0 SPI_CS */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 15, /* BLSP DAN0 SPI_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 16, /* BLSP DAN1 SPI_MOSI */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 17, /* BLSP DAN1 SPI_MISO */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 18, /* BLSP DAN1 SPI_CS */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 19, /* BLSP DAN1 SPI_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func4_config,
+ },
+ },
+ {
+ .gpio = 81, /* BLSP DAN1 SPI_CS0 */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+ },
+ },
+ {
+ .gpio = 82, /* BLSP DAN1 SPI_CS1 */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &dan_spi_func1_config,
+ },
+ },
+};
+
+static struct gpiomux_setting uim_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_4MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_uim_configs[] __initdata = {
+ {
+ .gpio = 24, /* UIM_DATA */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 25, /* UIM_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 26, /* UIM_RESET */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+ {
+ .gpio = 27, /* UIM_PRESENT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &uim_config,
+ },
+ },
+};
+
+static struct gpiomux_setting pcie_config = {
+ .func = GPIOMUX_FUNC_4,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_pcie_configs[] __initdata = {
+ {
+ .gpio = 28, /* BLSP PCIE1_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pcie_config,
+ },
+ },
+ {
+ .gpio = 32, /* BLSP PCIE0_CLK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pcie_config,
+ },
+ },
+};
+
+static struct gpiomux_setting pps_out_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_4MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting pps_in_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_clk_in_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
+static struct gpiomux_setting gps_nav_tlmm_blank_config = {
+ .func = GPIOMUX_FUNC_2,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+static struct msm_gpiomux_config fsm_gps_configs[] __initdata = {
+ {
+ .gpio = 40, /* GPS_PPS_OUT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pps_out_config,
+ },
+ },
+ {
+ .gpio = 41, /* GPS_PPS_IN */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &pps_in_config,
+ },
+ },
+ {
+ .gpio = 43, /* GPS_CLK_IN */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gps_clk_in_config,
+ },
+ },
+ {
+ .gpio = 120, /* GPS_NAV_TLMM_BLANK */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gps_nav_tlmm_blank_config,
+ },
+ },
+};
+
+static struct gpiomux_setting sd_detect_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting sd_wp_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct msm_gpiomux_config fsm_sd_configs[] __initdata = {
+ {
+ .gpio = 42, /* SD_CARD_DET */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &sd_detect_config,
+ },
+ },
+ {
+ .gpio = 122, /* BLSP SD WRITE PROTECT */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &sd_wp_config,
+ },
+ },
+};
+
void __init fsm9900_init_gpiomux(void)
{
int rc;
@@ -26,4 +390,13 @@
pr_err("%s failed %d\n", __func__, rc);
return;
}
+
+ msm_gpiomux_install(fsm_blsp_configs, ARRAY_SIZE(fsm_blsp_configs));
+ msm_gpiomux_install(fsm_geni_configs, ARRAY_SIZE(fsm_geni_configs));
+ msm_gpiomux_install(fsm_dan_spi_configs,
+ ARRAY_SIZE(fsm_dan_spi_configs));
+ msm_gpiomux_install(fsm_uim_configs, ARRAY_SIZE(fsm_uim_configs));
+ msm_gpiomux_install(fsm_pcie_configs, ARRAY_SIZE(fsm_pcie_configs));
+ msm_gpiomux_install(fsm_gps_configs, ARRAY_SIZE(fsm_gps_configs));
+ msm_gpiomux_install(fsm_sd_configs, ARRAY_SIZE(fsm_sd_configs));
}
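
The new fsm9900 tables above all follow the same gpiomux idiom: a shared struct gpiomux_setting describes the pin function, drive strength, and pull; a struct msm_gpiomux_config entry binds that setting to a power state for one GPIO; and each table is handed to msm_gpiomux_install() at init. A minimal sketch of extending the board file with one more pin — GPIO 44 is purely hypothetical, the structures and installer are the same ones used above:

static struct gpiomux_setting example_gpio_config = {
	.func = GPIOMUX_FUNC_GPIO,	/* plain GPIO, no alternate function */
	.drv = GPIOMUX_DRV_2MA,
	.pull = GPIOMUX_PULL_NONE,
};

static struct msm_gpiomux_config fsm_example_configs[] __initdata = {
	{
		.gpio = 44,		/* hypothetical spare pin */
		.settings = {
			[GPIOMUX_SUSPENDED] = &example_gpio_config,
		},
	},
};

/* then, inside fsm9900_init_gpiomux(), alongside the installs above: */
msm_gpiomux_install(fsm_example_configs, ARRAY_SIZE(fsm_example_configs));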
diff --git a/arch/arm/mach-msm/include/mach/msm_smem.h b/arch/arm/mach-msm/include/mach/msm_smem.h
index 64ab6bf..a121791 100644
--- a/arch/arm/mach-msm/include/mach/msm_smem.h
+++ b/arch/arm/mach-msm/include/mach/msm_smem.h
@@ -136,6 +136,7 @@
SMEM_BAM_PIPE_MEMORY, /* 468 */
SMEM_IMAGE_VERSION_TABLE, /* 469 */
SMEM_LC_DEBUGGER, /* 470 */
+ SMEM_FLASH_NAND_DEV_INFO, /* 471 */
SMEM_NUM_ITEMS,
};
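
SMEM item IDs are a fixed contract shared with the remote processors, which is why SMEM_FLASH_NAND_DEV_INFO is appended immediately before SMEM_NUM_ITEMS rather than inserted anywhere else in the enum. A hedged sketch of how a driver could look the item up, assuming the smem_alloc() helper declared in this header and a hypothetical payload layout (the real layout is owned by the flash driver):

#include <mach/msm_smem.h>

/* hypothetical payload for illustration only */
struct flash_nand_dev_info {
	uint32_t flash_dev_type;
	uint32_t block_count;
};

static struct flash_nand_dev_info *nand_dev_info_get(void)
{
	/* smem_alloc() returns the existing entry if the remote side
	 * already allocated it, or NULL on failure */
	return smem_alloc(SMEM_FLASH_NAND_DEV_INFO,
			sizeof(struct flash_nand_dev_info));
}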
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
index 3a996eb..4538c4f 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_9625.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -524,6 +524,7 @@
.mode = NOC_QOS_MODE_FIXED,
.qport = qports_ipa,
.mas_hw_id = MAS_IPA,
+ .hw_sel = MSM_BUS_NOC,
},
{
.id = MSM_BUS_MASTER_QDSS_ETR,
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index cf21b82..7ef1d80 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -308,7 +308,7 @@
mempool_t *diag_hdlc_pool;
mempool_t *diag_user_pool;
mempool_t *diag_write_struct_pool;
- struct mutex diagmem_mutex;
+ spinlock_t diag_mem_lock;
int count;
int count_hdlc_pool;
int count_user_pool;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 099dc09..24d7fac 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -131,12 +131,8 @@
mutex_lock(&driver->diagchar_mutex);
if (buf_hdlc) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
- if (err) {
- /*Free the buffer right away if write failed */
+ if (err)
diagmem_free(driver, buf_hdlc, POOL_TYPE_HDLC);
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
- }
buf_hdlc = NULL;
#ifdef DIAG_DEBUG
pr_debug("diag: Number of bytes written "
@@ -1818,10 +1814,6 @@
if (HDLC_OUT_BUF_SIZE - driver->used <= (2*payload_size) + 3) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
@@ -1846,10 +1838,6 @@
(unsigned int)(buf_hdlc + HDLC_OUT_BUF_SIZE)) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
@@ -1871,10 +1859,6 @@
if (pkt_type == DATA_TYPE_RESPONSE) {
err = diag_device_write(buf_hdlc, APPS_DATA, NULL);
if (err) {
- /*Free the buffer right away if write failed */
- if (driver->logging_mode == USB_MODE)
- diagmem_free(driver, (unsigned char *)driver->
- write_ptr_svc, POOL_TYPE_WRITE_STRUCT);
ret = -EIO;
goto fail_free_hdlc;
}
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 609d6cd..a1f6b2c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -789,8 +789,16 @@
driver->write_ptr_svc->buf = buf;
err = usb_diag_write(driver->legacy_ch,
driver->write_ptr_svc);
- } else
- err = -1;
+ /* Free the buffer if write failed */
+ if (err) {
+ diagmem_free(driver,
+ (unsigned char *)driver->
+ write_ptr_svc,
+ POOL_TYPE_WRITE_STRUCT);
+ }
+ } else {
+ err = -ENOMEM;
+ }
} else if ((data_type >= MODEM_DATA) &&
(data_type <= WCNSS_DATA)) {
write_ptr->buf = buf;
@@ -1762,6 +1770,9 @@
}
#endif
if (!found_it) {
+ if (driver->logging_mode != USB_MODE)
+ pr_debug("diag: freeing buffer when not in usb mode\n");
+
diagmem_free(driver, (unsigned char *)buf,
POOL_TYPE_HDLC);
diagmem_free(driver, (unsigned char *)diag_write_ptr,
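
The diag changes above move ownership of the USB write structure to a single place: diagfwd.c frees write_ptr_svc right where usb_diag_write() fails, so the callers in diagchar_core.c no longer free it a second time. A condensed sketch of the resulting pattern, with names as in the diff and surrounding logic trimmed:

write_ptr = diagmem_alloc(driver, sizeof(struct diag_request),
				POOL_TYPE_WRITE_STRUCT);
if (!write_ptr)
	return -ENOMEM;	/* no buffer: report it, don't fake success */

write_ptr->buf = buf;
err = usb_diag_write(driver->legacy_ch, write_ptr);
if (err)
	/* freed here, at the allocation site, never by the caller */
	diagmem_free(driver, (unsigned char *)write_ptr,
			POOL_TYPE_WRITE_STRUCT);
return err;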
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index a6ef3ca..4ceca4f 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -25,22 +25,24 @@
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
void *buf = NULL;
+ unsigned long flags;
int index;
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
index = 0;
if (pool_type == POOL_TYPE_COPY) {
if (driver->diagpool) {
- mutex_lock(&driver->diagmem_mutex);
- if (driver->count < driver->poolsize) {
+ if ((driver->count < driver->poolsize) &&
+ (size <= driver->itemsize)) {
atomic_add(1, (atomic_t *)&driver->count);
buf = mempool_alloc(driver->diagpool,
GFP_ATOMIC);
}
- mutex_unlock(&driver->diagmem_mutex);
}
} else if (pool_type == POOL_TYPE_HDLC) {
if (driver->diag_hdlc_pool) {
- if (driver->count_hdlc_pool < driver->poolsize_hdlc) {
+ if ((driver->count_hdlc_pool < driver->poolsize_hdlc) &&
+ (size <= driver->itemsize_hdlc)) {
atomic_add(1,
(atomic_t *)&driver->count_hdlc_pool);
buf = mempool_alloc(driver->diag_hdlc_pool,
@@ -49,7 +51,8 @@
}
} else if (pool_type == POOL_TYPE_USER) {
if (driver->diag_user_pool) {
- if (driver->count_user_pool < driver->poolsize_user) {
+ if ((driver->count_user_pool < driver->poolsize_user) &&
+ (size <= driver->itemsize_user)) {
atomic_add(1,
(atomic_t *)&driver->count_user_pool);
buf = mempool_alloc(driver->diag_user_pool,
@@ -58,8 +61,9 @@
}
} else if (pool_type == POOL_TYPE_WRITE_STRUCT) {
if (driver->diag_write_struct_pool) {
- if (driver->count_write_struct_pool <
- driver->poolsize_write_struct) {
+ if ((driver->count_write_struct_pool <
+ driver->poolsize_write_struct) &&
+ (size <= driver->itemsize_write_struct)) {
atomic_add(1,
(atomic_t *)&driver->count_write_struct_pool);
buf = mempool_alloc(
@@ -71,8 +75,9 @@
pool_type == POOL_TYPE_HSIC_2) {
index = pool_type - POOL_TYPE_HSIC;
if (diag_hsic[index].diag_hsic_pool) {
- if (diag_hsic[index].count_hsic_pool <
- diag_hsic[index].poolsize_hsic) {
+ if ((diag_hsic[index].count_hsic_pool <
+ diag_hsic[index].poolsize_hsic) &&
+ (size <= diag_hsic[index].itemsize_hsic)) {
atomic_add(1, (atomic_t *)
&diag_hsic[index].count_hsic_pool);
buf = mempool_alloc(
@@ -85,7 +90,8 @@
index = pool_type - POOL_TYPE_HSIC_WRITE;
if (diag_hsic[index].diag_hsic_write_pool) {
if (diag_hsic[index].count_hsic_write_pool <
- diag_hsic[index].poolsize_hsic_write) {
+ diag_hsic[index].poolsize_hsic_write &&
+ (size <= diag_hsic[index].itemsize_hsic_write)) {
atomic_add(1, (atomic_t *)
&diag_hsic[index].
count_hsic_write_pool);
@@ -96,14 +102,17 @@
}
#endif
}
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
return buf;
}
void diagmem_exit(struct diagchar_dev *driver, int pool_type)
{
int index;
+ unsigned long flags;
index = 0;
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
if (driver->diagpool) {
if (driver->count == 0 && driver->ref_count == 0) {
mempool_destroy(driver->diagpool);
@@ -176,12 +185,18 @@
}
}
#endif
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
}
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
{
int index;
+ unsigned long flags;
+ if (!buf)
+ return;
+
+ spin_lock_irqsave(&driver->diag_mem_lock, flags);
index = 0;
if (pool_type == POOL_TYPE_COPY) {
if (driver->diagpool != NULL && driver->count > 0) {
@@ -246,13 +261,13 @@
__func__, pool_type);
}
-
+ spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
diagmem_exit(driver, pool_type);
}
void diagmem_init(struct diagchar_dev *driver)
{
- mutex_init(&driver->diagmem_mutex);
+ spin_lock_init(&driver->diag_mem_lock);
if (driver->count == 0) {
driver->diagpool = mempool_create_kmalloc_pool(
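
diagmem.c swaps diagmem_mutex for the diag_mem_lock spinlock because the pools are now touched from atomic context, and every branch gains a size check so a request larger than the pool's item size can no longer be served from it. The COPY-pool branch reduces to this pattern (condensed from the code above):

void *buf = NULL;
unsigned long flags;

spin_lock_irqsave(&driver->diag_mem_lock, flags);
if (driver->diagpool &&
	driver->count < driver->poolsize &&
	size <= driver->itemsize) {
	atomic_add(1, (atomic_t *)&driver->count);
	/* GFP_ATOMIC: sleeping is illegal while holding a spinlock */
	buf = mempool_alloc(driver->diagpool, GFP_ATOMIC);
}
spin_unlock_irqrestore(&driver->diag_mem_lock, flags);
return buf;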
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 118e033..aac183b 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -22,9 +22,11 @@
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
+ adreno_dispatch.o \
adreno_postmortem.o \
adreno_snapshot.o \
adreno_coresight.o \
+ adreno_trace.o \
adreno_a2xx.o \
adreno_a2xx_trace.o \
adreno_a2xx_snapshot.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b964620..184dd982 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -32,6 +32,7 @@
#include "adreno.h"
#include "adreno_pm4types.h"
+#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -213,7 +214,7 @@
512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
};
-static unsigned int adreno_isidle(struct kgsl_device *device);
+static bool adreno_isidle(struct kgsl_device *device);
/**
* adreno_perfcounter_init: Reserve kernel performance counters
@@ -598,23 +599,9 @@
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
- irqreturn_t result;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- result = adreno_dev->gpudev->irq_handler(adreno_dev);
-
- device->pwrctrl.irq_last = 1;
- if (device->requested_state == KGSL_STATE_NONE) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
- queue_work(device->work_queue, &device->idle_check_ws);
- }
-
- /* Reset the time-out in our idle timer */
- mod_timer_pending(&device->idle_timer,
- jiffies + device->pwrctrl.interval_timeout);
- mod_timer_pending(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- return result;
+ return adreno_dev->gpudev->irq_handler(adreno_dev);
}
static void adreno_cleanup_pt(struct kgsl_device *device,
@@ -921,7 +908,7 @@
adreno_dev->dev.cff_dump_enable);
}
-static void adreno_iommu_setstate(struct kgsl_device *device,
+static int adreno_iommu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -934,22 +921,24 @@
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int result;
if (adreno_use_default_setstate(adreno_dev)) {
kgsl_mmu_device_setstate(&device->mmu, flags);
- return;
+ return 0;
}
num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return;
+ return -EINVAL;
adreno_ctx = ADRENO_CONTEXT(context);
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return;
+ result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
+
+ if (result)
+ goto done;
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
@@ -983,14 +972,24 @@
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
- adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
- &link[0], sizedwords);
+ result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
- kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+ /*
+	 * On error, disable the IOMMU clock right away; otherwise turn it off
+ * after the command has been retired
+ */
+ if (result)
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ else
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
+
+done:
kgsl_context_put(context);
+ return result;
}
-static void adreno_gpummu_setstate(struct kgsl_device *device,
+static int adreno_gpummu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -1001,6 +1000,7 @@
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
+ int ret = 0;
/*
* Fix target freeze issue by adding TLB flush for each submit
@@ -1017,7 +1017,8 @@
if (!adreno_use_default_setstate(adreno_dev)) {
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return;
+ return -EINVAL;
+
adreno_ctx = ADRENO_CONTEXT(context);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
@@ -1092,7 +1093,7 @@
sizedwords += 2;
}
- adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
@@ -1100,9 +1101,11 @@
} else {
kgsl_mmu_device_setstate(&device->mmu, flags);
}
+
+ return ret;
}
-static void adreno_setstate(struct kgsl_device *device,
+static int adreno_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -1111,6 +1114,8 @@
return adreno_gpummu_setstate(device, context_id, flags);
else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
return adreno_iommu_setstate(device, context_id, flags);
+
+ return 0;
}
static unsigned int
@@ -1586,6 +1591,10 @@
if (status)
goto error_close_rb;
+ status = adreno_dispatcher_init(adreno_dev);
+ if (status)
+ goto error_close_device;
+
adreno_debugfs_init(device);
adreno_profile_init(device);
@@ -1601,6 +1610,8 @@
return 0;
+error_close_device:
+ kgsl_device_platform_remove(device);
error_close_rb:
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
error:
@@ -1623,6 +1634,7 @@
kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
+ adreno_dispatcher_close(adreno_dev);
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
kgsl_device_platform_remove(device);
@@ -1634,8 +1646,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/* Power up the device */
kgsl_pwrctrl_enable(device);
@@ -1705,8 +1716,7 @@
kgsl_cffdump_open(device);
- if (KGSL_STATE_DUMP_AND_FT != device->state)
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
(device->pwrctrl.gpu_cx &&
@@ -1757,11 +1767,11 @@
if (status)
goto error_irq_off;
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
-
adreno_perfcounter_start(adreno_dev);
+ /* Start the dispatcher */
+ adreno_dispatcher_start(adreno_dev);
+
device->reset_counter++;
return 0;
@@ -1791,6 +1801,7 @@
adreno_dev->drawctxt_active = NULL;
+ adreno_dispatcher_stop(adreno_dev);
adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
kgsl_mmu_stop(&device->mmu);
@@ -1798,7 +1809,6 @@
device->ftbl->irqctrl(device, 0);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
adreno_ocmem_gmem_free(adreno_dev);
@@ -1810,917 +1820,41 @@
return 0;
}
-/*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party, if fault tolerance failed then
- * mark all as guilty
- */
-
-static int _mark_context_status(int id, void *ptr, void *data)
-{
- unsigned int ft_status = *((unsigned int *) data);
- struct kgsl_context *context = ptr;
- struct adreno_context *adreno_context = ADRENO_CONTEXT(context);
-
- if (ft_status) {
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
- context->reset_status) {
- if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
- CTXT_FLAGS_GPU_HANG_FT))
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
- else
- context->reset_status =
- KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
- }
-
- return 0;
-}
-
-static void adreno_mark_context_status(struct kgsl_device *device,
- int ft_status)
-{
- /* Mark the status for all the contexts in the device */
-
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _mark_context_status, &ft_status);
- read_unlock(&device->context_lock);
-}
-
-/*
- * For hung contexts set the current memstore value to the most recent issued
- * timestamp - this resets the status and lets the system continue on
- */
-
-static int _set_max_ts(int id, void *ptr, void *data)
-{
- struct kgsl_device *device = data;
- struct kgsl_context *context = ptr;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-
- if (drawctxt && drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- soptimestamp), drawctxt->timestamp);
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context->id,
- eoptimestamp), drawctxt->timestamp);
- }
-
- return 0;
-}
-
-static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
-{
- read_lock(&device->context_lock);
- idr_for_each(&device->context_idr, _set_max_ts, device);
- read_unlock(&device->context_lock);
-}
-
-static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
-{
- vfree(ft_data->rb_buffer);
- vfree(ft_data->bad_rb_buffer);
- vfree(ft_data->good_rb_buffer);
-}
-
-static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
- unsigned int *ptr,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int val1;
- unsigned int size = rb->buffer_desc.size;
- unsigned int start_ptr = *ptr;
-
- while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
- if (inc)
- start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
- size);
- else
- start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
- size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
- /* Ensure above read is finished before next read */
- rmb();
- if (KGSL_CMD_IDENTIFIER == val1) {
- if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
- start_ptr = adreno_ringbuffer_dec_wrapped(
- start_ptr, size);
- *ptr = start_ptr;
- status = 0;
- break;
- }
- }
- return status;
-}
-
-static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int global_eop,
- bool inc)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[3];
- int i = 0;
- bool check = false;
-
- if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
- return status;
-
- do {
- /*
- * when decrementing we need to decrement first and
- * then read make sure we cover all the data
- */
- if (!inc)
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
- temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && ((inc && val[i] == global_eop) ||
- (!inc && (val[i] ==
- cp_type3_packet(CP_MEM_WRITE, 2) ||
- val[i] == CACHE_FLUSH_TS)))) {
- /* decrement i, i.e i = (i - 1 + 3) % 3 if
- * we are going forward, else increment i */
- i = (i + 2) % 3;
- if (val[i] == rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)) {
- int j = ((i + 2) % 3);
- if ((inc && (val[j] == CACHE_FLUSH_TS ||
- val[j] == cp_type3_packet(
- CP_MEM_WRITE, 2))) ||
- (!inc && val[j] == global_eop)) {
- /* Found the global eop */
- status = 0;
- break;
- }
- }
- /* if no match found then increment i again
- * since we decremented before matching */
- i = (i + 1) % 3;
- }
- if (inc)
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
- temp_rb_rptr, size);
-
- i = (i + 1) % 3;
- if (2 == i)
- check = true;
- } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
- /* temp_rb_rptr points to the command stream after global eop,
- * move backward till the start of command sequence */
- if (!status) {
- status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
- if (!status) {
- *rb_rptr = temp_rb_rptr;
- KGSL_FT_INFO(rb->device,
- "Offset of cmd sequence after eop timestamp: 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- }
- }
- if (status)
- KGSL_FT_ERR(rb->device,
- "Failed to find the command sequence after eop timestamp %x\n",
- global_eop);
- return status;
-}
-
-static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
- unsigned int *rb_rptr,
- unsigned int ib1)
-{
- int status = -EINVAL;
- unsigned int temp_rb_rptr = *rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool ctx_switch = false;
-
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
- /* Ensure above read is finished before next read */
- rmb();
-
- if (check && val[i] == ib1) {
- /* decrement i, i.e i = (i - 1 + 2) % 2 */
- i = (i + 1) % 2;
- if (adreno_cmd_is_ib(val[i])) {
- /* go till start of command sequence */
- status = _find_start_of_cmd_seq(rb,
- &temp_rb_rptr, false);
-
- KGSL_FT_INFO(rb->device,
- "Found the hanging IB at offset 0x%x\n",
- temp_rb_rptr / sizeof(unsigned int));
- break;
- }
- /* if no match the increment i since we decremented
- * before checking */
- i = (i + 1) % 2;
- }
- /* Make sure you do not encounter a context switch twice, we can
- * encounter it once for the bad context as the start of search
- * can point to the context switch */
- if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- if (ctx_switch) {
- KGSL_FT_ERR(rb->device,
- "Context switch encountered before bad "
- "IB found\n");
- break;
- }
- ctx_switch = true;
- }
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
- if (!status)
- *rb_rptr = temp_rb_rptr;
- return status;
-}
-
-static void adreno_setup_ft_data(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
- unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
-
- memset(ft_data, 0, sizeof(*ft_data));
- ft_data->start_of_replay_cmds = 0xFFFFFFFF;
- ft_data->replay_for_snapshot = 0xFFFFFFFF;
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ft_data->ib1);
-
- kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore,
- &ft_data->global_eop,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
-
- /* Ensure context id and global eop ts read complete */
- rmb();
-
- ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->bad_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
-
- ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!ft_data->good_rb_buffer) {
- KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
- rb->buffer_desc.size);
- return;
- }
- ft_data->status = 0;
-
- /* find the start of bad command sequence in rb */
- context = kgsl_context_get(device, ft_data->context_id);
-
- ft_data->ft_policy = adreno_dev->ft_policy;
-
- if (!ft_data->ft_policy)
- ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
-
- /* Look for the command stream that is right after the global eop */
- ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
- ft_data->global_eop + 1, false);
- if (ret) {
- ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
- goto done;
- } else {
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
- }
-
- if (context) {
- adreno_context = ADRENO_CONTEXT(context);
- if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
- if (ft_data->ib1) {
- ret = _find_hanging_ib_sequence(rb,
- &rb_rptr, ft_data->ib1);
- if (ret) {
- KGSL_FT_ERR(device,
- "Start not found for replay IB seq\n");
- goto done;
- }
- ft_data->start_of_replay_cmds = rb_rptr;
- ft_data->replay_for_snapshot = rb_rptr;
- }
- }
- }
-
-done:
- kgsl_context_put(context);
-}
-
-static int
-_adreno_check_long_ib(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int curr_global_ts = 0;
-
- /* check if the global ts is still the same */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
- /* Ensure above read is finished before long ib check */
- rmb();
-
- /* Mark long ib as handled */
- adreno_dev->long_ib = 0;
-
- if (curr_global_ts == adreno_dev->long_ib_ts) {
- KGSL_FT_ERR(device,
- "IB ran too long, invalidate ctxt\n");
- return 1;
- } else {
- /* Do nothing GPU has gone ahead */
- KGSL_FT_INFO(device, "false long ib detection return\n");
- return 0;
- }
-}
-
/**
- * adreno_soft_reset() - Do a soft reset of the GPU hardware
- * @device: KGSL device to soft reset
+ * adreno_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
*
- * "soft reset" the GPU hardware - this is a fast path GPU reset
- * The GPU hardware is reset but we never pull power so we can skip
- * a lot of the standard adreno_stop/adreno_start sequence
+ * Try to reset the GPU to recover from a fault. First, try to do a low-latency
+ * soft reset. If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
*/
-int adreno_soft_reset(struct kgsl_device *device)
+int adreno_reset(struct kgsl_device *device)
{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
- /* If the jump table index is 0 soft reset is not supported */
- if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
- dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
- return -EINVAL;
- }
+ /* Try soft reset first */
+ if (adreno_soft_reset(device) == 0)
+ return 0;
- if (adreno_dev->drawctxt_active)
- kgsl_context_put(&adreno_dev->drawctxt_active->base);
-
- adreno_dev->drawctxt_active = NULL;
-
- /* Stop the ringbuffer */
- adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
-
- /* Delete the idle timer */
- del_timer_sync(&device->idle_timer);
-
- /* Make sure we are totally awake */
- kgsl_pwrctrl_enable(device);
-
- /* Reset the GPU */
- adreno_dev->gpudev->soft_reset(adreno_dev);
-
- /* Reinitialize the GPU */
- adreno_dev->gpudev->start(adreno_dev);
-
- /* Enable IRQ */
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- device->ftbl->irqctrl(device, 1);
-
- /*
- * Restart the ringbuffer - we can go down the warm start path because
- * power was never yanked
- */
- ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ /* If it failed, then pull the power */
+ ret = adreno_stop(device);
if (ret)
return ret;
- device->reset_counter++;
+ ret = adreno_start(device);
- return 0;
-}
-
-static int
-_adreno_ft_restart_device(struct kgsl_device *device,
- struct kgsl_context *context)
-{
- /* If device soft reset fails try hard reset */
- if (adreno_soft_reset(device))
- KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
- else
- /* Soft reset is successful */
- goto reset_done;
-
- /* restart device */
- if (adreno_stop(device)) {
- KGSL_FT_ERR(device, "Device stop failed\n");
- return 1;
- }
-
- if (adreno_init(device)) {
- KGSL_FT_ERR(device, "Device init failed\n");
- return 1;
- }
-
- if (adreno_start(device)) {
- KGSL_FT_ERR(device, "Device start failed\n");
- return 1;
- }
-
-reset_done:
- if (context)
- kgsl_mmu_setstate(&device->mmu, context->pagetable,
- KGSL_MEMSTORE_GLOBAL);
-
- /* If iommu is used then we need to make sure that the iommu clocks
- * are on since there could be commands in pipeline that touch iommu */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
- if (kgsl_mmu_enable_clk(&device->mmu,
- KGSL_IOMMU_CONTEXT_USER))
- return 1;
- }
-
- return 0;
-}
-
-static inline void
-_adreno_debug_ft_info(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
-
- /*
- * Dumping rb is a very useful tool to debug FT.
- * It will tell us if we are extracting the rb correctly
- * NOP'ing the right IB, skipping the EOF correctly etc.
- */
- if (device->ft_log >= 7) {
-
- /* Print fault tolerance data here */
- KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
- ft_data->rb_size);
- adreno_dump_rb(device, ft_data->rb_buffer,
- ft_data->rb_size<<2, 0, ft_data->rb_size);
-
- KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
- ft_data->bad_rb_size);
- adreno_dump_rb(device, ft_data->bad_rb_buffer,
- ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
-
- KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
- ft_data->good_rb_size);
- adreno_dump_rb(device, ft_data->good_rb_buffer,
- ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
-
- }
-}
-
-static int
-_adreno_ft_resubmit_rb(struct kgsl_device *device,
- struct adreno_ringbuffer *rb,
- struct kgsl_context *context,
- struct adreno_ft_data *ft_data,
- unsigned int *buff, unsigned int size)
-{
- unsigned int ret = 0;
- unsigned int retry_num = 0;
-
- _adreno_debug_ft_info(device, ft_data);
-
- do {
- ret = _adreno_ft_restart_device(device, context);
- if (ret == 0)
- break;
+ if (ret == 0) {
/*
- * If device restart fails sleep for 20ms before
- * attempting restart. This allows GPU HW to settle
- * and improve the chances of next restart to be
- * successful.
+ * If active_cnt is non-zero then the system was active before
+ * going into a reset - put it back in that state
*/
- msleep(20);
- KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
- retry_num++;
- } while (retry_num < 4);
- if (ret) {
- KGSL_FT_ERR(device, "Device restart failed\n");
- BUG_ON(1);
- goto done;
- }
-
- if (size) {
-
- /* submit commands and wait for them to pass */
- adreno_ringbuffer_restore(rb, buff, size);
-
- ret = adreno_idle(device);
- }
-
-done:
- return ret;
-}
-
-
-static int
-_adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0, i;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- struct kgsl_context *context;
- struct adreno_context *adreno_context = NULL;
- struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
- unsigned int long_ib = 0;
- static int no_context_ft;
- struct kgsl_mmu *mmu = &device->mmu;
-
- context = kgsl_context_get(device, ft_data->context_id);
-
- if (context == NULL) {
- KGSL_FT_ERR(device, "Last context unknown id:%d\n",
- ft_data->context_id);
- if (no_context_ft) {
- /*
- * If 2 consecutive no context ft occurred then
- * just reset GPU
- */
- no_context_ft = 0;
- goto play_good_cmds;
- }
- } else {
- no_context_ft = 0;
- adreno_context = ADRENO_CONTEXT(context);
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
- /*
- * set the invalid ts flag to 0 for this context since we have
- * detected a hang for it
- */
- context->wait_on_invalid_ts = false;
-
- if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
- ft_data->status = 1;
- KGSL_FT_ERR(device, "Fault tolerance not supported\n");
- goto play_good_cmds;
- }
-
- /*
- * This flag will be set by userspace for contexts
- * that do not want to be fault tolerant (ex: OPENCL)
- */
- if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
- ft_data->status = 1;
- KGSL_FT_ERR(device,
- "No FT set for this context play good cmds\n");
- goto play_good_cmds;
- }
-
- }
-
- /* Check if we detected a long running IB, if false return */
- if ((adreno_context) && (adreno_dev->long_ib)) {
- long_ib = _adreno_check_long_ib(device);
- if (!long_ib) {
- adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
- return 0;
- }
- }
-
- /*
- * Extract valid contents from rb which can still be executed after
- * hang
- */
- adreno_ringbuffer_extract(rb, ft_data);
-
- /* If long IB detected do not attempt replay of bad cmds */
- if (long_ib) {
- ft_data->status = 1;
- _adreno_debug_ft_info(device, ft_data);
- goto play_good_cmds;
- }
-
- if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
- (ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
- KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
- /* Do not try to replay if hang is due to a pagefault */
- if (context && test_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv)) {
- /* Resume MMU */
- mmu->mmu_ops->mmu_pagefault_resume(mmu);
- if ((ft_data->context_id == context->id) &&
- (ft_data->global_eop == context->pagefault_ts)) {
- ft_data->ft_policy &= ~KGSL_FT_REPLAY;
- KGSL_FT_ERR(device, "MMU fault skipping replay\n");
- }
- clear_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
- }
-
- if (ft_data->ft_policy & KGSL_FT_REPLAY) {
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Replay status: 1\n");
- ft_data->status = 1;
- } else
- goto play_good_cmds;
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if ((ft_data->bad_rb_buffer[i] ==
- CP_HDR_INDIRECT_BUFFER_PFD) &&
- (ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
-
- ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
- ft_data->bad_rb_buffer[i+1] =
- KGSL_NOP_IB_IDENTIFIER;
- ft_data->bad_rb_buffer[i+2] =
- KGSL_NOP_IB_IDENTIFIER;
- break;
- }
- }
-
- if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
- KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
- ft_data->status = 1;
- goto play_good_cmds;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
- if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
- for (i = 0; i < ft_data->bad_rb_size; i++) {
- if (ft_data->bad_rb_buffer[i] ==
- KGSL_END_OF_FRAME_IDENTIFIER) {
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- break;
- }
- }
-
- /* EOF not found in RB, discard till EOF in
- next IB submission */
- if (adreno_context && (i == ft_data->bad_rb_size)) {
- adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
- KGSL_FT_INFO(device,
- "EOF not found in RB, skip next issueib till EOF\n");
- ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->bad_rb_buffer, ft_data->bad_rb_size);
-
- if (ret) {
- KGSL_FT_ERR(device, "Skip EOF status: 1\n");
- ft_data->status = 1;
- } else {
- ft_data->status = 0;
- goto play_good_cmds;
- }
- }
-
-play_good_cmds:
-
- if (ft_data->status)
- KGSL_FT_ERR(device, "Bad context commands failed\n");
- else {
- KGSL_FT_INFO(device, "Bad context commands success\n");
-
- if (adreno_context) {
- adreno_context->flags = (adreno_context->flags &
- ~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
- }
-
- if (last_active_ctx)
- _kgsl_context_get(&last_active_ctx->base);
-
- adreno_dev->drawctxt_active = last_active_ctx;
- }
-
- ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
- ft_data->good_rb_buffer, ft_data->good_rb_size);
-
- if (ret) {
- /*
- * If we fail here we can try to invalidate another
- * context and try fault tolerance again, although
- * we will only try ft with no context once to avoid
- * going into continuous loop of trying ft with no context
- */
- if (!context)
- no_context_ft = 1;
- ret = -EAGAIN;
- KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
- goto done;
- } else
- KGSL_FT_INFO(device, "Playing good commands successful\n");
-
- /* ringbuffer now has data from the last valid context id,
- * so restore the active_ctx to the last valid context */
- if (ft_data->last_valid_ctx_id) {
- struct kgsl_context *last_ctx = kgsl_context_get(device,
- ft_data->last_valid_ctx_id);
-
- adreno_dev->drawctxt_active = ADRENO_CONTEXT(last_ctx);
- }
-
-done:
- /* Turn off iommu clocks */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
-
- kgsl_context_put(context);
- return ret;
-}
-
-static int
-adreno_ft(struct kgsl_device *device,
- struct adreno_ft_data *ft_data)
-{
- int ret = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- /*
- * If GPU FT is turned off do not run FT.
- * If GPU stall detection is suspected to be false,
- * we can use this option to confirm stall detection.
- */
- if (ft_data->ft_policy & KGSL_FT_OFF) {
- KGSL_FT_ERR(device, "GPU FT turned off\n");
- return 0;
- }
-
- KGSL_FT_INFO(device,
- "Start Parameters: IB1: 0x%X, "
- "Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id, ft_data->global_eop);
-
- KGSL_FT_INFO(device, "Last issued global timestamp: %x\n",
- rb->global_ts);
-
- /* We may need to replay commands multiple times based on whether
- * multiple contexts hang the GPU */
- while (true) {
-
- ret = _adreno_ft(device, ft_data);
-
- if (-EAGAIN == ret) {
- /* setup new fault tolerance parameters and retry, this
- * means more than 1 contexts are causing hang */
- adreno_destroy_ft_data(ft_data);
- adreno_setup_ft_data(device, ft_data);
- KGSL_FT_INFO(device,
- "Retry. Parameters: "
- "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
- ft_data->ib1, ft_data->context_id,
- ft_data->global_eop);
- } else {
- break;
- }
- }
-
- if (ret)
- goto done;
-
- /* Restore correct states after fault tolerance */
- if (adreno_dev->drawctxt_active)
- device->mmu.hwpagetable =
- adreno_dev->drawctxt_active->base.pagetable;
- else
- device->mmu.hwpagetable = device->mmu.defaultpagetable;
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp), rb->global_ts);
-
- /* switch to NULL ctxt */
- if (adreno_dev->drawctxt_active != NULL)
- adreno_drawctxt_switch(adreno_dev, NULL, 0);
-
-done:
- adreno_set_max_ts_for_bad_ctxs(device);
- adreno_mark_context_status(device, ret);
- KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
- ft_data->ft_policy, ret);
- return ret;
-}
-
-int
-adreno_dump_and_exec_ft(struct kgsl_device *device)
-{
- int result = -ETIMEDOUT;
- struct adreno_ft_data ft_data;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int curr_pwrlevel;
-
- if (device->state == KGSL_STATE_HUNG)
- goto done;
- if (device->state == KGSL_STATE_DUMP_AND_FT) {
- mutex_unlock(&device->mutex);
- wait_for_completion(&device->ft_gate);
- mutex_lock(&device->mutex);
- if (device->state != KGSL_STATE_HUNG)
- result = 0;
- } else {
- /*
- * While fault tolerance is happening we do not want the
- * idle_timer to fire and attempt to change any device state
- */
- del_timer_sync(&device->idle_timer);
-
- kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
- INIT_COMPLETION(device->ft_gate);
- /* Detected a hang */
-
- kgsl_cffdump_hang(device);
- /* Run fault tolerance at max power level */
- curr_pwrlevel = pwr->active_pwrlevel;
- kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
-
- /* Get the fault tolerance data as soon as hang is detected */
- adreno_setup_ft_data(device, &ft_data);
-
- /*
- * If long ib is detected, do not attempt postmortem or
- * snapshot, if GPU is still executing commands
- * we will get errors
- */
- if (!adreno_dev->long_ib) {
- /*
- * Trigger an automatic dump of the state to
- * the console
- */
- kgsl_postmortem_dump(device, 0);
-
- /*
- * Make a GPU snapshot. For now, do it after the
- * PM dump so we can at least be sure the PM dump
- * will work as it always has
- */
- kgsl_device_snapshot(device, 1);
- }
-
- result = adreno_ft(device, &ft_data);
- adreno_destroy_ft_data(&ft_data);
-
- /* restore power level */
- kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
-
- if (result) {
- kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
- } else {
+ if (atomic_read(&device->active_cnt))
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
- mod_timer(&device->hang_timer,
- (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- }
- complete_all(&device->ft_gate);
}
-done:
- return result;
+
+ return ret;
}
-EXPORT_SYMBOL(adreno_dump_and_exec_ft);
/**
* _ft_sysfs_store() - Common routine to write to FT sysfs files
@@ -3119,140 +2253,166 @@
return status;
}
-static int adreno_ringbuffer_drain(struct kgsl_device *device,
- unsigned int *regs)
+/**
+ * adreno_hw_isidle() - Check if the GPU core is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the RBBM status register for the GPU type indicates that the
+ * hardware is idle
+ */
+static bool adreno_hw_isidle(struct kgsl_device *device)
+{
+ unsigned int reg_rbbm_status;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Don't consider ourselves idle if there is an IRQ pending */
+ if (adreno_dev->gpudev->irq_pending(adreno_dev))
+ return false;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
+ &reg_rbbm_status);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (reg_rbbm_status == 0x110)
+ return true;
+ } else if (adreno_is_a3xx(adreno_dev)) {
+ if (!(reg_rbbm_status & 0x80000000))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * adreno_soft_reset() - Do a soft reset of the GPU hardware
+ * @device: KGSL device to soft reset
+ *
+ * "soft reset" the GPU hardware - this is a fast path GPU reset
+ * The GPU hardware is reset but we never pull power so we can skip
+ * a lot of the standard adreno_stop/adreno_start sequence
+ */
+int adreno_soft_reset(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned long wait = jiffies;
- unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- unsigned int rptr;
+ int ret;
- do {
- /*
- * Wait is "jiffies" first time in the loop to start
- * GPU stall detection immediately.
- */
- if (time_after(jiffies, wait)) {
- /* Check to see if the core is hung */
- if (adreno_ft_detect(device, regs))
- return -ETIMEDOUT;
+ /* If the jump table index is 0 soft reset is not supported */
+ if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
+ dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
+ return -EINVAL;
+ }
- wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
- }
- rptr = adreno_get_rptr(rb);
- if (time_after(jiffies, timeout)) {
- KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
- rptr, rb->wptr);
- return -ETIMEDOUT;
- }
- } while (rptr != rb->wptr);
+ if (adreno_dev->drawctxt_active)
+ kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+ adreno_dev->drawctxt_active = NULL;
+
+ /* Stop the ringbuffer */
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ /* Delete the idle timer */
+ del_timer_sync(&device->idle_timer);
+
+ /* Make sure we are totally awake */
+ kgsl_pwrctrl_enable(device);
+
+ /* Reset the GPU */
+ adreno_dev->gpudev->soft_reset(adreno_dev);
+
+ /* Reinitialize the GPU */
+ adreno_dev->gpudev->start(adreno_dev);
+
+ /* Enable IRQ */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ /*
+ * Restart the ringbuffer - we can go down the warm start path because
+ * power was never yanked
+ */
+ ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ if (ret)
+ return ret;
+
+ device->reset_counter++;
return 0;
}
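adreno_soft_reset() is the fast path: it resets the core without pulling power, so recovery code would typically try it first and fall back to a full restart when it returns -EINVAL. A minimal sketch of that shape; the fallback behavior is an assumption, since this patch only declares adreno_reset() in adreno.h:

	/*
	 * Sketch only: try the fast soft reset first. The fallback is
	 * assumed, not taken from this patch.
	 */
	static int example_recover(struct kgsl_device *device)
	{
		int ret;

		/* Fast path: reset the core without pulling power */
		ret = adreno_soft_reset(device);
		if (ret == -EINVAL) {
			/*
			 * Hypothetical fallback: a full stop/start
			 * sequence belongs here for cores that lack a
			 * soft reset hook
			 */
			ret = -ENODEV;
		}

		return ret;
	}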
-/* Caller must hold the device mutex. */
+/**
+ * adreno_isidle() - return true if the GPU hardware is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the GPU hardware is idle and there are no commands pending in
+ * the ringbuffer
+ */
+static bool adreno_isidle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int rptr;
+
+ if (!kgsl_pwrctrl_isenabled(device))
+ return true;
+
+ rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
+
+ if (rptr == adreno_dev->ringbuffer.wptr)
+ return adreno_hw_isidle(device);
+
+ return false;
+}
+
+/**
+ * adreno_idle() - wait for the GPU hardware to go idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ */
+
int adreno_idle(struct kgsl_device *device)
{
- unsigned long wait_time;
- unsigned long wait_time_part;
- unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
+ /*
+ * Make sure the device mutex is held so the dispatcher can't send any
+ * more commands to the hardware
+ */
- kgsl_cffdump_regpoll(device,
- adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
- 0x00000000, 0x80000000);
+ BUG_ON(!mutex_is_locked(&device->mutex));
-retry:
- /* First, wait for the ringbuffer to drain */
- if (adreno_ringbuffer_drain(device, prev_reg_val))
- goto err;
+ if (adreno_is_a3xx(adreno_dev))
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x00000000, 0x80000000);
+ else
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x110, 0x110);
- /* now, wait for the GPU to finish its operations */
- wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
-
- while (time_before(jiffies, wait_time)) {
+ while (time_before(jiffies, wait)) {
if (adreno_isidle(device))
return 0;
-
- /* Dont wait for timeout, detect hang faster. */
- if (time_after(jiffies, wait_time_part)) {
- wait_time_part = jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_ft_detect(device, prev_reg_val)))
- goto err;
- }
-
}
-err:
- KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
- if (KGSL_STATE_DUMP_AND_FT != device->state &&
- !adreno_dump_and_exec_ft(device)) {
- wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
- goto retry;
- }
+ kgsl_postmortem_dump(device, 0);
+
return -ETIMEDOUT;
}
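adreno_idle() asserts that the device mutex is held, so callers must take it first. A minimal usage sketch; the helper name is illustrative:

	/* Sketch: quiesce the GPU before a state change (hypothetical) */
	static int example_quiesce(struct kgsl_device *device)
	{
		int ret;

		mutex_lock(&device->mutex);
		/* Waits up to ADRENO_IDLE_TIMEOUT for an empty RB and idle HW */
		ret = adreno_idle(device);
		mutex_unlock(&device->mutex);

		return ret;
	}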
/**
- * is_adreno_rbbm_status_idle - Check if GPU core is idle by probing
- * rbbm_status register
- * @device - Pointer to the GPU device whose idle status is to be
- * checked
- * @returns - Returns whether the core is idle (based on rbbm_status)
- * false if the core is active, true if the core is idle
+ * adreno_drain() - Drain the dispatch queue
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Tell the dispatcher to pause - this has the effect of draining the inflight
+ * command batches
*/
-static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
+static int adreno_drain(struct kgsl_device *device)
{
- unsigned int reg_rbbm_status;
- bool status = false;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- /* Is the core idle? */
- adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
- &reg_rbbm_status);
-
- if (adreno_is_a2xx(adreno_dev)) {
- if (reg_rbbm_status == 0x110)
- status = true;
- } else {
- if (!(reg_rbbm_status & 0x80000000))
- status = true;
- }
- return status;
-}
-
-static unsigned int adreno_isidle(struct kgsl_device *device)
-{
- int status = false;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
-
- /* If the device isn't active, don't force it on. */
- if (kgsl_pwrctrl_isenabled(device)) {
- /* Is the ring buffer is empty? */
- unsigned int rptr = adreno_get_rptr(rb);
- if (rptr == rb->wptr) {
- /*
- * Are there interrupts pending? If so then pretend we
- * are not idle - this avoids the possiblity that we go
- * to a lower power state without handling interrupts
- * first.
- */
-
- if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
- /* Is the core idle? */
- status = is_adreno_rbbm_status_idle(device);
- }
- }
- } else {
- status = true;
- }
- return status;
+ adreno_dispatcher_pause(adreno_dev);
+ return 0;
}
/* Caller must hold the device mutex. */
@@ -3423,342 +2583,6 @@
__raw_writel(value, reg);
}
-static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
-{
- unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
-
- if (k_ctxt != NULL) {
- struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
- if (kgsl_context_detached(k_ctxt))
- context_id = KGSL_CONTEXT_INVALID;
- else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- context_id = k_ctxt->id;
- }
-
- return context_id;
-}
-
-static unsigned int adreno_check_hw_ts(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status = 0;
- unsigned int ref_ts, enableflag;
- unsigned int context_id = _get_context_id(context);
-
- /*
- * If the context ID is invalid, we are in a race with
- * the context being destroyed by userspace so bail.
- */
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
- return -EINVAL;
- }
-
- status = kgsl_check_timestamp(device, context, timestamp);
- if (status)
- return status;
-
- kgsl_sharedmem_readl(&device->memstore, &enableflag,
- KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
- /*
- * Barrier is needed here to make sure the read from memstore
- * has posted
- */
-
- mb();
-
- if (enableflag) {
- kgsl_sharedmem_readl(&device->memstore, &ref_ts,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts));
-
- /* Make sure the memstore read has posted */
- mb();
- if (timestamp_cmp(ref_ts, timestamp) >= 0) {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- /* Make sure the memstore write is posted */
- wmb();
- }
- } else {
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts), timestamp);
- enableflag = 1;
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), enableflag);
-
- /* Make sure the memstore write gets posted */
- wmb();
-
- /*
- * submit a dummy packet so that even if all
- * commands upto timestamp get executed we will still
- * get an interrupt
- */
-
- if (context && device->state != KGSL_STATE_SLUMBER) {
- adreno_ringbuffer_issuecmds(device,
- ADRENO_CONTEXT(context),
- KGSL_CMD_FLAGS_GET_INT, NULL, 0);
- }
- }
-
- return 0;
-}
-
-/* Return 1 if the event timestmp has already passed, 0 if it was marked */
-static int adreno_next_event(struct kgsl_device *device,
- struct kgsl_event *event)
-{
- return adreno_check_hw_ts(device, event->context, event->timestamp);
-}
-
-static int adreno_check_interrupt_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- int status;
-
- mutex_lock(&device->mutex);
- status = adreno_check_hw_ts(device, context, timestamp);
- mutex_unlock(&device->mutex);
-
- return status;
-}
-
-/*
- wait_event_interruptible_timeout checks for the exit condition before
- placing a process in wait q. For conditional interrupts we expect the
- process to already be in its wait q when its exit condition checking
- function is called.
-*/
-#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
-({ \
- long __ret = timeout; \
- if (io) \
- __wait_io_event_interruptible_timeout(wq, condition, __ret);\
- else \
- __wait_event_interruptible_timeout(wq, condition, __ret);\
- __ret; \
-})
-
-
-
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned int curr_reg_val[FT_DETECT_REGS_COUNT];
- unsigned int fast_hang_detected = 1;
- unsigned int long_ib_detected = 1;
- unsigned int i;
- static unsigned long next_hang_detect_time;
- static unsigned int prev_global_ts;
- unsigned int curr_global_ts = 0;
- unsigned int curr_context_id = 0;
- static struct adreno_context *curr_context;
- static struct kgsl_context *context;
- static char pid_name[TASK_COMM_LEN] = "unknown";
-
- if (!adreno_dev->fast_hang_detect)
- fast_hang_detected = 0;
-
- if (!adreno_dev->long_ib_detect)
- long_ib_detected = 0;
-
- if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
- return 0;
-
- if (is_adreno_rbbm_status_idle(device) &&
- (kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED)
- == rb->global_ts)) {
-
- /*
- * On A2XX if the RPTR != WPTR and the device is idle, then
- * the last write to WPTR probably failed to latch so write it
- * again
- */
-
- if (adreno_is_a2xx(adreno_dev)) {
- unsigned int rptr;
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &rptr);
- if (rptr != adreno_dev->ringbuffer.wptr)
- adreno_writereg(adreno_dev,
- ADRENO_REG_CP_RB_WPTR,
- adreno_dev->ringbuffer.wptr);
- }
-
- return 0;
- }
-
- /*
- * Time interval between hang detection should be KGSL_TIMEOUT_PART
- * or more, if next hang detection is requested < KGSL_TIMEOUT_PART
- * from the last time do nothing.
- */
- if ((next_hang_detect_time) &&
- (time_before(jiffies, next_hang_detect_time)))
- return 0;
- else
- next_hang_detect_time = (jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
-
- /* Read the current Hang detect reg values here */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (ft_detect_regs[i] == 0)
- continue;
- kgsl_regread(device, ft_detect_regs[i],
- &curr_reg_val[i]);
- }
-
- /* Read the current global timestamp here */
- kgsl_sharedmem_readl(&device->memstore,
- &curr_global_ts,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp));
- /* Make sure the memstore read has posted */
- mb();
-
- if (curr_global_ts == prev_global_ts) {
-
- /* If we don't already have a good context, get it. */
- if (kgsl_context_detached(context)) {
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- strlcpy(pid_name, "unknown", sizeof(pid_name));
-
- kgsl_sharedmem_readl(&device->memstore,
- &curr_context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- /* Make sure the memstore read has posted */
- mb();
-
- context = kgsl_context_get(device, curr_context_id);
- if (context != NULL) {
- struct task_struct *task;
- curr_context = ADRENO_CONTEXT(context);
- curr_context->ib_gpu_time_used = 0;
- task = find_task_by_vpid(context->pid);
- if (task)
- get_task_comm(pid_name, task);
- } else {
- KGSL_DRV_ERR(device,
- "Fault tolerance no context found\n");
- }
- }
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (curr_reg_val[i] != prev_reg_val[i]) {
- fast_hang_detected = 0;
-
- /* Check for long IB here */
- if ((i >=
- LONG_IB_DETECT_REG_INDEX_START)
- &&
- (i <=
- LONG_IB_DETECT_REG_INDEX_END))
- long_ib_detected = 0;
- }
- }
-
- if (fast_hang_detected) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
- " on global ts %d\n",
- pid_name, context ? context->id : 0,
- (kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED) + 1),
- curr_global_ts + 1);
- return 1;
- }
-
- if (curr_context != NULL) {
-
- curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
- KGSL_FT_INFO(device,
- "Proc %s used GPU Time %d ms on timestamp 0x%X\n",
- pid_name, curr_context->ib_gpu_time_used,
- curr_global_ts+1);
-
- if ((long_ib_detected) &&
- (!(curr_context->flags &
- CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
- curr_context->ib_gpu_time_used +=
- KGSL_TIMEOUT_PART;
- if (curr_context->ib_gpu_time_used >
- KGSL_TIMEOUT_LONG_IB_DETECTION) {
- if (adreno_dev->long_ib_ts !=
- curr_global_ts) {
- KGSL_FT_ERR(device,
- "Proc %s, ctxt_id %d ts %d"
- "used GPU for %d ms long ib "
- "detected on global ts %d\n",
- pid_name, context->id,
- (kgsl_readtimestamp(device,
- context,
- KGSL_TIMESTAMP_RETIRED)+1),
- curr_context->ib_gpu_time_used,
- curr_global_ts+1);
- adreno_dev->long_ib = 1;
- adreno_dev->long_ib_ts =
- curr_global_ts;
- curr_context->ib_gpu_time_used =
- 0;
- return 1;
- }
- }
- }
- }
- } else {
- /* GPU is moving forward */
- prev_global_ts = curr_global_ts;
- kgsl_context_put(context);
- context = NULL;
- curr_context = NULL;
- strlcpy(pid_name, "unknown", sizeof(pid_name));
- adreno_dev->long_ib = 0;
- adreno_dev->long_ib_ts = 0;
- }
-
-
- /* If hangs are not detected copy the current reg values
- * to previous values and return no hang */
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
- prev_reg_val[i] = curr_reg_val[i];
- return 0;
-}
-
-static int _check_pending_timestamp(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int timestamp)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int context_id = _get_context_id(context);
- unsigned int ts_issued;
-
- if (context_id == KGSL_CONTEXT_INVALID)
- return -EINVAL;
-
- ts_issued = adreno_context_timestamp(context, &adreno_dev->ringbuffer);
-
- if (timestamp_cmp(timestamp, ts_issued) <= 0)
- return 0;
-
- if (context && !context->wait_on_invalid_ts) {
- KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
- context_id, timestamp, context_id, ts_issued);
-
- /* Only print this message once */
- context->wait_on_invalid_ts = true;
- }
-
- return -EINVAL;
-}
-
/**
* adreno_waittimestamp - sleep while waiting for the specified timestamp
* @device - pointer to a KGSL device structure
@@ -3766,147 +2590,35 @@
* @timestamp - GPU timestamp to wait for
* @msecs - amount of time to wait (in milliseconds)
*
- * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
- * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
- * one if it happened. Otherwise, spend most of our time in an interruptible
- * wait for the timestamp interrupt to be processed. This function must be
- * called with the mutex already held.
+ * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
*/
static int adreno_waittimestamp(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int timestamp,
- unsigned int msecs)
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
{
- static unsigned int io_cnt;
- struct adreno_context *adreno_ctx = context ? ADRENO_CONTEXT(context) :
- NULL;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- unsigned int context_id = _get_context_id(context);
- unsigned int time_elapsed = 0;
- unsigned int wait;
- int ts_compare = 1;
- int io, ret = -ETIMEDOUT;
+ int ret;
+ struct adreno_context *drawctxt;
- if (context_id == KGSL_CONTEXT_INVALID) {
- KGSL_DRV_WARN(device, "context was detached");
+ if (context == NULL) {
+ /* If they are waiting with no context then complain once */
+ dev_WARN_ONCE(device->dev, 1,
+ "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
return -EINVAL;
}
- /*
- * Check to see if the requested timestamp is "newer" then the last
- * timestamp issued. If it is complain once and return error. Only
- * print the message once per context so that badly behaving
- * applications don't spam the logs
- */
+ /* Return -EINVAL if the context has been detached */
+ if (kgsl_context_detached(context))
+ return -EINVAL;
- if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
- if (_check_pending_timestamp(device, context, timestamp))
- return -EINVAL;
+ ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
+ timestamp, msecs_to_jiffies(msecs));
- /* Reset the invalid timestamp flag on a valid wait */
- context->wait_on_invalid_ts = false;
- }
+ /* If the context got invalidated then return a specific error */
+ drawctxt = ADRENO_CONTEXT(context);
- /*
- * On the first time through the loop only wait 100ms.
- * this gives enough time for the engine to start moving and oddly
- * provides better hang detection results than just going the full
- * KGSL_TIMEOUT_PART right off the bat. The exception to this rule
- * is if msecs happens to be < 100ms then just use 20ms or the msecs,
- * whichever is larger because anything less than 20 is unreliable
- */
-
- if (msecs == 0 || msecs >= 100)
- wait = 100;
- else
- wait = (msecs > 20) ? msecs : 20;
-
- do {
- long status;
-
- /*
- * if the timestamp happens while we're not
- * waiting, there's a chance that an interrupt
- * will not be generated and thus the timestamp
- * work needs to be queued.
- */
-
- if (kgsl_check_timestamp(device, context, timestamp)) {
- queue_work(device->work_queue, &device->ts_expired_ws);
- ret = 0;
- break;
- }
-
- /*
- * For proper power accounting sometimes we need to call
- * io_wait_interruptible_timeout and sometimes we need to call
- * plain old wait_interruptible_timeout. We call the regular
- * timeout N times out of 100, where N is a number specified by
- * the current power level
- */
-
- io_cnt = (io_cnt + 1) % 100;
- io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
- ? 0 : 1;
-
- mutex_unlock(&device->mutex);
-
- /* Wait for a timestamp event */
- status = kgsl_wait_event_interruptible_timeout(
- device->wait_queue,
- adreno_check_interrupt_timestamp(device, context,
- timestamp), msecs_to_jiffies(wait), io);
-
- mutex_lock(&device->mutex);
-
- /*
- * If status is non zero then either the condition was satisfied
- * or there was an error. In either event, this is the end of
- * the line for us
- */
-
- if (status != 0) {
- ret = (status > 0) ? 0 : (int) status;
- break;
- }
- time_elapsed += wait;
-
- /* If user specified timestamps are being used, wait at least
- * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
- * issue a IB for a timestamp before checking to see if the
- * current timestamp we are waiting for is valid or not
- */
-
- if (ts_compare && (adreno_ctx &&
- (adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
- if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
- ret = _check_pending_timestamp(device, context,
- timestamp);
- if (ret)
- break;
-
- /* Don't do this check again */
- ts_compare = 0;
-
- /*
- * Reset the invalid timestamp flag on a valid
- * wait
- */
- context->wait_on_invalid_ts = false;
- }
- }
-
- /*
- * We want to wait the floor of KGSL_TIMEOUT_PART
- * and (msecs - time_elapsed).
- */
-
- if (KGSL_TIMEOUT_PART < (msecs - time_elapsed))
- wait = KGSL_TIMEOUT_PART;
- else
- wait = (msecs - time_elapsed);
-
- } while (!msecs || time_elapsed < msecs);
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
return ret;
}
@@ -3915,13 +2627,13 @@
struct kgsl_context *context, enum kgsl_timestamp_type type)
{
unsigned int timestamp = 0;
- unsigned int context_id = _get_context_id(context);
+ unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
/*
- * If the context ID is invalid, we are in a race with
+ * If the context is detached we are in a race with
* the context being destroyed by userspace so bail.
*/
- if (context_id == KGSL_CONTEXT_INVALID) {
+ if (context && kgsl_context_detached(context)) {
KGSL_DRV_WARN(device, "context was detached");
return timestamp;
}
@@ -3935,11 +2647,11 @@
}
case KGSL_TIMESTAMP_CONSUMED:
kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
+ KGSL_MEMSTORE_OFFSET(id, soptimestamp));
break;
case KGSL_TIMESTAMP_RETIRED:
kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+ KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
break;
}
@@ -4099,6 +2811,7 @@
.gpuid = adreno_gpuid,
.snapshot = adreno_snapshot,
.irq_handler = adreno_irq_handler,
+ .drain = adreno_drain,
/* Optional functions */
.setstate = adreno_setstate,
.drawctxt_create = adreno_drawctxt_create,
@@ -4106,7 +2819,6 @@
.drawctxt_destroy = adreno_drawctxt_destroy,
.setproperty = adreno_setproperty,
.postmortem_dump = adreno_dump,
- .next_event = adreno_next_event,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 72f15e7..32e43b2 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -96,6 +96,46 @@
TRACE_BUS_CTL,
};
+/*
+ * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
+ * smaller than this, but this size will allow for a larger range of inflight
+ * sizes that can be chosen at runtime
+ */
+
+#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+
+/**
+ * struct adreno_dispatcher - container for the adreno GPU dispatcher
+ * @mutex: Mutex to protect the structure
+ * @state: Current state of the dispatcher (active or paused)
+ * @timer: Timer to monitor the progress of the command batches
+ * @inflight: Number of command batch operations pending in the ringbuffer
+ * @fault: True if a HW fault was detected
+ * @pending: Priority list of contexts waiting to submit command batches
+ * @plist_lock: Spin lock to protect the pending queue
+ * @cmdqueue: Queue of command batches currently in flight
+ * @head: pointer to the head of the cmdqueue. This is the oldest pending
+ * operation
+ * @tail: pointer to the tail of the cmdqueue. This is the most recently
+ * submitted operation
+ * @work: work_struct to put the dispatcher in a work queue
+ * @kobj: kobject for the dispatcher directory in the device sysfs node
+ */
+struct adreno_dispatcher {
+ struct mutex mutex;
+ unsigned int state;
+ struct timer_list timer;
+ unsigned int inflight;
+ int fault;
+ struct plist_head pending;
+ spinlock_t plist_lock;
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+ unsigned int head;
+ unsigned int tail;
+ struct work_struct work;
+ struct kobject kobj;
+};
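The cmdqueue is a plain ring buffer: head and tail wrap modulo ADRENO_DISPATCH_CMDQUEUE_SIZE, head == tail means empty, and one slot is left unused so a full queue can be told apart from an empty one. Illustrative index helpers, not part of the patch:

	#define EXAMPLE_RING_SIZE 128

	static inline unsigned int example_ring_next(unsigned int i)
	{
		return (i + 1) % EXAMPLE_RING_SIZE;
	}

	static inline int example_ring_empty(unsigned int head, unsigned int tail)
	{
		return head == tail;
	}

	static inline int example_ring_full(unsigned int head, unsigned int tail)
	{
		/* One slot is sacrificed so full never looks like empty */
		return example_ring_next(tail) == head;
	}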
+
struct adreno_gpudev;
struct adreno_device {
@@ -136,6 +176,7 @@
unsigned int ocmem_base;
unsigned int gpu_cycles;
struct adreno_profile profile;
+ struct adreno_dispatcher dispatcher;
};
#define PERFCOUNTER_FLAG_NONE 0x0
@@ -266,9 +307,9 @@
/* GPU specific function hooks */
int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_draw_workaround)(struct adreno_device *,
+ int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_draw_workaround)(struct adreno_device *,
struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
@@ -291,46 +332,6 @@
void (*postmortem_dump)(struct adreno_device *adreno_dev);
};
-/*
- * struct adreno_ft_data - Structure that contains all information to
- * perform gpu fault tolerance
- * @ib1 - IB1 that the GPU was executing when hang happened
- * @context_id - Context which caused the hang
- * @global_eop - eoptimestamp at time of hang
- * @rb_buffer - Buffer that holds the commands from good contexts
- * @rb_size - Number of valid dwords in rb_buffer
- * @bad_rb_buffer - Buffer that holds commands from the hanging context
- * bad_rb_size - Number of valid dwords in bad_rb_buffer
- * @good_rb_buffer - Buffer that holds commands from good contexts
- * good_rb_size - Number of valid dwords in good_rb_buffer
- * @last_valid_ctx_id - The last context from which commands were placed in
- * ringbuffer before the GPU hung
- * @step - Current fault tolerance step being executed
- * @err_code - Fault tolerance error code
- * @fault - Indicates whether the hang was caused due to a pagefault
- * @start_of_replay_cmds - Offset in ringbuffer from where commands can be
- * replayed during fault tolerance
- * @replay_for_snapshot - Offset in ringbuffer where IB's can be saved for
- * replaying with snapshot
- */
-struct adreno_ft_data {
- unsigned int ib1;
- unsigned int context_id;
- unsigned int global_eop;
- unsigned int *rb_buffer;
- unsigned int rb_size;
- unsigned int *bad_rb_buffer;
- unsigned int bad_rb_size;
- unsigned int *good_rb_buffer;
- unsigned int good_rb_size;
- unsigned int last_valid_ctx_id;
- unsigned int status;
- unsigned int ft_policy;
- unsigned int err_code;
- unsigned int start_of_replay_cmds;
- unsigned int replay_for_snapshot;
-};
-
#define FT_DETECT_REGS_COUNT 12
struct log_field {
@@ -410,13 +411,21 @@
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
int hang);
-int adreno_dump_and_exec_ft(struct kgsl_device *device);
+void adreno_dispatcher_start(struct adreno_device *adreno_dev);
+int adreno_dispatcher_init(struct adreno_device *adreno_dev);
+void adreno_dispatcher_close(struct adreno_device *adreno_dev);
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
+ unsigned int timeout);
+void adreno_dispatcher_irq_fault(struct kgsl_device *device);
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-void adreno_dump_rb(struct kgsl_device *device, const void *buf,
- size_t len, int start, int size);
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
-unsigned int adreno_ft_detect(struct kgsl_device *device,
- unsigned int *prev_reg_val);
+void adreno_dispatcher_schedule(struct kgsl_device *device);
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
+int adreno_reset(struct kgsl_device *device);
int adreno_ft_init_sysfs(struct kgsl_device *device);
void adreno_ft_uninit_sysfs(struct kgsl_device *device);
@@ -533,9 +542,7 @@
{
if (k_ctxt) {
struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
- if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- return a_ctxt->timestamp;
+ return a_ctxt->timestamp;
}
return rb->global_ts;
}
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3d72c5c..cce4f91 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1451,7 +1451,7 @@
return ret;
}
-static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -1468,7 +1468,7 @@
ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
else
- return;
+ return 0;
/*
* Issue an empty draw call to avoid possible hangs due to
* repeated idles without intervening draw calls.
@@ -1499,41 +1499,46 @@
| adreno_dev->pix_shader_start;
}
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
- &cmd[0], cmds - cmd);
+ return adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE, &cmd[0], cmds - cmd);
}
-static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
+ return 0;
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->reg_save[1],
context->reg_save[2] << 2, true);
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->reg_save, 3);
+ if (ret)
+ return ret;
+
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_save[1],
context->shader_save[2] << 2, true);
/* save shader partitioning and instructions. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->shader_save, 3);
+ if (ret)
+ return ret;
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_fixup[1],
@@ -1542,10 +1547,13 @@
* fixup shader partitioning parameter for
* SET_SHADER_BASES.
*/
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_fixup, 3);
+ if (ret)
+ return ret;
+
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
}
@@ -1558,32 +1566,41 @@
/* save gmem.
* (note: changes shader. shader must already be saved.)
*/
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_save, 3);
+ if (ret)
+ return ret;
+
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->chicken_restore[1],
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
+
+ if (ret)
+ return ret;
}
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
} else if (adreno_is_a2xx(adreno_dev))
- a2xx_drawctxt_draw_workaround(adreno_dev, context);
+ return a2xx_drawctxt_draw_workaround(adreno_dev, context);
+
+ return 0;
}
-static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
+ int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and thats it */
@@ -1598,7 +1615,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return;
+ return 0;
}
cmds[0] = cp_nop_packet(1);
@@ -1607,8 +1624,11 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
+ if (ret)
+ return ret;
+
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -1621,9 +1641,11 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_restore, 3);
+ if (ret)
+ return ret;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device,
@@ -1632,9 +1654,11 @@
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
+ if (ret)
+ return ret;
}
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
@@ -1646,8 +1670,10 @@
context->reg_restore[2] << 2, true);
/* restore registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
/* restore shader instructions & partitioning. */
if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
@@ -1656,18 +1682,22 @@
context->shader_restore[1],
context->shader_restore[2] << 2, true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
+ if (ret)
+ return ret;
}
}
if (adreno_is_a20x(adreno_dev)) {
cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
cmds[1] = context->bin_base_offset;
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, cmds, 2);
}
+
+ return ret;
}
/*
@@ -1734,13 +1764,14 @@
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
- /* This indicates that we could not read CP_INT_STAT.
- * As a precaution just wake up processes so
- * they can check their timestamps. Since, we
- * did not ack any interrupts this interrupt will
- * be generated again */
+ /*
+ * This indicates that we could not read CP_INT_STAT.
+ * As a precaution schedule the dispatcher to check
+ * things out. Since we did not ack any interrupts this
+ * interrupt will be generated again
+ */
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
- wake_up_interruptible_all(&device->wait_queue);
+ adreno_dispatcher_schedule(device);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
@@ -1766,7 +1797,7 @@
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
queue_work(device->work_queue, &device->ts_expired_ws);
- wake_up_interruptible_all(&device->wait_queue);
+ adreno_dispatcher_schedule(device);
}
}
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index d96965c..8b75c4e 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2382,32 +2382,38 @@
return ret;
}
-static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return;
+ return 0;
- if (context->flags & CTXT_FLAGS_GPU_HANG)
- KGSL_CTXT_WARN(device,
- "Current active context has caused gpu hang\n");
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* Fixup self modifying IBs for save operations */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
+ if (ret)
+ return ret;
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->regconstant_save, 3);
+ if (ret)
+ return ret;
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
/* Save shader instructions */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+ if (ret)
+ return ret;
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
@@ -2425,19 +2431,25 @@
context->context_gmem_shadow.gmem_save[1],
context->context_gmem_shadow.gmem_save[2] << 2, true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_save, 3);
+ if (ret)
+ return ret;
+
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
}
+
+ return 0;
}
-static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
+ int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and thats it */
@@ -2452,7 +2464,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return;
+ return 0;
}
cmds[0] = cp_nop_packet(1);
@@ -2461,8 +2473,11 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
+ if (ret)
+ return ret;
+
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -2478,36 +2493,47 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_restore, 3);
+ if (ret)
+ return ret;
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
}
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
/* Fixup self modifying IBs for restore operations */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->restore_fixup, 3);
+ if (ret)
+ return ret;
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->constant_restore, 3);
+ if (ret)
+ return ret;
if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
-
+ if (ret)
+ return ret;
/* Restore HLSQ_CONTROL_0 register */
- adreno_ringbuffer_issuecmds(device, context,
+ ret = adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->hlsqcontrol_restore, 3);
}
+
+ return ret;
}
static int a3xx_rb_init(struct adreno_device *adreno_dev,
@@ -2621,11 +2647,8 @@
{
struct kgsl_device *device = &adreno_dev->dev;
- /* Wake up everybody waiting for the interrupt */
- wake_up_interruptible_all(&device->wait_queue);
-
- /* Schedule work to free mem and issue ibs */
queue_work(device->work_queue, &device->ts_expired_ws);
+ adreno_dispatcher_schedule(device);
}
/**
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
new file mode 100644
index 0000000..e429934
--- /dev/null
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -0,0 +1,1038 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
+
+#define ADRENO_DISPATCHER_ACTIVE 0
+#define ADRENO_DISPATCHER_PAUSE 1
+
+#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+
+/* Number of commands that can be queued in a context before it sleeps */
+static unsigned int _context_cmdqueue_size = 50;
+
+/* Number of milliseconds to wait for the context queue to clear */
+static unsigned int _context_queue_wait = 10000;
+
+/* Number of command batches sent at a time from a single context */
+static unsigned int _context_cmdbatch_burst = 5;
+
+/* Number of command batches inflight in the ringbuffer at any time */
+static unsigned int _dispatcher_inflight = 15;
+
+/* Command batch timeout (in milliseconds) */
+static unsigned int _cmdbatch_timeout = 2000;
+
+/**
+ * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Dequeue a new command batch from the context list
+ */
+static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
+ struct adreno_context *drawctxt)
+{
+ struct kgsl_cmdbatch *cmdbatch = NULL;
+
+ mutex_lock(&drawctxt->mutex);
+ if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ drawctxt->cmdqueue_head =
+ CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE);
+ drawctxt->queued--;
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ return cmdbatch;
+}
+
+/**
+ * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * queue
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ *
+ * Failure to submit a command to the ringbuffer isn't the fault of the command
+ * being submitted so if a failure happens, push it back on the head of the the
+ * context queue to be reconsidered again
+ */
+static inline void adreno_dispatcher_requeue_cmdbatch(
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+{
+ int prev; /* signed: may step to -1 before wrapping below */
+ mutex_lock(&drawctxt->mutex);
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ return;
+ }
+
+ prev = drawctxt->cmdqueue_head - 1;
+
+ if (prev < 0)
+ prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
+
+ /*
+ * The maximum queue size always needs to be one less than the size of
+ * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ */
+
+ BUG_ON(prev == drawctxt->cmdqueue_tail);
+
+ drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->queued++;
+
+ /* Reset the command queue head to reflect the newly requeued change */
+ drawctxt->cmdqueue_head = prev;
+ mutex_unlock(&drawctxt->mutex);
+}
+
+/**
+ * dispatcher_queue_context() - Queue a context in the dispatcher pending list
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Add a context to the dispatcher pending list.
+ */
+static void dispatcher_queue_context(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ spin_lock(&dispatcher->plist_lock);
+
+ if (plist_node_empty(&drawctxt->pending)) {
+ /* Get a reference to the context while it sits on the list */
+ _kgsl_context_get(&drawctxt->base);
+ trace_dispatch_queue_context(drawctxt);
+ plist_add(&drawctxt->pending, &dispatcher->pending);
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+}
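The pending list is a kernel priority list (<linux/plist.h>): each context embeds a plist_node, plist_node_empty() doubles as an "already queued" test, and plist_add() keeps the list sorted so lower priority values are served first. A self-contained sketch of the same pattern with illustrative names:

	#include <linux/plist.h>

	struct example_item {
		struct plist_node node;
	};

	/* Done once per item, e.g. at context creation */
	static void example_item_init(struct example_item *item, int prio)
	{
		plist_node_init(&item->node, prio);
	}

	/* Queue the item unless it is already on the list */
	static void example_queue(struct plist_head *head,
			struct example_item *item)
	{
		if (plist_node_empty(&item->node))
			plist_add(&item->node, head);
	}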
+
+/**
+ * sendcmd() - Send a command batch to the GPU hardware
+ * @adreno_dev: Pointer to the adreno device struct
+ * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ *
+ * Send a KGSL command batch to the GPU hardware
+ */
+static int sendcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ dispatcher->inflight++;
+
+ mutex_lock(&device->mutex);
+
+ if (dispatcher->inflight == 1) {
+ /* Time to make the donuts. Turn on the GPU */
+ ret = kgsl_active_count_get(device);
+ if (ret) {
+ dispatcher->inflight--;
+ mutex_unlock(&device->mutex);
+ return ret;
+ }
+ }
+
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
+
+ /* Turn the GPU back off on failure. Sad face. */
+ if (ret && dispatcher->inflight == 1)
+ kgsl_active_count_put(device);
+
+ mutex_unlock(&device->mutex);
+
+ if (ret) {
+ dispatcher->inflight--;
+ KGSL_DRV_ERR(device,
+ "Unable to submit command to the ringbuffer\n");
+ return ret;
+ }
+
+ trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
+
+ dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
+ dispatcher->tail = (dispatcher->tail + 1) %
+ ADRENO_DISPATCH_CMDQUEUE_SIZE;
+
+ /*
+ * If this is the first command in the pipe then the GPU will
+ * immediately start executing it so we can start the expiry timeout on
+ * the command batch here. Subsequent command batches will have their
+ * timer started when the previous command batch is retired
+ */
+ if (dispatcher->inflight == 1) {
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+ }
+
+ return 0;
+}
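sendcmd() takes the active-count reference only on the 0 to 1 inflight transition, so the matching put belongs on the 1 to 0 transition when the last batch retires. The retirement path is outside this hunk; a hedged sketch of its expected shape:

	/* Sketch (assumed shape): mirror of sendcmd()'s active_count_get */
	static void example_retire(struct kgsl_device *device,
			struct adreno_dispatcher *dispatcher)
	{
		if (--dispatcher->inflight == 0) {
			mutex_lock(&device->mutex);
			kgsl_active_count_put(device);
			mutex_unlock(&device->mutex);
		}
	}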
+
+/**
+ * dispatcher_context_sendcmds() - Send commands from a context to the GPU
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno context to dispatch commands from
+ *
+ * Dequeue and send a burst of commands from the specified context to the GPU
+ */
+static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int count = 0;
+
+ /*
+ * Each context can send a specific number of command batches per cycle
+ */
+ for ( ; count < _context_cmdbatch_burst &&
+ dispatcher->inflight < _dispatcher_inflight; count++) {
+ int ret;
+ struct kgsl_cmdbatch *cmdbatch =
+ adreno_dispatcher_get_cmdbatch(drawctxt);
+
+ if (cmdbatch == NULL)
+ break;
+
+ ret = sendcmd(adreno_dev, cmdbatch);
+
+ /*
+ * There are various reasons why we can't submit a command (no
+ * memory for the commands, full ringbuffer, etc) but none of
+ * these are actually the current command's fault. Requeue it
+ * back on the context and let it come back around again if
+ * conditions improve
+ */
+ if (ret) {
+ adreno_dispatcher_requeue_cmdbatch(drawctxt, cmdbatch);
+ break;
+ }
+ }
+
+ /*
+ * If the context successfully submitted commands, then
+ * unconditionally put it back on the queue to be considered the
+ * next time around. This might seem a little wasteful but it is
+ * reasonable to think that a busy context will stay busy.
+ */
+
+ if (count) {
+ dispatcher_queue_context(adreno_dev, drawctxt);
+
+ /*
+ * If we submitted something there will be room in the
+ * context queue so ping the context wait queue on the
+ * chance that the context is snoozing
+ */
+
+ wake_up_interruptible_all(&drawctxt->wq);
+ }
+
+ return count;
+}
+
+/**
+ * _adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Issue as many commands as possible (up to inflight) from the pending contexts
+ * This function assumes the dispatcher mutex has been locked.
+ */
+static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /* Don't do anything if the dispatcher is paused */
+ if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
+ return 0;
+
+ while (dispatcher->inflight < _dispatcher_inflight) {
+ struct adreno_context *drawctxt = NULL;
+
+ spin_lock(&dispatcher->plist_lock);
+
+ if (!plist_head_empty(&dispatcher->pending)) {
+ drawctxt = plist_first_entry(&dispatcher->pending,
+ struct adreno_context, pending);
+
+ plist_del(&drawctxt->pending, &dispatcher->pending);
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+
+ if (drawctxt == NULL)
+ break;
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ kgsl_context_put(&drawctxt->base);
+ continue;
+ }
+
+ dispatcher_context_sendcmds(adreno_dev, drawctxt);
+ kgsl_context_put(&drawctxt->base);
+ }
+
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_issuecmds() - Issue commands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Lock the dispatcher and call _adreno_dispatcher_issuecmds()
+ */
+int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ mutex_lock(&dispatcher->mutex);
+ ret = _adreno_dispatcher_issuecmds(adreno_dev);
+ mutex_unlock(&dispatcher->mutex);
+
+ return ret;
+}
+
+static int _check_context_queue(struct adreno_context *drawctxt)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ /*
+ * Wake up if there is room in the context or if the whole thing got
+ * invalidated while we were asleep
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = 1;
+ else
+ ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ return ret;
+}
+
+/**
+ * adreno_dispatcher_replay() - Replay commands from the dispatcher queue
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Replay the commands from the dispatcher inflight queue. This is called after
+ * a power down/up to recover from a fault
+ */
+int adreno_dispatcher_replay(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct kgsl_cmdbatch **replay;
+ int i, ptr, count = 0;
+
+ BUG_ON(!mutex_is_locked(&dispatcher->mutex));
+
+ replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
+
+ /*
+ * If we can't allocate enough memory for the replay commands then we
+ * are in a bad way. Invalidate everything, reset the GPU and see ya
+ * later alligator
+ */
+
+ if (replay == NULL) {
+
+ ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ struct kgsl_context *context =
+ dispatcher->cmdqueue[ptr]->context;
+
+ adreno_drawctxt_invalidate(device, context);
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /* Reset the dispatcher queue */
+ dispatcher->inflight = 0;
+ dispatcher->head = dispatcher->tail = 0;
+
+ /* Reset the hardware */
+ mutex_lock(&device->mutex);
+
+ /*
+ * If adreno_reset fails then the GPU is not alive and there
+ * isn't anything we can do to recover at this point
+ */
+
+ BUG_ON(adreno_reset(device));
+ mutex_unlock(&device->mutex);
+
+ return 0;
+ }
+
+ ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ struct kgsl_cmdbatch *cmdbatch = dispatcher->cmdqueue[ptr];
+ struct adreno_context *drawctxt =
+ ADRENO_CONTEXT(cmdbatch->context);
+
+ if (cmdbatch->invalid)
+ adreno_drawctxt_invalidate(device, cmdbatch->context);
+
+ if (!kgsl_context_detached(cmdbatch->context) &&
+ drawctxt->state == ADRENO_CONTEXT_STATE_ACTIVE) {
+ /*
+ * The context for the command batch is still valid -
+ * add it to the replay list
+ */
+ replay[count++] = dispatcher->cmdqueue[ptr];
+ } else {
+ /*
+ * Skip over invalidated or detached contexts - cancel
+ * any pending events for the timestamp and destroy the
+ * command batch
+ */
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, cmdbatch->context,
+ cmdbatch->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ }
+
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /* Reset the dispatcher queue */
+ dispatcher->inflight = 0;
+ dispatcher->head = dispatcher->tail = 0;
+
+ mutex_lock(&device->mutex);
+ BUG_ON(adreno_reset(device));
+ mutex_unlock(&device->mutex);
+
+ /* Replay the pending command buffers */
+ for (i = 0; i < count; i++) {
+ int ret = sendcmd(adreno_dev, replay[i]);
+
+ /*
+ * I'm afraid that if we get an error during replay we
+ * are not going to space today
+ */
+
+ BUG_ON(ret);
+ }
+
+ /*
+ * active_count will be set when we come into this function because
+ * there were inflight commands. By virtue of setting ->inflight back
+ * to 0 sendcmd() will increase the active count again on the first
+ * submission. This active_count_put is needed to put the universe back
+ * in balance and as a bonus it ensures that the hardware stays up for
+ * the entire reset process
+ */
+ mutex_lock(&device->mutex);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+
+ kfree(replay);
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_queue_cmd() - Queue a new command in the context
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the command batch being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EINVAL;
+ }
+
+ /* Wait for room in the context queue */
+
+ while (drawctxt->queued >= _context_cmdqueue_size) {
+ trace_adreno_drawctxt_sleep(drawctxt);
+ mutex_unlock(&drawctxt->mutex);
+
+ ret = wait_event_interruptible_timeout(drawctxt->wq,
+ _check_context_queue(drawctxt),
+ msecs_to_jiffies(_context_queue_wait));
+
+ mutex_lock(&drawctxt->mutex);
+ trace_adreno_drawctxt_wake(drawctxt);
+
+ if (ret <= 0) {
+ mutex_unlock(&drawctxt->mutex);
+ return (ret == 0) ? -ETIMEDOUT : (int) ret;
+ }
+
+ /*
+		 * Account for the possibility that the context got invalidated
+ * while we were sleeping
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EDEADLK;
+ }
+ }
+
+ /*
+ * If the UMD specified a timestamp then use that under the condition
+	 * that it is greater than the last queued timestamp in the context.
+ */
+
+ if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
+ if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
+ mutex_unlock(&drawctxt->mutex);
+ return -ERANGE;
+ }
+
+ drawctxt->timestamp = *timestamp;
+ } else
+ drawctxt->timestamp++;
+
+ cmdbatch->timestamp = drawctxt->timestamp;
+ *timestamp = drawctxt->timestamp;
+
+ /* Put the command into the queue */
+ drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
+ drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Add the context to the dispatcher pending list */
+ dispatcher_queue_context(adreno_dev, drawctxt);
+
+ /*
+	 * Only issue commands if inflight is less than burst - this prevents us
+	 * from sitting around waiting for the mutex on a busy system - the work
+	 * loop will schedule it for us. Inflight is mutex protected, but the
+	 * worst that can happen is that it goes to 0 after we check, and if it
+	 * goes to 0 it is because the work loop decremented it, in which case
+	 * the work queue will schedule new commands anyway.
+ */
+
+ if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
+ adreno_dispatcher_issuecmds(adreno_dev);
+
+ return 0;
+}
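timestamp_cmp() comes from the KGSL core and is not part of this patch. The sketch below shows the wraparound-tolerant comparison it is assumed to perform (the window constant is illustrative); this is what lets the ">= 0" check above reject stale user timestamps even across a 32-bit rollover:

	/* Assumed shape of the helper: > 0 if a is newer than b, 0 if equal,
	 * < 0 if a is older. Differences inside the window count as "newer"
	 * so that comparisons survive 32-bit wraparound.
	 */
	#define TIMESTAMP_WINDOW 0x80000000u	/* illustrative */

	static int timestamp_cmp(unsigned int a, unsigned int b)
	{
		if (a == b)
			return 0;

		return ((a - b) < TIMESTAMP_WINDOW) ? 1 : -1;
	}

With this shape, timestamp_cmp(5, 0xFFFFFFFE) still reports 5 as newer, so a context that wraps its timestamp keeps ordering correctly.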
+
+/**
+ * dispatcher_do_fault() - Handle a GPU fault and reset the GPU
+ * @device: Pointer to the KGSL device
+ * @cmdbatch: Pointer to the command batch believed to be responsible for the
+ * fault
+ * @invalidate: Non-zero if the current command should be invalidated
+ *
+ * Trigger a fault in the dispatcher and start the replay process
+ */
+static void dispatcher_do_fault(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch, int invalidate)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ unsigned int reg;
+
+ /* Stop the timers */
+ del_timer_sync(&dispatcher->timer);
+
+ mutex_lock(&device->mutex);
+
+ /*
+ * There is an interesting race condition here - when a command batch
+	 * expires and we invalidate before we recover, we run the risk of the
+	 * UMD cleaning up the context and freeing memory that the GPU is still
+	 * using. It isn't exactly dangerous because we are only a few
+	 * microseconds away from resetting, but it still ends up in page
+	 * faults, log messages and so on. To avoid this we mark the command
+ * batch itself as invalid and then reset - the context will get
+ * invalidated in the replay.
+ */
+
+ if (invalidate)
+ cmdbatch->invalid = 1;
+
+ /*
+ * Stop the CP in its tracks - this ensures that we don't get activity
+ * while we are trying to dump the state of the system
+ */
+
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
+ reg |= (1 << 27) | (1 << 28);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+
+ kgsl_postmortem_dump(device, 0);
+ kgsl_device_snapshot(device, 1);
+ mutex_unlock(&device->mutex);
+
+ /* If we can't replay then bravely run away and die */
+ if (adreno_dispatcher_replay(adreno_dev))
+ BUG();
+}
+
+static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+ unsigned int consumed, unsigned int retired)
+{
+ return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+}
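For example, with consumed = 103 and retired = 100, a batch with ts = 103 satisfies both clauses (timestamp_cmp(103, 103) = 0 and timestamp_cmp(100, 103) < 0), so the helper reports it as consumed-but-not-retired and dispatcher_do_fault() gets called with invalidate set; once retired reaches 103 the second clause fails and the batch counts as complete.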
+
+/**
+ * adreno_dispatcher_work() - Master work handler for the dispatcher
+ * @work: Pointer to the work struct for the current work queue
+ *
+ * Process expired commands and send new ones.
+ */
+static void adreno_dispatcher_work(struct work_struct *work)
+{
+ struct adreno_dispatcher *dispatcher =
+ container_of(work, struct adreno_dispatcher, work);
+ struct adreno_device *adreno_dev =
+ container_of(dispatcher, struct adreno_device, dispatcher);
+ struct kgsl_device *device = &adreno_dev->dev;
+ int inv, count = 0;
+
+ mutex_lock(&dispatcher->mutex);
+
+ while (dispatcher->head != dispatcher->tail) {
+ uint32_t consumed, retired = 0;
+ struct kgsl_cmdbatch *cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ struct adreno_context *drawctxt;
+ BUG_ON(cmdbatch == NULL);
+
+ drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+
+ /*
+ * First try to expire the timestamp. This happens if the
+ * context is valid and the timestamp expired normally or if the
+ * context was destroyed before the command batch was finished
+		 * in the GPU. Either way, retire the command batch, advance the
+		 * pointers and continue processing the queue
+ */
+
+ if (!kgsl_context_detached(cmdbatch->context))
+ retired = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ if (kgsl_context_detached(cmdbatch->context) ||
+ (timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
+
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ dispatcher->inflight - 1);
+
+ /* Reduce the number of inflight command batches */
+ dispatcher->inflight--;
+
+			/* Zero the old entry */
+ dispatcher->cmdqueue[dispatcher->head] = NULL;
+
+ /* Advance the buffer head */
+ dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
+ ADRENO_DISPATCH_CMDQUEUE_SIZE);
+
+ /* Destroy the retired command batch */
+ kgsl_cmdbatch_destroy(cmdbatch);
+
+ /* Update the expire time for the next command batch */
+
+ if (dispatcher->inflight > 0) {
+ cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ }
+
+ count++;
+
+ BUG_ON(dispatcher->inflight == 0 && dispatcher->fault);
+ continue;
+ }
+
+ /*
+ * If we got a fault from the interrupt handler, this command
+ * is to blame. Invalidate it, reset and replay
+ */
+
+ if (dispatcher->fault) {
+ dispatcher_do_fault(device, cmdbatch, 1);
+ goto done;
+ }
+
+ /* Get the last consumed timestamp */
+ consumed = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_CONSUMED);
+
+ /* Break here if fault detection is disabled for the context */
+ if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+ break;
+
+ /*
+ * The last line of defense is to check if the command batch has
+ * timed out. If we get this far but the timeout hasn't expired
+ * yet then the GPU is still ticking away
+ */
+
+ if (time_is_after_jiffies(cmdbatch->expires))
+ break;
+
+ /* Boom goes the dynamite */
+
+ pr_err("-----------------------\n");
+
+ pr_err("dispatcher: expired ctx=%d ts=%d consumed=%d retired=%d\n",
+ cmdbatch->context->id, cmdbatch->timestamp, consumed,
+ retired);
+ pr_err("dispatcher: jiffies=%lu expired=%lu\n", jiffies,
+ cmdbatch->expires);
+
+ /*
+ * If execution stopped after the current command batch was
+ * consumed then invalidate the context for the current command
+ * batch
+ */
+
+ inv = cmdbatch_consumed(cmdbatch, consumed, retired);
+
+ dispatcher_do_fault(device, cmdbatch, inv);
+ break;
+ }
+
+ /*
+ * Decrement the active count to 0 - this will allow the system to go
+ * into suspend even if there are queued command batches
+ */
+
+ if (count && dispatcher->inflight == 0) {
+ mutex_lock(&device->mutex);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+ }
+
+ /* Dispatch new commands if we have the room */
+ if (dispatcher->inflight < _dispatcher_inflight)
+ _adreno_dispatcher_issuecmds(adreno_dev);
+
+done:
+ /* Either update the timer for the next command batch or disable it */
+ if (dispatcher->inflight) {
+ struct kgsl_cmdbatch *cmdbatch
+ = dispatcher->cmdqueue[dispatcher->head];
+
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+ } else
+ del_timer_sync(&dispatcher->timer);
+
+ /* Before leaving update the pwrscale information */
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_idle(device);
+ mutex_unlock(&device->mutex);
+
+ mutex_unlock(&dispatcher->mutex);
+}
+
+void adreno_dispatcher_schedule(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ queue_work(device->work_queue, &dispatcher->work);
+}
+
+/*
+ * This is called when the timer expires - it either means the GPU is hung or
+ * the IB is taking too long to execute
+ */
+void adreno_dispatcher_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_dispatcher_schedule(device);
+}
+/**
+ * adreno_dispatcher_fault_irq() - Trigger a fault in the dispatcher
+ * @device: Pointer to the KGSL device
+ *
+ * Called from an interrupt context this will trigger a fault in the
+ * dispatcher
+ */
+void adreno_dispatcher_fault_irq(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->fault = 1;
+ adreno_dispatcher_schedule(device);
+}
+
+/**
+ * adreno_dispatcher_pause() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Pause the dispatcher so it doesn't accept any new commands
+ */
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /*
+ * This will probably get called while holding other mutexes so don't
+ * take the dispatcher mutex. The biggest penalty is that another
+	 * command might be submitted while we are in here, but that's okay
+ * because whoever is waiting for the drain will just have another
+ * command batch to wait for
+ */
+
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_start() - activate the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Set the dispatcher active and start the loop once to get things going
+ */
+void adreno_dispatcher_start(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ /* Schedule the work loop to get things going */
+ adreno_dispatcher_schedule(&adreno_dev->dev);
+}
+
+/**
+ * adreno_dispatcher_stop() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Stop the dispatcher and close all the timers
+ */
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ del_timer_sync(&dispatcher->timer);
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_close() - close the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Close the dispatcher and free all the outstanding commands and memory
+ */
+void adreno_dispatcher_close(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ mutex_lock(&dispatcher->mutex);
+ del_timer_sync(&dispatcher->timer);
+
+ while (dispatcher->head != dispatcher->tail) {
+ kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
+ dispatcher->head = (dispatcher->head + 1)
+ % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ }
+
+ mutex_unlock(&dispatcher->mutex);
+
+ kobject_put(&dispatcher->kobj);
+}
+
+struct dispatcher_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, char *);
+ ssize_t (*store)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, const char *buf,
+ size_t count);
+ unsigned int max;
+ unsigned int *value;
+};
+
+#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
+ struct dispatcher_attribute dispatcher_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show_uint, \
+ .store = _store_uint, \
+ .max = _max, \
+ .value = &(_value), \
+ }
+
+#define to_dispatcher_attr(_a) \
+ container_of((_a), struct dispatcher_attribute, attr)
+#define to_dispatcher(k) container_of(k, struct adreno_dispatcher, kobj)
+
+static ssize_t _store_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int ret = kstrtoul(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (!val || (attr->max && (val > attr->max)))
+ return -EINVAL;
+
+ *((unsigned int *) attr->value) = val;
+ return size;
+}
+
+static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			*((unsigned int *) attr->value));
+}
+
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+ _dispatcher_inflight);
+/*
+ * Our code that "puts back" a command into the context queue is much
+ * cleaner if we are sure that there will always be enough room in that
+ * queue, so restrict the maximum size of the context queue to
+ * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ */
+static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
+ _context_cmdbatch_burst);
+static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
+static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
+
+static struct attribute *dispatcher_attrs[] = {
+ &dispatcher_attr_inflight.attr,
+ &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_burst_count.attr,
+ &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_context_queue_wait.attr,
+ NULL,
+};
+
+static ssize_t dispatcher_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->show)
+ ret = pattr->show(dispatcher, pattr, buf);
+
+ return ret;
+}
+
+static ssize_t dispatcher_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->store)
+ ret = pattr->store(dispatcher, pattr, buf, count);
+
+ return ret;
+}
+
+static void dispatcher_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops dispatcher_sysfs_ops = {
+ .show = dispatcher_sysfs_show,
+ .store = dispatcher_sysfs_store
+};
+
+static struct kobj_type ktype_dispatcher = {
+ .sysfs_ops = &dispatcher_sysfs_ops,
+ .default_attrs = dispatcher_attrs,
+ .release = dispatcher_sysfs_release
+};
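Since kobject_init_and_add() below registers this ktype under the KGSL device as "dispatch", the tunables surface as ordinary sysfs files. The path in this sketch is an assumption (it depends on where the kgsl device node sits in sysfs), and the written value is illustrative; _store_uint() rejects 0 and anything over the attribute's max:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path - adjust for the actual kgsl device location */
		FILE *f = fopen("/sys/class/kgsl/kgsl-3d0/dispatch/cmdbatch_timeout",
				"w");

		if (f == NULL)
			return 1;

		/* Value is in milliseconds, per the msecs_to_jiffies() usage above */
		fprintf(f, "%u\n", 4000);
		fclose(f);
		return 0;
	}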
+
+/**
+ * adreno_dispatcher_init() - Initialize the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Initialize the dispatcher
+ */
+int adreno_dispatcher_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ memset(dispatcher, 0, sizeof(*dispatcher));
+
+ mutex_init(&dispatcher->mutex);
+
+ setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
+ (unsigned long) adreno_dev);
+
+ INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+
+ plist_head_init(&dispatcher->pending);
+ spin_lock_init(&dispatcher->plist_lock);
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
+ &device->dev->kobj, "dispatch");
+
+ return ret;
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 0af4c12e..1a4310e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -13,10 +13,12 @@
#include <linux/slab.h>
#include <linux/msm_kgsl.h>
+#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
+#include "adreno_trace.h"
#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
@@ -132,6 +134,247 @@
*incmd = cmd;
}
+static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+ wake_up_interruptible_all(&drawctxt->waiting);
+}
+
+#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
+({ \
+ long __ret = timeout; \
+ if (io) \
+ __wait_io_event_interruptible_timeout(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#define adreno_wait_event_interruptible(wq, condition, io) \
+({ \
+ long __ret; \
+ if (io) \
+ __wait_io_event_interruptible(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible(wq, condition, __ret); \
+ __ret; \
+})
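The timeout variant above keeps the standard __wait_event_interruptible_timeout() return convention: < 0 when interrupted by a signal, 0 when the timeout elapsed with the condition still false, and > 0 (remaining jiffies) when the condition came true. That is why the callers below map 0 to -ETIMEDOUT and fold positive values into success.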
+
+static int _check_context_timestamp(struct kgsl_device *device,
+ struct adreno_context *drawctxt, unsigned int timestamp)
+{
+ int ret = 0;
+
+ /* Bail if the drawctxt has been invalidated or destroyed */
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
+ return 1;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_wait() - sleep until a timestamp expires
+ * @adreno_dev: pointer to the adreno_device struct
+ * @context: Pointer to the KGSL context to sleep for
+ * @timestamp: Timestamp to wait on
+ * @timeout: Number of milliseconds to wait (0 for infinite)
+ *
+ * Register an event to wait for a timestamp on a context and sleep until it
+ * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
+ * on success
+ */
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ static unsigned int io_cnt;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret, io;
+
+ if (kgsl_context_detached(context))
+ return -EINVAL;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ trace_adreno_drawctxt_wait_start(context->id, timestamp);
+
+ ret = kgsl_add_event(device, context->id, timestamp,
+ wait_callback, drawctxt, NULL);
+ if (ret)
+ goto done;
+
+ /*
+ * For proper power accounting sometimes we need to call
+ * io_wait_interruptible_timeout and sometimes we need to call
+ * plain old wait_interruptible_timeout. We call the regular
+ * timeout N times out of 100, where N is a number specified by
+ * the current power level
+ */
+
+ io_cnt = (io_cnt + 1) % 100;
+ io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ ? 0 : 1;
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) adreno_wait_event_interruptible_timeout(
+ drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ msecs_to_jiffies(timeout), io);
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ io);
+ }
+
+ mutex_lock(&device->mutex);
+
+ /* -EDEADLK if the context was invalidated while we were waiting */
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
+
+ /* Return -EINVAL if the context was detached while we were waiting */
+ if (kgsl_context_detached(context))
+ ret = -EINVAL;
+
+done:
+ trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
+ return ret;
+}
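The io_cnt accounting above is just modular bookkeeping. This standalone sketch (with an illustrative io_fraction of 33) shows how N of every 100 waits land on the regular path and the remainder on the I/O-accounted path:

	#include <stdio.h>

	int main(void)
	{
		static unsigned int io_cnt;
		const unsigned int io_fraction = 33;	/* illustrative pwrlevel value */
		int i, regular = 0, io = 0;

		for (i = 0; i < 100; i++) {
			io_cnt = (io_cnt + 1) % 100;
			if (io_cnt < io_fraction)
				regular++;	/* plain interruptible wait */
			else
				io++;		/* I/O-accounted wait */
		}

		printf("regular=%d io=%d\n", regular, io);	/* regular=33 io=67 */
		return 0;
	}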
+
+static void global_wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+
+ wake_up_interruptible_all(&drawctxt->waiting);
+ kgsl_context_put(&drawctxt->base);
+}
+
+static int _check_global_timestamp(struct kgsl_device *device,
+ unsigned int timestamp)
+{
+ int ret;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, NULL, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ _kgsl_context_get(context);
+
+ trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);
+
+ ret = kgsl_add_event(device, KGSL_MEMSTORE_GLOBAL, timestamp,
+ global_wait_callback, drawctxt, NULL);
+ if (ret) {
+ kgsl_context_put(context);
+ goto done;
+ }
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) wait_event_interruptible_timeout(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp),
+ msecs_to_jiffies(timeout));
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ ret = (int) wait_event_interruptible(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp));
+ }
+
+ mutex_lock(&device->mutex);
+
+ if (ret)
+ kgsl_cancel_events_timestamp(device, NULL, timestamp);
+
+done:
+ trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @context: Pointer to the KGSL context structure
+ *
+ * Invalidate the context and remove all queued commands and cancel any pending
+ * waiters
+ */
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+
+ trace_adreno_drawctxt_invalidate(drawctxt);
+
+ drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
+
+ /* Clear the pending queue */
+ mutex_lock(&drawctxt->mutex);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, context,
+ cmdbatch->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Give the bad news to everybody waiting around */
+ wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_interruptible_all(&drawctxt->wq);
+}
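The drain loop above (repeated in adreno_drawctxt_detach() later in this patch) is a standard drop-the-lock pattern: pop one entry under the queue mutex, release the mutex for a teardown that may itself sleep or take the device mutex, then re-acquire. A userspace rendering of the same shape, using pthreads so it compiles standalone:

	#include <pthread.h>

	#define QUEUE_SIZE 128

	struct drain_queue {
		pthread_mutex_t lock;
		void *items[QUEUE_SIZE];
		int head, tail;
	};

	/* destroy() may block, so it must run with the queue lock dropped */
	static void drain(struct drain_queue *q, void (*destroy)(void *))
	{
		pthread_mutex_lock(&q->lock);

		while (q->head != q->tail) {
			void *item = q->items[q->head];

			q->head = (q->head + 1) % QUEUE_SIZE;
			pthread_mutex_unlock(&q->lock);

			destroy(item);	/* lock dropped across the teardown */

			pthread_mutex_lock(&q->lock);
		}

		pthread_mutex_unlock(&q->lock);
	}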
+
/**
* adreno_drawctxt_create - create a new adreno draw context
* @dev_priv: the owner of the context
@@ -149,6 +392,7 @@
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
@@ -168,22 +412,30 @@
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_TYPE_MASK);
+ /* Always enable per-context timestamps */
+ *flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
+ drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
if (*flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
- if (*flags & KGSL_CONTEXT_PER_CONTEXT_TS)
- drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
-
- if (*flags & KGSL_CONTEXT_USER_GENERATED_TS) {
- if (!(*flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
- ret = -EINVAL;
- goto err;
- }
+ if (*flags & KGSL_CONTEXT_USER_GENERATED_TS)
drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
- }
+
+ mutex_init(&drawctxt->mutex);
+ init_waitqueue_head(&drawctxt->wq);
+ init_waitqueue_head(&drawctxt->waiting);
+
+ /*
+ * Set up the plist node for the dispatcher. For now all contexts have
+ * the same priority, but later the priority will be set at create time
+ * by the user
+ */
+
+ plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
@@ -196,12 +448,6 @@
goto err;
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ref_wait_ts),
- KGSL_INIT_REFTIMESTAMP);
- kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ts_cmp_enable),
- 0);
- kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device, &device->memstore,
@@ -219,18 +465,20 @@
* @context: Generic KGSL context container for the context
*
*/
-void adreno_drawctxt_detach(struct kgsl_context *context)
+int adreno_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
struct adreno_device *adreno_dev;
struct adreno_context *drawctxt;
+ int ret;
if (context == NULL)
- return;
+ return 0;
device = context->device;
adreno_dev = ADRENO_DEVICE(device);
drawctxt = ADRENO_CONTEXT(context);
+
/* deactivate context */
if (adreno_dev->drawctxt_active == drawctxt) {
/* no need to save GMEM or shader, the context is
@@ -246,13 +494,39 @@
adreno_drawctxt_switch(adreno_dev, NULL, 0);
}
- if (device->state != KGSL_STATE_HUNG)
- adreno_idle(device);
+ mutex_lock(&drawctxt->mutex);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /*
+ * Don't hold the drawctxt mutex while the cmdbatch is being
+ * destroyed because the cmdbatch destroy takes the device
+ * mutex and the world falls in on itself
+ */
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Wait for the last global timestamp to pass before continuing */
+ ret = adreno_drawctxt_wait_global(adreno_dev, context,
+ drawctxt->internal_timestamp, 10 * 1000);
adreno_profile_process_results(device);
kgsl_sharedmem_free(&drawctxt->gpustate);
kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+ return ret;
}
@@ -296,11 +570,12 @@
* Switch the current draw context
*/
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags)
{
struct kgsl_device *device = &adreno_dev->dev;
+ int ret = 0;
if (drawctxt) {
if (flags & KGSL_CONTEXT_SAVE_GMEM)
@@ -316,9 +591,9 @@
if (adreno_dev->drawctxt_active == drawctxt) {
if (adreno_dev->gpudev->ctxt_draw_workaround &&
adreno_is_a225(adreno_dev))
- adreno_dev->gpudev->ctxt_draw_workaround(
+ ret = adreno_dev->gpudev->ctxt_draw_workaround(
adreno_dev, drawctxt);
- return;
+ return ret;
}
KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
@@ -327,7 +602,15 @@
drawctxt ? drawctxt->base.id : 0, flags);
/* Save the old context */
- adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
+ ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
+ adreno_dev->drawctxt_active);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d save: %d\n",
+ adreno_dev->drawctxt_active->base.id, ret);
+ return ret;
+ }
/* Put the old instance of the active drawctxt */
if (adreno_dev->drawctxt_active) {
@@ -340,6 +623,14 @@
_kgsl_context_get(&drawctxt->base);
/* Set the new context */
- adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ ret = adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d restore: %d\n",
+ drawctxt->base.id, ret);
+ return ret;
+ }
+
adreno_dev->drawctxt_active = drawctxt;
+ return 0;
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 3088099..f8469e2 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -69,6 +69,13 @@
const char *str;
};
+#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+
+#define ADRENO_CONTEXT_DEFAULT_PRIORITY 1
+
+#define ADRENO_CONTEXT_STATE_ACTIVE 0
+#define ADRENO_CONTEXT_STATE_INVALID 1
+
struct kgsl_device;
struct adreno_device;
struct kgsl_device_private;
@@ -99,18 +106,58 @@
struct kgsl_memdesc quad_vertices_restore;
};
+/**
+ * struct adreno_context - Adreno GPU draw context
+ * @id: Unique integer ID of the context
+ * @timestamp: Last issued context-specific timestamp
+ * @internal_timestamp: Global timestamp of the last issued command
+ * @state: Current state of the context
+ * @flags: Bitfield controlling behavior of the context
+ * @type: Context type (GL, CL, RS)
+ * @mutex: Mutex to protect the cmdqueue
+ * @pagetable: Pointer to the GPU pagetable for the context
+ * @gpustate: Pointer to the GPU scratch memory for context save/restore
+ * @reg_restore: Command buffer for restoring context registers
+ * @shader_save: Command buffer for saving shaders
+ * @shader_restore: Command buffer to restore shaders
+ * @context_gmem_shadow: GMEM shadow structure for save/restore
+ * @reg_save: A2XX command buffer to save context registers
+ * @shader_fixup: A2XX command buffer to "fix" shaders on restore
+ * @chicken_restore: A2XX command buffer to "fix" register restore
+ * @bin_base_offset: Saved value of the A2XX BIN_BASE_OFFSET register
+ * @regconstant_save: A3XX command buffer to save some registers
+ * @constant_restore: A3XX command buffer to restore some registers
+ * @hlsqcontrol_restore: A3XX command buffer to restore HLSQ registers
+ * @save_fixup: A3XX command buffer to "fix" register save
+ * @restore_fixup: A3XX command buffer to restore register save fixes
+ * @shader_load_commands: A3XX GPU memory descriptor for shader load IB
+ * @shader_save_commands: A3XX GPU memory descriptor for shader save IB
+ * @constant_save_commands: A3XX GPU memory descriptor for constant save IB
+ * @constant_load_commands: A3XX GPU memory descriptor for constant load IB
+ * @cond_execs: A3XX GPU memory descriptor for conditional exec IB
+ * @hlsq_restore_commands: A3XX GPU memory descriptor for HLSQ restore IB
+ * @cmdqueue: Queue of command batches waiting to be dispatched for this context
+ * @cmdqueue_head: Head of the command queue
+ * @cmdqueue_tail: Tail of the command queue
+ * @pending: Priority list node for the dispatcher list of pending contexts
+ * @wq: Wait queue used by contexts sleeping until there is room in the queue
+ * @waiting: Wait queue used by contexts waiting for a timestamp or event
+ * @queued: Number of commands queued in the cmdqueue
+ */
struct adreno_context {
struct kgsl_context base;
unsigned int ib_gpu_time_used;
unsigned int timestamp;
+ unsigned int internal_timestamp;
+ int state;
uint32_t flags;
unsigned int type;
+ struct mutex mutex;
struct kgsl_memdesc gpustate;
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_restore[3];
- /* Information of the GMEM shadow that is created in context create */
struct gmem_shadow_t context_gmem_shadow;
/* A2XX specific items */
@@ -131,23 +178,41 @@
struct kgsl_memdesc constant_load_commands[3];
struct kgsl_memdesc cond_execs[4];
struct kgsl_memdesc hlsqcontrol_restore_commands[1];
+
+ /* Dispatcher */
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ int cmdqueue_head;
+ int cmdqueue_tail;
+
+ struct plist_node pending;
+ wait_queue_head_t wq;
+ wait_queue_head_t waiting;
+
+ int queued;
};
struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
uint32_t *flags);
-void adreno_drawctxt_detach(struct kgsl_context *context);
+int adreno_drawctxt_detach(struct kgsl_context *context);
void adreno_drawctxt_destroy(struct kgsl_context *context);
-void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout);
+
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context);
+
/* GPU context switch helper functions */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 32dbd51..294ae76 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -22,6 +22,7 @@
#include "adreno_ringbuffer.h"
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
+#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -459,6 +460,9 @@
adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
&cp_ib2_bufsz);
+ trace_adreno_gpu_fault(rbbm_status, cp_rb_rptr, cp_rb_wptr,
+ cp_ib1_base, cp_ib1_bufsz, cp_ib2_base, cp_ib2_bufsz);
+
/* If postmortem dump is not enabled, dump minimal set and return */
if (!device->pm_dump_enable) {
@@ -644,5 +648,9 @@
error_vfree:
vfree(rb_copy);
end:
+ /* Restart the dispatcher after a manually triggered dump */
+ if (manual)
+ adreno_dispatcher_start(adreno_dev);
+
return result;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index b8cf21f..dc1530a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -67,11 +67,8 @@
unsigned long wait_time;
unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
unsigned long wait_time_part;
- unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
unsigned int rptr;
- memset(prev_reg_val, 0, sizeof(prev_reg_val));
-
/* if wptr ahead, fill the remaining with NOPs */
if (wptr_ahead) {
/* -1 for header */
@@ -105,43 +102,13 @@
if (freecmds == 0 || freecmds > numcmds)
break;
- /* Dont wait for timeout, detect hang faster.
- */
- if (time_after(jiffies, wait_time_part)) {
- wait_time_part = jiffies +
- msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if ((adreno_ft_detect(rb->device,
- prev_reg_val))){
- KGSL_DRV_ERR(rb->device,
- "Hang detected while waiting for freespace in"
- "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
- rptr, rb->wptr);
- goto err;
- }
- }
-
if (time_after(jiffies, wait_time)) {
KGSL_DRV_ERR(rb->device,
"Timed out while waiting for freespace in ringbuffer "
"rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr);
- goto err;
+ return -ETIMEDOUT;
}
- continue;
-
-err:
- if (!adreno_dump_and_exec_ft(rb->device)) {
- if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
- KGSL_CTXT_WARN(rb->device,
- "Context %p caused a gpu hang. Will not accept commands for context %d\n",
- context, context->base.id);
- return -EDEADLK;
- }
- wait_time = jiffies + wait_timeout;
- } else {
- /* GPU is hung and fault tolerance failed */
- BUG();
- }
}
return 0;
}
@@ -180,7 +147,8 @@
if (!ret) {
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
- }
+ } else
+ ptr = ERR_PTR(ret);
return ptr;
}
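adreno_ringbuffer_allocspace() now distinguishes a hard error from a plain NULL by using the kernel's standard pointer-error encoding from <linux/err.h>; the caller in adreno_ringbuffer_addcmds() below unpacks it with IS_ERR()/PTR_ERR(). For reference, the idiom in simplified form:

	/* Simplified from <linux/err.h>: a small negative errno is folded into
	 * the top (invalid) page of the address space, so a single pointer
	 * return can carry either a valid address or an error code.
	 */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *) error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long) ptr;
	}

	static inline long IS_ERR(const void *ptr)
	{
		return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
	}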
@@ -347,7 +315,6 @@
int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
{
int status;
- /*cp_rb_cntl_u cp_rb_cntl; */
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
@@ -568,18 +535,17 @@
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
- struct adreno_context *context,
+ struct adreno_context *drawctxt,
unsigned int flags, unsigned int *cmds,
- int sizedwords)
+ int sizedwords, uint32_t timestamp)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
unsigned int *ringcmds;
unsigned int total_sizedwords = sizedwords;
unsigned int i;
unsigned int rcmd_gpu;
- unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+ unsigned int context_id;
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
- unsigned int timestamp;
bool profile_ready;
/*
@@ -594,15 +560,19 @@
adreno_profile_assignments_ready(&adreno_dev->profile) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
- /*
- * if the context was not created with per context timestamp
- * support, we must use the global timestamp since issueibcmds
- * will be returning that one, or if an internal issue then
- * use global timestamp.
- */
- if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
- !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
- context_id = context->base.id;
+ /* The global timestamp always needs to be incremented */
+ rb->global_ts++;
+
+ /* If this is a internal IB, use the global timestamp for it */
+	/* If this is an internal IB, use the global timestamp for it */
+ timestamp = rb->global_ts;
+ context_id = KGSL_MEMSTORE_GLOBAL;
+ } else {
+ context_id = drawctxt->base.id;
+ }
+
+ if (drawctxt)
+ drawctxt->internal_timestamp = rb->global_ts;
/* reserve space to temporarily turn off protected mode
* error checking if needed
@@ -613,13 +583,8 @@
/* internal ib command identifier for the ringbuffer */
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
- /* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
- total_sizedwords += context ? 13 : 0;
-
- if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
- (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
- KGSL_CMD_FLAGS_GET_INT)))
- total_sizedwords += 2;
+ /* Add two dwords for the CP_INTERRUPT */
+ total_sizedwords += drawctxt ? 2 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -627,13 +592,16 @@
if (adreno_is_a2xx(adreno_dev))
total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
- total_sizedwords += 2; /* scratchpad ts for fault tolerance */
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
- if (KGSL_MEMSTORE_GLOBAL != context_id)
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
+ if (drawctxt) {
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
+ }
if (adreno_is_a20x(adreno_dev))
total_sizedwords += 2; /* CACHE_FLUSH */
@@ -644,8 +612,11 @@
if (profile_ready)
total_sizedwords += 6; /* space for pre_ib and post_ib */
- ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
- if (!ringcmds)
+ ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+ if (ringcmds == NULL)
return -ENOSPC;
rcmd_gpu = rb->buffer_desc.gpuaddr
@@ -662,24 +633,9 @@
/* Add any IB required for profiling if it is enabled */
if (profile_ready)
- adreno_profile_preib_processing(rb->device, context->base.id,
+ adreno_profile_preib_processing(rb->device, drawctxt->base.id,
&flags, &ringcmds, &rcmd_gpu);
- /* always increment the global timestamp. once. */
- rb->global_ts++;
-
- if (KGSL_MEMSTORE_GLOBAL != context_id)
- timestamp = context->timestamp;
- else
- timestamp = rb->global_ts;
-
- /* scratchpad ts for fault tolerance */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type0_packet(adreno_getreg(adreno_dev,
- ADRENO_REG_CP_TIMESTAMP), 1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- rb->global_ts);
-
/* start-of-pipeline timestamp */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
@@ -749,7 +705,7 @@
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
- if (KGSL_MEMSTORE_GLOBAL != context_id) {
+ if (drawctxt) {
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
@@ -765,56 +721,13 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH);
}
- if (context) {
- /* Conditional execution based on memory values */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_COND_EXEC, 4));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ts_cmp_enable)) >> 2);
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ref_wait_ts)) >> 2);
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
- /* # of conditional command DWORDs */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 8);
-
- /* Clear the ts_cmp_enable for the context */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_MEM_WRITE, 2));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- context_id, ts_cmp_enable));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
- /* Clear the ts_cmp_enable for the global timestamp */
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_MEM_WRITE, 2));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
-
- /* Trigger the interrupt */
+ if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
CP_INT_CNTL__RB_INT_MASK);
}
- /*
- * If per context timestamps are enabled and any of the kgsl
- * internal commands want INT to be generated trigger the INT
- */
- if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
- (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
- KGSL_CMD_FLAGS_GET_INT))) {
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_INTERRUPT, 1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- CP_INT_CNTL__RB_INT_MASK);
- }
-
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -824,12 +737,6 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
}
- if (flags & KGSL_CMD_FLAGS_EOF) {
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- KGSL_END_OF_FRAME_IDENTIFIER);
- }
-
adreno_ringbuffer_submit(rb);
return 0;
@@ -845,14 +752,10 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- if (device->state & KGSL_STATE_HUNG)
- return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
- KGSL_TIMESTAMP_RETIRED);
-
flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
- sizedwords);
+ sizedwords, 0);
}
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -1045,39 +948,89 @@
return ret;
}
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int flags)
+/**
+ * _ringbuffer_verify_ib() - parse an IB and verify that it is correct
+ * @dev_priv: Pointer to the process struct
+ * @ibdesc: Pointer to the IB descriptor
+ *
+ * This function only gets called if debugging is enabled - it walks the IB and
+ * does an additional level of parsing and verification above and beyond what
+ * the KGSL core does
+ */
+static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_ibdesc *ibdesc)
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int *link = 0;
+
+	/* Check that the size of the IB is within the allowable limit */
+ if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) {
+ KGSL_DRV_ERR(device, "Invalid IB size 0x%X\n",
+ ibdesc->sizedwords);
+ return false;
+ }
+
+ if (unlikely(adreno_dev->ib_check_level >= 1) &&
+ !_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) {
+ KGSL_DRV_ERR(device, "Could not verify the IBs\n");
+ return false;
+ }
+
+ return true;
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int i, ret;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Verify the IBs before they get queued */
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i]))
+ return -EINVAL;
+ }
+
+ /* Queue the command in the ringbuffer */
+ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
+ timestamp);
+
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "adreno_dispatcher_queue_cmd returned %d\n", ret);
+
+ return ret;
+}
+
+/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned int numibs;
+ unsigned int *link;
unsigned int *cmds;
unsigned int i;
- struct adreno_context *drawctxt = NULL;
+ struct kgsl_context *context;
+ struct adreno_context *drawctxt;
unsigned int start_index = 0;
int ret;
- if (device->state & KGSL_STATE_HUNG) {
- ret = -EBUSY;
- goto done;
- }
-
- if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
- context == NULL || ibdesc == 0 || numibs == 0) {
- ret = -EINVAL;
- goto done;
- }
+ context = cmdbatch->context;
drawctxt = ADRENO_CONTEXT(context);
- if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
- ret = -EDEADLK;
- goto done;
- }
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
/* process any profiling results that are available into the log_buf */
adreno_profile_process_results(device);
@@ -1090,15 +1043,6 @@
adreno_dev->drawctxt_active == drawctxt)
start_index = 1;
- if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
- if (flags & KGSL_CMD_FLAGS_EOF)
- drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
- if (start_index)
- numibs = 1;
- else
- numibs = 0;
- }
-
cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
GFP_KERNEL);
if (!link) {
@@ -1117,18 +1061,6 @@
*cmds++ = ibdesc[0].sizedwords;
}
for (i = start_index; i < numibs; i++) {
- if (unlikely(adreno_dev->ib_check_level >= 1 &&
- !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
- ibdesc[i].sizedwords))) {
- ret = -EINVAL;
- goto done;
- }
-
- if (ibdesc[i].sizedwords == 0) {
- ret = -EINVAL;
- goto done;
- }
-
*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
@@ -1137,253 +1069,44 @@
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- kgsl_setstate(&device->mmu, context->id,
+ ret = kgsl_setstate(&device->mmu, context->id,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
- adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
-
- if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
- KGSL_DRV_ERR(device,
- "Invalid user generated ts <%d:0x%x>, "
- "less than last issued ts <%d:0x%x>\n",
- context->id, *timestamp, context->id,
- drawctxt->timestamp);
- return -ERANGE;
- }
- drawctxt->timestamp = *timestamp;
- } else
- drawctxt->timestamp++;
-
- ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
- drawctxt,
- (flags & KGSL_CMD_FLAGS_EOF),
- &link[0], (cmds - link));
if (ret)
goto done;
- if (drawctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- *timestamp = drawctxt->timestamp;
- else
- *timestamp = adreno_dev->ringbuffer.global_ts;
+ ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags);
+
+ /*
+ * In the unlikely event of an error in the drawctxt switch,
+ * treat it like a hang
+ */
+ if (ret)
+ goto done;
+
+ ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+ drawctxt,
+ cmdbatch->flags,
+ &link[0], (cmds - link),
+ cmdbatch->timestamp);
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+ if (ret)
+ goto done;
/*
* insert wait for idle after every IB1
* this is conservative but works reliably and is ok
* even for performance simulations
*/
- adreno_idle(device);
+ ret = adreno_idle(device);
#endif
- /*
- * If context hung and recovered then return error so that the
- * application may handle it
- */
- if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
- drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
- ret = -EPROTO;
- } else
- ret = 0;
-
done:
- device->pwrctrl.irq_last = 0;
- kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
- numibs, *timestamp, flags, ret,
- drawctxt ? drawctxt->type : 0);
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ cmdbatch->timestamp, cmdbatch->flags, ret,
+ drawctxt->type);
kfree(link);
return ret;
}
-
-static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
- unsigned int rb_rptr)
-{
- unsigned int temp_rb_rptr = rb_rptr;
- unsigned int size = rb->buffer_desc.size;
- unsigned int val[2];
- int i = 0;
- bool check = false;
- bool cmd_start = false;
-
- /* Go till the start of the ib sequence and turn on preamble */
- while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
- if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
- /* decrement i */
- i = (i + 1) % 2;
- if (val[i] == cp_nop_packet(4)) {
- temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
- temp_rb_rptr, size);
- kgsl_sharedmem_writel(rb->device,
- &rb->buffer_desc,
- temp_rb_rptr, cp_nop_packet(1));
- }
- KGSL_FT_INFO(rb->device,
- "Turned preamble on at offset 0x%x\n",
- temp_rb_rptr / 4);
- break;
- }
- /* If you reach beginning of next command sequence then exit
- * First command encountered is the current one so don't break
- * on that. */
- if (KGSL_CMD_IDENTIFIER == val[i]) {
- if (cmd_start)
- break;
- cmd_start = true;
- }
-
- i = (i + 1) % 2;
- if (1 == i)
- check = true;
- temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
- size);
- }
-}
-
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_ft_data *ft_data)
-{
- struct kgsl_device *device = rb->device;
- unsigned int rb_rptr = ft_data->start_of_replay_cmds;
- unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
- unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
- unsigned int cmd_start_idx = 0;
- unsigned int val1 = 0;
- int copy_rb_contents = 0;
- unsigned int temp_rb_rptr;
- struct kgsl_context *k_ctxt;
- struct adreno_context *a_ctxt;
- unsigned int size = rb->buffer_desc.size;
- unsigned int *temp_rb_buffer = ft_data->rb_buffer;
- int *rb_size = &ft_data->rb_size;
- unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
- int *bad_rb_size = &ft_data->bad_rb_size;
- unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
- int *good_rb_size = &ft_data->good_rb_size;
-
- /*
- * If the start index from where commands need to be copied is invalid
- * then no need to save off any commands
- */
- if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
- return;
-
- k_ctxt = kgsl_context_get(device, ft_data->context_id);
-
- if (k_ctxt) {
- a_ctxt = ADRENO_CONTEXT(k_ctxt);
- if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
- _turn_preamble_on_for_ib_seq(rb, rb_rptr);
- kgsl_context_put(k_ctxt);
- }
- k_ctxt = NULL;
-
- /* Walk the rb from the context switch. Omit any commands
- * for an invalid context. */
- while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
-
- if (KGSL_CMD_IDENTIFIER == val1) {
- /* Start is the NOP dword that comes before
- * KGSL_CMD_IDENTIFIER */
- cmd_start_idx = temp_rb_idx - 1;
- if ((copy_rb_contents) && (good_rb_idx))
- last_good_cmd_end_idx = good_rb_idx - 1;
- if ((!copy_rb_contents) && (bad_rb_idx))
- last_bad_cmd_end_idx = bad_rb_idx - 1;
- }
-
- /* check for context switch indicator */
- if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- unsigned int temp_idx, val2;
- /* increment by 3 to get to the context_id */
- temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) %
- size;
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
- temp_rb_rptr);
-
- /* if context switches to a context that did not cause
- * hang then start saving the rb contents as those
- * commands can be executed */
- k_ctxt = kgsl_context_get(rb->device, val2);
-
- if (k_ctxt) {
- a_ctxt = ADRENO_CONTEXT(k_ctxt);
-
- /* If we are changing to a good context and were not
- * copying commands then copy over commands to the good
- * context */
- if (!copy_rb_contents && ((k_ctxt &&
- !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
- !k_ctxt)) {
- for (temp_idx = cmd_start_idx;
- temp_idx < temp_rb_idx;
- temp_idx++)
- good_rb_buffer[good_rb_idx++] =
- temp_rb_buffer[temp_idx];
- ft_data->last_valid_ctx_id = val2;
- copy_rb_contents = 1;
- /* remove the good commands from bad buffer */
- bad_rb_idx = last_bad_cmd_end_idx;
- } else if (copy_rb_contents && k_ctxt &&
- (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
-
- /* If we are changing back to a bad context
- * from good ctxt and were not copying commands
- * to bad ctxt then copy over commands to
- * the bad context */
- for (temp_idx = cmd_start_idx;
- temp_idx < temp_rb_idx;
- temp_idx++)
- bad_rb_buffer[bad_rb_idx++] =
- temp_rb_buffer[temp_idx];
- /* If we are changing to bad context then
- * remove the dwords we copied for this
- * sequence from the good buffer */
- good_rb_idx = last_good_cmd_end_idx;
- copy_rb_contents = 0;
- }
- }
- kgsl_context_put(k_ctxt);
- }
-
- if (copy_rb_contents)
- good_rb_buffer[good_rb_idx++] = val1;
- else
- bad_rb_buffer[bad_rb_idx++] = val1;
-
- /* Copy both good and bad commands to temp buffer */
- temp_rb_buffer[temp_rb_idx++] = val1;
-
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
- }
- *good_rb_size = good_rb_idx;
- *bad_rb_size = bad_rb_idx;
- *rb_size = temp_rb_idx;
-}
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
- int num_rb_contents)
-{
- int i;
- unsigned int *ringcmds;
- unsigned int rcmd_gpu;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
-
- if (!num_rb_contents)
- return;
-
- if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR, 0);
- BUG_ON(num_rb_contents > rb->buffer_desc.size);
- }
- ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
- rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
- for (i = 0; i < num_rb_contents; i++)
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb_buff[i]);
- rb->wptr += num_rb_contents;
- adreno_ringbuffer_submit(rb);
-}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 9634e32..3aa0101 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,7 +27,6 @@
struct kgsl_device;
struct kgsl_device_private;
-struct adreno_ft_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -99,10 +98,11 @@
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int flags);
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
+
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch);
int adreno_ringbuffer_init(struct kgsl_device *device);
@@ -124,13 +124,6 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
-void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- struct adreno_ft_data *ft_data);
-
-void
-adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
- int num_rb_contents);
-
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
struct adreno_context *context,
unsigned int numcmds);
diff --git a/arch/arm/boot/dts/apq8074-v2-liquid.dts b/drivers/gpu/msm/adreno_trace.c
similarity index 70%
copy from arch/arm/boot/dts/apq8074-v2-liquid.dts
copy to drivers/gpu/msm/adreno_trace.c
index a0ecb50..607ba8c 100644
--- a/arch/arm/boot/dts/apq8074-v2-liquid.dts
+++ b/drivers/gpu/msm/adreno_trace.c
@@ -8,15 +8,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
-/dts-v1/;
+#include "adreno.h"
-/include/ "apq8074-v2.dtsi"
-/include/ "msm8974-liquid.dtsi"
-
-/ {
- model = "Qualcomm APQ 8074v2 LIQUID";
- compatible = "qcom,apq8074-liquid", "qcom,apq8074", "qcom,liquid";
- qcom,msm-id = <184 9 0x20000>;
-};
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "adreno_trace.h"
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
new file mode 100644
index 0000000..59aca2e
--- /dev/null
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -0,0 +1,174 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adreno_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(adreno_cmdbatch_queued,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
+ TP_ARGS(cmdbatch, queued),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, queued)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->queued = queued;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u queued=%u",
+ __entry->id, __entry->timestamp, __entry->queued
+ )
+);
+
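+/*
+ * Illustrative call site (a sketch, not part of this header): TRACE_EVENT()
+ * above generates a trace_adreno_cmdbatch_queued() stub, which a dispatcher
+ * can invoke with whatever queue-depth counter it tracks:
+ *
+ *   trace_adreno_cmdbatch_queued(cmdbatch, queued);
+ */
+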
+DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, inflight)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->inflight = inflight;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u inflight=%u",
+ __entry->id, __entry->timestamp,
+ __entry->inflight
+ )
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_retired,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight)
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight)
+);
+
+DECLARE_EVENT_CLASS(adreno_drawctxt_template,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ ),
+ TP_printk("ctx=%u", __entry->id)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_sleep,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_wake,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, dispatch_queue_context,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_invalidate,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_start,
+ TP_PROTO(unsigned int id, unsigned int ts),
+ TP_ARGS(id, ts),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u",
+ __entry->id, __entry->ts
+ )
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_done,
+ TP_PROTO(unsigned int id, unsigned int ts, int status),
+ TP_ARGS(id, ts, status),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->status = status;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u status=%d",
+ __entry->id, __entry->ts, __entry->status
+ )
+);
+
+TRACE_EVENT(adreno_gpu_fault,
+ TP_PROTO(unsigned int status, unsigned int rptr, unsigned int wptr,
+ unsigned int ib1base, unsigned int ib1size,
+ unsigned int ib2base, unsigned int ib2size),
+ TP_ARGS(status, rptr, wptr, ib1base, ib1size, ib2base, ib2size),
+ TP_STRUCT__entry(
+ __field(unsigned int, status)
+ __field(unsigned int, rptr)
+ __field(unsigned int, wptr)
+ __field(unsigned int, ib1base)
+ __field(unsigned int, ib1size)
+ __field(unsigned int, ib2base)
+ __field(unsigned int, ib2size)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->rptr = rptr;
+ __entry->wptr = wptr;
+ __entry->ib1base = ib1base;
+ __entry->ib1size = ib1size;
+ __entry->ib2base = ib2base;
+ __entry->ib2size = ib2size;
+ ),
+ TP_printk("status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
+ __entry->status, __entry->wptr, __entry->rptr,
+ __entry->ib1base, __entry->ib1size, __entry->ib2base,
+ __entry->ib2size)
+);
+
+#endif /* _ADRENO_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index fdd19e9..2624c16 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -62,59 +62,10 @@
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
/**
- * kgsl_hang_check() - Check for GPU hang
- * data: KGSL device structure
- *
- * This function is called every KGSL_TIMEOUT_PART time when
- * GPU is active to check for hang. If a hang is detected we
- * trigger fault tolerance.
- */
-void kgsl_hang_check(struct work_struct *work)
-{
- struct kgsl_device *device = container_of(work, struct kgsl_device,
- hang_check_ws);
- static unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
-
- mutex_lock(&device->mutex);
-
- if (device->state == KGSL_STATE_ACTIVE) {
-
- /* Check to see if the GPU is hung */
- if (adreno_ft_detect(device, prev_reg_val))
- adreno_dump_and_exec_ft(device);
-
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- }
-
- mutex_unlock(&device->mutex);
-}
-
-/**
- * hang_timer() - Hang timer function
- * data: KGSL device structure
- *
- * This function is called when hang timer expires, in this
- * function we check if GPU is in active state and queue the
- * work on device workqueue to check for the hang. We restart
- * the timer after KGSL_TIMEOUT_PART time.
- */
-void hang_timer(unsigned long data)
-{
- struct kgsl_device *device = (struct kgsl_device *) data;
-
- if (device->state == KGSL_STATE_ACTIVE) {
- /* Have work run in a non-interrupt context. */
- queue_work(device->work_queue, &device->hang_check_ws);
- }
-}
-
-/**
* kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
* device: KGSL device
* id: ID of the context submitting the command
- * ibdesc: Pointer to the list of IB descriptors
- * numib: Number of IBs in the list
+ * cmdbatch: Pointer to kgsl_cmdbatch describing these commands
* timestamp: Timestamp assigned to the command batch
* flags: Flags sent by the user
* result: Result of the submission attempt
@@ -124,11 +75,11 @@
* GPU specific modules.
*/
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_ibdesc *ibdesc, int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type)
{
- trace_kgsl_issueibcmds(device, id, ibdesc, numibs,
+ trace_kgsl_issueibcmds(device, id, cmdbatch,
timestamp, flags, result, type);
}
EXPORT_SYMBOL(kgsl_trace_issueibcmds);
@@ -530,8 +481,8 @@
EXPORT_SYMBOL(kgsl_context_init);
/**
- * kgsl_context_detach - Release the "master" context reference
- * @context - The context that will be detached
+ * kgsl_context_detach() - Release the "master" context reference
+ * @context: The context that will be detached
*
* This is called when a context becomes unusable, because userspace
* has requested for it to be destroyed. The context itself may
@@ -540,14 +491,12 @@
* detached by checking the KGSL_CONTEXT_DETACHED bit in
* context->priv.
*/
-void
-kgsl_context_detach(struct kgsl_context *context)
+int kgsl_context_detach(struct kgsl_context *context)
{
- struct kgsl_device *device;
- if (context == NULL)
- return;
+ int ret;
- device = context->device;
+ if (context == NULL)
+ return -EINVAL;
/*
* Mark the context as detached to keep others from using
@@ -555,19 +504,22 @@
* we don't try to detach twice.
*/
if (test_and_set_bit(KGSL_CONTEXT_DETACHED, &context->priv))
- return;
+ return -EINVAL;
- trace_kgsl_context_detach(device, context);
+ trace_kgsl_context_detach(context->device, context);
- device->ftbl->drawctxt_detach(context);
+ ret = context->device->ftbl->drawctxt_detach(context);
+
/*
* Cancel events after the device-specific context is
* detached, to avoid possibly freeing memory while
* it is still in use by the GPU.
*/
- kgsl_context_cancel_events(device, context);
+ kgsl_context_cancel_events(context->device, context);
kgsl_context_put(context);
+
+ return ret;
}
void
@@ -579,6 +531,8 @@
trace_kgsl_context_destroy(device, context);
+ BUG_ON(!kgsl_context_detached(context));
+
write_lock(&device->context_lock);
if (context->id != KGSL_CONTEXT_INVALID) {
idr_remove(&device->context_idr, context->id);
@@ -649,10 +603,11 @@
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
- /*
- * Make sure no user process is waiting for a timestamp
- * before supending.
- */
+
+ /* Tell the device to drain the submission queue */
+ device->ftbl->drain(device);
+
+ /* Wait for the active count to hit zero */
kgsl_active_count_wait(device, 0);
/*
@@ -663,13 +618,10 @@
/* Don't let the timer wake us during suspended sleep. */
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
switch (device->state) {
case KGSL_STATE_INIT:
break;
case KGSL_STATE_ACTIVE:
- /* Wait for the device to become idle */
- device->ftbl->idle(device);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
/* make sure power is on to stop the device */
@@ -995,8 +947,16 @@
if (context == NULL)
break;
- if (context->dev_priv == dev_priv)
+ if (context->dev_priv == dev_priv) {
+ /*
+ * Hold a reference to the context in case somebody
+ * tries to put it while we are detaching
+ */
+
+ _kgsl_context_get(context);
kgsl_context_detach(context);
+ kgsl_context_put(context);
+ }
next = next + 1;
}
@@ -1010,6 +970,7 @@
result = kgsl_close_device(device);
mutex_unlock(&device->mutex);
+
kfree(dev_priv);
kgsl_put_process_private(device, private);
@@ -1042,7 +1003,6 @@
* Make sure the gates are open, so they don't block until
* we start suspend or FT.
*/
- complete_all(&device->ft_gate);
complete_all(&device->hwaccess_gate);
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_active_count_put(device);
@@ -1438,93 +1398,179 @@
return result;
}
+/**
+ * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * @context: Pointer to a KGSL context struct
+ * @numibs: Number of indirect buffers to make room for in the cmdbatch
+ *
+ * Allocate a new cmdbatch structure and add enough room to store the list of
+ * indirect buffers
+ */
+struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_context *context,
+ int numibs)
+{
+ struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
+ if (cmdbatch == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
+ GFP_KERNEL);
+ if (cmdbatch->ibdesc == NULL) {
+ kfree(cmdbatch);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmdbatch->ibcount = numibs;
+ cmdbatch->context = context;
+
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this command batch
+ */
+ _kgsl_context_get(context);
+
+ return cmdbatch;
+}
+
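+/*
+ * Illustrative caller (a minimal sketch): the helper returns ERR_PTR() on
+ * failure, so callers test with IS_ERR() rather than against NULL:
+ *
+ *   struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(context, numibs);
+ *   if (IS_ERR(cmdbatch))
+ *           return PTR_ERR(cmdbatch);  // -ENOMEM from the allocator
+ */
+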
+/**
+ * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch
+ * @dev_priv: Pointer to the process private data for the device
+ * @cmdbatch: Pointer to the command batch to be checked
+ *
+ * Do a quick sanity test on the list of indirect buffers in a command batch,
+ * verifying that each size is non-zero and that each GPU address lies within
+ * the process pagetable.
+ */
+static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ int i;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (cmdbatch->ibdesc[i].sizedwords == 0) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "IB verification failed: Invalid size\n");
+ return false;
+ }
+
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
+ cmdbatch->ibdesc[i].gpuaddr)) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "IB verification failed: invalid address 0x%X\n",
+ cmdbatch->ibdesc[i].gpuaddr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the legacy issueibcmds format.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
+ struct kgsl_context *context,
+ struct kgsl_ringbuffer_issueibcmds *param)
+{
+ struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(context, 1);
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr;
+ cmdbatch->ibdesc[0].sizedwords = param->numibs;
+ cmdbatch->ibcount = 1;
+ cmdbatch->flags = param->flags;
+
+ return cmdbatch;
+}
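+
+/*
+ * In the legacy single-IB ABI the ioctl fields are overloaded: ibdesc_addr
+ * carries the IB's GPU address and numibs carries its size in dwords, which
+ * is why they map straight onto ibdesc[0] above.
+ */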
+
+/**
+ * _kgsl_cmdbatch_create() - Create a cmdbatch from an ioctl struct
+ * @device: Pointer to the KGSL device for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the standard issueibcmds format sent by the user.
+ */
+struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
+ struct kgsl_context *context,
+ struct kgsl_ringbuffer_issueibcmds *param)
+{
+ struct kgsl_cmdbatch *cmdbatch =
+ kgsl_cmdbatch_create(context, param->numibs);
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ if (copy_from_user(cmdbatch->ibdesc, (void *)param->ibdesc_addr,
+ sizeof(struct kgsl_ibdesc) * param->numibs)) {
+ KGSL_DRV_ERR(device,
+ "Unable to copy the IB userspace commands\n");
+ kgsl_cmdbatch_destroy(cmdbatch);
+ return ERR_PTR(-EFAULT);
+ }
+
+ cmdbatch->flags = param->flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
+
+ return cmdbatch;
+}
+
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
- int result = 0;
- int i = 0;
struct kgsl_ringbuffer_issueibcmds *param = data;
- struct kgsl_ibdesc *ibdesc;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
+ struct kgsl_cmdbatch *cmdbatch;
+ long result = -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
if (context == NULL) {
- result = -EINVAL;
+ KGSL_DRV_ERR(device,
+ "Could not find context %d\n", param->drawctxt_id);
goto done;
}
if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
- if (!param->numibs) {
- result = -EINVAL;
- goto done;
- }
-
/*
- * Put a reasonable upper limit on the number of IBs that can be
- * submitted
+ * Do a quick sanity check on the number of IBs in the
+ * submission
*/
- if (param->numibs > 10000) {
- result = -EINVAL;
+ if (param->numibs == 0 || param->numibs > 100000) {
+ KGSL_DRV_ERR(device,
+ "Invalid number of IBs %d\n", param->numibs);
goto done;
}
- ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
- GFP_KERNEL);
- if (!ibdesc) {
- KGSL_MEM_ERR(dev_priv->device,
- "kzalloc(%d) failed\n",
- sizeof(struct kgsl_ibdesc) * param->numibs);
- result = -ENOMEM;
- goto done;
- }
+ cmdbatch = _kgsl_cmdbatch_create(device, context, param);
+ } else {
+ cmdbatch = _kgsl_cmdbatch_create_legacy(context, param);
+ }
- if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
- sizeof(struct kgsl_ibdesc) * param->numibs)) {
- result = -EFAULT;
- KGSL_DRV_ERR(dev_priv->device,
- "copy_from_user failed\n");
- goto free_ibdesc;
- }
- } else {
- KGSL_DRV_INFO(dev_priv->device,
- "Using single IB submission mode for ib submission\n");
- /* If user space driver is still using the old mode of
- * submitting single ib then we need to support that as well */
- ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
- if (!ibdesc) {
- KGSL_MEM_ERR(dev_priv->device,
- "kzalloc(%d) failed\n",
- sizeof(struct kgsl_ibdesc));
- result = -ENOMEM;
- goto done;
- }
- ibdesc[0].gpuaddr = param->ibdesc_addr;
- ibdesc[0].sizedwords = param->numibs;
- param->numibs = 1;
+ if (IS_ERR(cmdbatch)) {
+ result = PTR_ERR(cmdbatch);
+ goto done;
}
- for (i = 0; i < param->numibs; i++) {
- struct kgsl_pagetable *pt = dev_priv->process_priv->pagetable;
-
- if (!kgsl_mmu_gpuaddr_in_range(pt, ibdesc[i].gpuaddr)) {
- result = -ERANGE;
- KGSL_DRV_ERR(dev_priv->device,
- "invalid ib base GPU virtual addr %x\n",
- ibdesc[i].gpuaddr);
- goto free_ibdesc;
- }
+ /* Run basic sanity checking on the command */
+ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) {
+ KGSL_DRV_ERR(device, "Unable to verify the IBs\n");
+ goto free_cmdbatch;
}
- result = dev_priv->device->ftbl->issueibcmds(dev_priv,
- context,
- ibdesc,
- param->numibs,
- ¶m->timestamp,
- param->flags);
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+ cmdbatch, ¶m->timestamp);
-free_ibdesc:
- kfree(ibdesc);
+free_cmdbatch:
+ if (result)
+ kgsl_cmdbatch_destroy(cmdbatch);
+
done:
kgsl_context_put(context);
return result;
@@ -1665,14 +1711,11 @@
{
struct kgsl_drawctxt_destroy *param = data;
struct kgsl_context *context;
- long result = -EINVAL;
+ long result;
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
- if (context) {
- kgsl_context_detach(context);
- result = 0;
- }
+ result = kgsl_context_detach(context);
kgsl_context_put(context);
return result;
@@ -2779,8 +2822,7 @@
kgsl_ioctl_device_waittimestamp_ctxtid,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
- kgsl_ioctl_rb_issueibcmds,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ kgsl_ioctl_rb_issueibcmds, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
kgsl_ioctl_cmdstream_readtimestamp,
KGSL_IOCTL_LOCK),
@@ -3462,7 +3504,6 @@
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
- setup_timer(&device->hang_timer, hang_timer, (unsigned long) device);
status = kgsl_create_device_workqueue(device);
if (status)
goto error_pwrctrl_close;
@@ -3522,7 +3563,6 @@
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device);
-
}
if (device->pm_dump_enable) {
@@ -3536,13 +3576,12 @@
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
- pwr->interval_timeout);
+ pwr->interval_timeout);
}
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 8d390a9..de647d5 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -141,6 +141,7 @@
struct kgsl_pagetable;
struct kgsl_memdesc;
+struct kgsl_cmdbatch;
struct kgsl_memdesc_ops {
int (*vmflags)(struct kgsl_memdesc *);
@@ -205,7 +206,6 @@
#define MMU_CONFIG 1
#endif
-void kgsl_hang_check(struct work_struct *work);
void kgsl_mem_entry_destroy(struct kref *kref);
int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
@@ -237,7 +237,7 @@
unsigned int value);
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_ibdesc *ibdesc, int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 9ab8d22..110264b 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,7 +123,6 @@
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
-KGSL_DEBUGFS_LOG(ft_log);
static int memfree_hist_print(struct seq_file *s, void *unused)
{
@@ -185,7 +184,6 @@
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
- device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
@@ -199,8 +197,6 @@
&pwr_log_fops);
debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
&memfree_hist_fops);
- debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
- &ft_log_fops);
/* Create postmortem dump control files */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 09a31c9..f5b27d0 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -13,6 +13,7 @@
#ifndef __KGSL_DEVICE_H
#define __KGSL_DEVICE_H
+#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pm_qos.h>
#include <linux/sched.h>
@@ -76,6 +77,7 @@
struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_event;
+struct kgsl_cmdbatch;
struct kgsl_functable {
/* Mandatory functions - these functions must be implemented
@@ -87,7 +89,7 @@
void (*regwrite) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
int (*idle) (struct kgsl_device *device);
- unsigned int (*isidle) (struct kgsl_device *device);
+ bool (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*init) (struct kgsl_device *device);
int (*start) (struct kgsl_device *device);
@@ -101,9 +103,8 @@
unsigned int (*readtimestamp) (struct kgsl_device *device,
struct kgsl_context *context, enum kgsl_timestamp_type type);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
- unsigned int sizedwords, uint32_t *timestamp,
- unsigned int flags);
+ struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamps);
int (*setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*cleanup_pt)(struct kgsl_device *device,
@@ -115,14 +116,15 @@
void * (*snapshot)(struct kgsl_device *device, void *snapshot,
int *remain, int hang);
irqreturn_t (*irq_handler)(struct kgsl_device *device);
+ int (*drain)(struct kgsl_device *device);
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
- void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ int (*setstate) (struct kgsl_device *device, unsigned int context_id,
uint32_t flags);
struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
uint32_t *flags);
- void (*drawctxt_detach) (struct kgsl_context *context);
+ int (*drawctxt_detach) (struct kgsl_context *context);
void (*drawctxt_destroy) (struct kgsl_context *context);
long (*ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
@@ -155,6 +157,26 @@
unsigned int created;
};
+/**
+ * struct kgsl_cmdbatch - KGSL command descriptor
+ * @context: KGSL context that created the command
+ * @timestamp: Timestamp assigned to the command (currently unused)
+ * @flags: Flags copied from the user submission
+ * @ibcount: Number of IBs in the command list
+ * @ibdesc: Pointer to the list of IBs
+ * @expires: Point in time when the cmdbatch is considered to be hung
+ * @invalid: non-zero if the dispatcher determines the command and the owning
+ * context should be invalidated
+ */
+struct kgsl_cmdbatch {
+ struct kgsl_context *context;
+ uint32_t timestamp;
+ uint32_t flags;
+ uint32_t ibcount;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned long expires;
+ int invalid;
+};
struct kgsl_device {
struct device *dev;
@@ -190,9 +212,7 @@
struct completion hwaccess_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
- struct work_struct hang_check_ws;
struct timer_list idle_timer;
- struct timer_list hang_timer;
struct kgsl_pwrctrl pwrctrl;
int open_count;
@@ -206,7 +226,6 @@
wait_queue_head_t active_cnt_wq;
struct workqueue_struct *work_queue;
struct device *parentdev;
- struct completion ft_gate;
struct dentry *d_debugfs;
struct idr context_idr;
rwlock_t context_lock;
@@ -233,7 +252,6 @@
int drv_log;
int mem_log;
int pwr_log;
- int ft_log;
int pm_dump_enable;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
@@ -254,11 +272,8 @@
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
- .hang_check_ws = __WORK_INITIALIZER((_dev).hang_check_ws,\
- kgsl_hang_check),\
.ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
kgsl_process_events),\
.context_idr = IDR_INIT((_dev).context_idr),\
@@ -440,8 +455,6 @@
return 0;
}
-
-
int kgsl_check_timestamp(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp);
@@ -597,4 +610,21 @@
{
kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
}
+
+/**
+ * kgsl_cmdbatch_destroy() - Destroy a command batch structure
+ * @cmdbatch: Pointer to the command batch to destroy
+ *
+ * Destroy and free a command batch
+ */
+static inline void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+{
+ if (cmdbatch) {
+ kgsl_context_put(cmdbatch->context);
+ kfree(cmdbatch->ibdesc);
+ }
+
+ kfree(cmdbatch);
+}
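+
+/*
+ * Illustrative pairing (a sketch): kgsl_cmdbatch_create() takes a reference
+ * on the context, so each batch must be destroyed exactly once to drop it:
+ *
+ *   cmdbatch = kgsl_cmdbatch_create(context, numibs);
+ *   ...
+ *   kgsl_cmdbatch_destroy(cmdbatch);  // puts the context reference
+ */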
+
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 68052b1..2634e4f 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -482,15 +482,17 @@
return NULL;
}
-static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
- return;
+ return 0;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- kgsl_idle(mmu->device);
+ int ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
@@ -500,12 +502,16 @@
/* Invalidate all and tc */
kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
}
+
+ return 0;
}
-static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
+ int ret = 0;
+
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -518,10 +524,13 @@
kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
/* call device specific set page table */
- kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
+ ret = kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
+
+ return ret;
}
static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
@@ -563,6 +572,7 @@
struct kgsl_device *device = mmu->device;
struct kgsl_gpummu_pt *gpummu_pt;
+ int ret;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
@@ -574,9 +584,6 @@
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
- /* idle device */
- kgsl_idle(device);
-
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
@@ -607,10 +614,12 @@
kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
- kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
- mmu->flags |= KGSL_FLAGS_STARTED;
- return 0;
+ ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+ if (!ret)
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+ return ret;
}
static int
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index ecda5a7..103736d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1205,10 +1205,12 @@
return 0;
}
-static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
+ int ret = 0;
+
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -1219,10 +1221,12 @@
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
mmu->device->id) |
KGSL_MMUFLAGS_TLBFLUSH;
- kgsl_setstate(mmu, context_id,
+ ret = kgsl_setstate(mmu, context_id,
KGSL_MMUFLAGS_PTUPDATE | flags);
}
}
+
+ return ret;
}
/*
@@ -1892,31 +1896,40 @@
* cpu
- * Return - void
+ * Return - 0 on success else error code
*/
-static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_iommu *iommu = mmu->priv;
int temp;
int i;
+ int ret = 0;
phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu,
mmu->hwpagetable);
phys_addr_t pt_val;
- if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+ ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+
+ if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- return;
+ return ret;
}
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
- if (msm_soc_version_supports_iommu_v0())
- kgsl_idle(mmu->device);
+ if (msm_soc_version_supports_iommu_v0()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
+ }
/* Acquire GPU-CPU sync Lock here */
_iommu_lock();
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- if (!msm_soc_version_supports_iommu_v0())
- kgsl_idle(mmu->device);
+ if (!msm_soc_version_supports_iommu_v0()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ goto unlock;
+ }
for (i = 0; i < iommu->unit_count; i++) {
/* get the lsb value which should not change when
* changing ttbr0 */
@@ -1977,12 +1990,13 @@
}
}
}
-
+unlock:
/* Release GPU-CPU sync Lock here */
_iommu_unlock();
/* Disable smmu clock */
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ return ret;
}
/*
@@ -2039,6 +2053,7 @@
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
+ .mmu_disable_clk = kgsl_iommu_disable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index a7832e4..3a32953 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -103,15 +103,6 @@
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
-#define KGSL_FT_INFO(_dev, fmt, args...) \
-KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_WARN(_dev, fmt, args...) \
-KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_ERR(_dev, fmt, args...) \
-KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
-#define KGSL_FT_CRIT(_dev, fmt, args...) \
-KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
-
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 952019f..6635a7c 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -566,7 +566,7 @@
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags)
{
struct kgsl_device *device = mmu->device;
@@ -574,14 +574,16 @@
if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
&& !adreno_is_a2xx(adreno_dev))
- return;
+ return 0;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
- return;
+ return 0;
else if (device->ftbl->setstate)
- device->ftbl->setstate(device, context_id, flags);
+ return device->ftbl->setstate(device, context_id, flags);
else if (mmu->mmu_ops->mmu_device_setstate)
- mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
}
EXPORT_SYMBOL(kgsl_setstate);
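+
+/*
+ * Illustrative caller (a sketch): with kgsl_setstate() returning int, MMU
+ * errors now propagate instead of being silently dropped:
+ *
+ *   ret = kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL, flags);
+ *   if (ret)
+ *           return ret;  // e.g. kgsl_idle() failed
+ */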
@@ -590,7 +592,6 @@
struct kgsl_mh *mh = &device->mh;
/* force mmu off to for now*/
kgsl_regwrite(device, MH_MMU_CONFIG, 0);
- kgsl_idle(device);
/* define physical memory range accessible by the core */
kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index faba81e..a30ee3f 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -133,10 +133,10 @@
int (*mmu_close) (struct kgsl_mmu *mmu);
int (*mmu_start) (struct kgsl_mmu *mmu);
void (*mmu_stop) (struct kgsl_mmu *mmu);
- void (*mmu_setstate) (struct kgsl_mmu *mmu,
+ int (*mmu_setstate) (struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id);
- void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+ int (*mmu_device_setstate) (struct kgsl_mmu *mmu,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_current_ptbase)
@@ -147,6 +147,8 @@
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
+ void (*mmu_disable_clk)
+ (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -231,7 +233,7 @@
int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
phys_addr_t pt_base);
@@ -260,19 +262,23 @@
return 0;
}
-static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
- mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+ return mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+
+ return 0;
}
-static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+static inline int kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
- mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
}
static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
@@ -320,6 +326,12 @@
return 0;
}
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
+ mmu->mmu_ops->mmu_disable_clk(mmu);
+}
+
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
unsigned int ts, bool ts_valid)
{
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 1a95761..07131f7 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1215,9 +1215,6 @@
} else {
device->pwrctrl.irq_last = 0;
}
- } else if (device->state & (KGSL_STATE_HUNG |
- KGSL_STATE_DUMP_AND_FT)) {
- kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
}
mutex_unlock(&device->mutex);
@@ -1273,7 +1270,6 @@
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
return -EBUSY;
}
- del_timer_sync(&device->hang_timer);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
@@ -1343,7 +1339,6 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
- del_timer_sync(&device->hang_timer);
/* make sure power is on to stop the device*/
kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
@@ -1435,8 +1430,6 @@
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- mod_timer(&device->hang_timer,
- (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
device->pwrctrl.pm_qos_latency);
case KGSL_STATE_ACTIVE:
@@ -1504,10 +1497,6 @@
return "SLEEP";
case KGSL_STATE_SUSPEND:
return "SUSPEND";
- case KGSL_STATE_HUNG:
- return "HUNG";
- case KGSL_STATE_DUMP_AND_FT:
- return "DNR";
case KGSL_STATE_SLUMBER:
return "SLUMBER";
default:
@@ -1539,7 +1528,6 @@
(device->state != KGSL_STATE_ACTIVE)) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
- wait_for_completion(&device->ft_gate);
mutex_lock(&device->mutex);
/* Stop the idle timer */
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index 40649d2..8fc1753 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -235,8 +235,10 @@
tz_pwrlevels[0] = j;
ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
sizeof(tz_pwrlevels), NULL, 0);
- if (ret)
+ if (ret) {
+ KGSL_DRV_ERR(device, "Falling back to idle based GPU DCVS algo\n");
priv->idle_dcvs = 1;
+ }
return 0;
}
#else
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index f16f2b4..179a72b 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -37,14 +37,13 @@
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_ibdesc *ibdesc,
- int numibs,
+ struct kgsl_cmdbatch *cmdbatch,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, ibdesc, numibs, timestamp, flags,
+ TP_ARGS(device, drawctxt_id, cmdbatch, timestamp, flags,
result, type),
TP_STRUCT__entry(
@@ -61,8 +60,8 @@
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->drawctxt_id = drawctxt_id;
- __entry->ibdesc_addr = ibdesc[0].gpuaddr;
- __entry->numibs = numibs;
+ __entry->ibdesc_addr = cmdbatch->ibdesc[0].gpuaddr;
+ __entry->numibs = cmdbatch->ibcount;
__entry->timestamp = timestamp;
__entry->flags = flags;
__entry->result = result;
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 883417f..0af57aa 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -353,7 +353,13 @@
return ts_diff < Z180_PACKET_COUNT;
}
-static int z180_idle(struct kgsl_device *device)
+/**
+ * z180_idle() - Idle the 2D device
+ * @device: Pointer to the KGSL device struct for the Z180
+ *
+ * Wait until the Z180 submission queue is idle.
+ */
+int z180_idle(struct kgsl_device *device)
{
int status = 0;
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -373,10 +379,8 @@
int
z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_ibdesc *ibdesc,
- unsigned int numibs,
- uint32_t *timestamp,
- unsigned int ctrl)
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
{
long result = 0;
unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
@@ -389,6 +393,20 @@
struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
struct z180_device *z180_dev = Z180_DEVICE(device);
unsigned int sizedwords;
+ unsigned int numibs;
+ struct kgsl_ibdesc *ibdesc;
+
+ mutex_lock(&device->mutex);
+
+ kgsl_active_count_get(device);
+
+ if (cmdbatch == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
if (device->state & KGSL_STATE_HUNG) {
result = -EINVAL;
@@ -430,7 +448,7 @@
context->id, cmd, sizedwords);
/* context switch */
if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
- (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+ (cmdbatch->flags & KGSL_CONTEXT_CTX_SWITCH)) {
KGSL_CMD_INFO(device, "context switch %d -> %d\n",
context->id, z180_dev->ringbuffer.prevctx);
kgsl_mmu_setstate(&device->mmu, pagetable,
@@ -438,10 +456,13 @@
cnt = PACKETSIZE_STATESTREAM;
ofs = 0;
}
- kgsl_setstate(&device->mmu,
+
+ result = kgsl_setstate(&device->mmu,
KGSL_MEMSTORE_GLOBAL,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
+ if (result < 0)
+ goto error;
result = wait_event_interruptible_timeout(device->wait_queue,
room_in_rb(z180_dev),
@@ -482,9 +503,12 @@
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
error:
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ *timestamp, cmdbatch->flags, result, 0);
- kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
- *timestamp, ctrl, result, 0);
+ kgsl_active_count_put(device);
+
+ mutex_unlock(&device->mutex);
return (int)result;
}
@@ -595,8 +619,12 @@
static int z180_stop(struct kgsl_device *device)
{
+ int ret;
+
device->ftbl->irqctrl(device, 0);
- z180_idle(device);
+ ret = z180_idle(device);
+ if (ret)
+ return ret;
del_timer_sync(&device->idle_timer);
@@ -662,7 +690,7 @@
return status;
}
-static unsigned int z180_isidle(struct kgsl_device *device)
+static bool z180_isidle(struct kgsl_device *device)
{
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -875,7 +903,7 @@
return context;
}
-static void
+static int
z180_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
@@ -889,9 +917,13 @@
if (z180_dev->ringbuffer.prevctx == context->id) {
z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
device->mmu.hwpagetable = device->mmu.defaultpagetable;
+
+ /* Ignore the result - we are going down anyway */
kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
KGSL_MMUFLAGS_PTUPDATE);
}
+
+ return 0;
}
static void
@@ -965,6 +997,7 @@
.irqctrl = z180_irqctrl,
.gpuid = z180_gpuid,
.irq_handler = z180_irq_handler,
+ .drain = z180_idle, /* drain == idle for the z180 */
/* Optional functions */
.drawctxt_create = z180_drawctxt_create,
.drawctxt_detach = z180_drawctxt_detach,
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index 1be0870..a36e92d 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -45,5 +45,6 @@
};
int z180_dump(struct kgsl_device *, int);
+int z180_idle(struct kgsl_device *);
#endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_postmortem.c b/drivers/gpu/msm/z180_postmortem.c
index 5d929cf..bc53c0e 100644
--- a/drivers/gpu/msm/z180_postmortem.c
+++ b/drivers/gpu/msm/z180_postmortem.c
@@ -58,6 +58,8 @@
unsigned int i;
unsigned int reg_val;
+ z180_idle(device);
+
KGSL_LOG_DUMP(device, "Z180 Register Dump\n");
for (i = 0; i < ARRAY_SIZE(regs_to_dump); i++) {
kgsl_regread(device,
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/mobicore_driver/api.c
index 871f6cc..b47383a0 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/mobicore_driver/api.c
@@ -98,7 +98,11 @@
*/
struct mc_instance *mobicore_open(void)
{
- return mc_alloc_instance();
+ struct mc_instance *instance = mc_alloc_instance();
+ if (instance)
+ instance->admin = true;
+ return instance;
}
EXPORT_SYMBOL(mobicore_open);
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/mobicore_driver/build_tag.h
index 2a7772e..4a24275 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/mobicore_driver/build_tag.h
@@ -26,4 +26,4 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
- "*** GC_MSM8960_Release_V019 ###"
+ "*** t-base-202_V001 ###"
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/mobicore_driver/main.c
index 6f91974..0451452 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/mobicore_driver/main.c
@@ -47,7 +47,7 @@
/* Define a MobiCore device structure for use with dev_debug() etc */
struct device_driver mcd_debug_name = {
- .name = "mcdrvkmod"
+ .name = "MobiCore"
};
struct device mcd_debug_subname = {
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 4768f39..7854fc5 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -41,7 +41,15 @@
/* Enable the use of vm_unmap instead of the deprecated do_munmap
* and other 3.7 features
*/
+#ifndef CONFIG_ARCH_MSM8960
#define MC_VM_UNMAP
+#endif
+
+
+#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8226)
+/* Perform clock enable/disable */
+#define MC_CRYPTO_CLOCK_MANAGEMENT
+#endif
/* Enable Power Management for Crypto Engine */
#define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/mobicore_driver/pm.c
index 3ad2015..55a1ef7 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/mobicore_driver/pm.c
@@ -67,8 +67,8 @@
MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
MCDRV_DBG(mcd,
"MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
- MCDRV_DBG(mcd, "MobiCore Sleep Ready=%d!",
- flags->sleep_mode.ReadyToSleep);
+ MCDRV_DBG(mcd,
+ "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
}
static int mc_suspend_notifier(struct notifier_block *nb,
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/mobicore_driver/public/mc_linux.h
index 9c49aef..af027dc 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/mobicore_driver/public/mc_linux.h
@@ -43,6 +43,10 @@
#include "version.h"
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
#define MC_ADMIN_DEVNODE "mobicore"
#define MC_USER_DEVNODE "mobicore-user"
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/mobicore_kernelapi/clientlib.c
index 7038e02..16b52e5 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/mobicore_kernelapi/clientlib.c
@@ -299,7 +299,8 @@
{
session->device_id,
*uuid,
- (uint32_t)wsm->phys_addr,
+ (uint32_t)(wsm->phys_addr) & 0xFFF,
+ wsm->handle,
len
}
};
@@ -926,7 +927,8 @@
{
session->session_id,
handle,
- (uint32_t)(map_info->secure_virt_addr)
+ (uint32_t)(map_info->secure_virt_addr),
+ map_info->secure_virt_len
}
};
@@ -956,11 +958,11 @@
break;
}
- struct mc_drv_rsp_unmap_bulk_mem_payload_t
+ /*struct mc_drv_rsp_unmap_bulk_mem_payload_t
rsp_unmap_bulk_mem_payload;
connection_read_datablock(dev_con,
&rsp_unmap_bulk_mem_payload,
- sizeof(rsp_unmap_bulk_mem_payload));
+ sizeof(rsp_unmap_bulk_mem_payload));*/
/*
* Unregister mapped bulk buffer from Kernel Module and
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
index 3b8eb4b..eaf7e6c 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
@@ -109,6 +109,7 @@
uint32_t device_id;
struct mc_uuid_t uuid;
uint32_t tci;
+ uint32_t handle;
uint32_t len;
};
@@ -119,10 +120,8 @@
struct mc_drv_rsp_open_session_payload_t {
- uint32_t device_id;
uint32_t session_id;
uint32_t device_session_id;
- uint32_t mc_result;
uint32_t session_magic;
};
@@ -186,7 +185,6 @@
struct mc_drv_rsp_map_bulk_mem_payload_t {
uint32_t session_id;
uint32_t secure_virtual_adr;
- uint32_t mc_result;
};
struct mc_drv_rsp_map_bulk_mem_t {
@@ -210,7 +208,6 @@
struct mc_drv_rsp_unmap_bulk_mem_payload_t {
uint32_t response_id;
uint32_t session_id;
- uint32_t mc_result;
};
struct mc_drv_rsp_unmap_bulk_mem_t {
diff --git a/drivers/input/misc/stk3x1x.c b/drivers/input/misc/stk3x1x.c
new file mode 100644
index 0000000..eee9a28
--- /dev/null
+++ b/drivers/input/misc/stk3x1x.c
@@ -0,0 +1,2020 @@
+/*
+ * stk3x1x.c - Linux kernel modules for sensortek stk301x, stk321x and stk331x
+ * proximity/ambient light sensor
+ *
+ * Copyright (C) 2012 Lex Hsieh / sensortek <lex_hsieh@sitronix.com.tw> or
+ * <lex_hsieh@sensortek.com.tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/kdev_t.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/wakelock.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include "linux/stk3x1x.h"
+
+#define DRIVER_VERSION "3.4.4ts"
+
+/* Driver Settings */
+#define CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+#ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+#define STK_ALS_CHANGE_THD 20 /* The threshold to trigger ALS interrupt, unit: lux */
+#endif /* #ifdef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD */
+#define STK_INT_PS_MODE 1 /* 1, 2, or 3 */
+#define STK_POLL_PS
+#define STK_POLL_ALS /* ALS interrupt is valid only when STK_PS_INT_MODE = 1 or 4*/
+
+#define STK_DEBUG_PRINTF
+
+/* Define Register Map */
+#define STK_STATE_REG 0x00
+#define STK_PSCTRL_REG 0x01
+#define STK_ALSCTRL_REG 0x02
+#define STK_LEDCTRL_REG 0x03
+#define STK_INT_REG 0x04
+#define STK_WAIT_REG 0x05
+#define STK_THDH1_PS_REG 0x06
+#define STK_THDH2_PS_REG 0x07
+#define STK_THDL1_PS_REG 0x08
+#define STK_THDL2_PS_REG 0x09
+#define STK_THDH1_ALS_REG 0x0A
+#define STK_THDH2_ALS_REG 0x0B
+#define STK_THDL1_ALS_REG 0x0C
+#define STK_THDL2_ALS_REG 0x0D
+#define STK_FLAG_REG 0x10
+#define STK_DATA1_PS_REG 0x11
+#define STK_DATA2_PS_REG 0x12
+#define STK_DATA1_ALS_REG 0x13
+#define STK_DATA2_ALS_REG 0x14
+#define STK_DATA1_OFFSET_REG 0x15
+#define STK_DATA2_OFFSET_REG 0x16
+#define STK_DATA1_IR_REG 0x17
+#define STK_DATA2_IR_REG 0x18
+#define STK_PDT_ID_REG 0x3E
+#define STK_RSRVD_REG 0x3F
+#define STK_SW_RESET_REG 0x80
+
+
+/* Define state reg */
+#define STK_STATE_EN_IRS_SHIFT 7
+#define STK_STATE_EN_AK_SHIFT 6
+#define STK_STATE_EN_ASO_SHIFT 5
+#define STK_STATE_EN_IRO_SHIFT 4
+#define STK_STATE_EN_WAIT_SHIFT 2
+#define STK_STATE_EN_ALS_SHIFT 1
+#define STK_STATE_EN_PS_SHIFT 0
+
+#define STK_STATE_EN_IRS_MASK 0x80
+#define STK_STATE_EN_AK_MASK 0x40
+#define STK_STATE_EN_ASO_MASK 0x20
+#define STK_STATE_EN_IRO_MASK 0x10
+#define STK_STATE_EN_WAIT_MASK 0x04
+#define STK_STATE_EN_ALS_MASK 0x02
+#define STK_STATE_EN_PS_MASK 0x01
+
+/* Define PS ctrl reg */
+#define STK_PS_PRS_SHIFT 6
+#define STK_PS_GAIN_SHIFT 4
+#define STK_PS_IT_SHIFT 0
+
+#define STK_PS_PRS_MASK 0xC0
+#define STK_PS_GAIN_MASK 0x30
+#define STK_PS_IT_MASK 0x0F
+
+/* Define ALS ctrl reg */
+#define STK_ALS_PRS_SHIFT 6
+#define STK_ALS_GAIN_SHIFT 4
+#define STK_ALS_IT_SHIFT 0
+
+#define STK_ALS_PRS_MASK 0xC0
+#define STK_ALS_GAIN_MASK 0x30
+#define STK_ALS_IT_MASK 0x0F
+
+/* Define LED ctrl reg */
+#define STK_LED_IRDR_SHIFT 6
+#define STK_LED_DT_SHIFT 0
+
+#define STK_LED_IRDR_MASK 0xC0
+#define STK_LED_DT_MASK 0x3F
+
+/* Define interrupt reg */
+#define STK_INT_CTRL_SHIFT 7
+#define STK_INT_OUI_SHIFT 4
+#define STK_INT_ALS_SHIFT 3
+#define STK_INT_PS_SHIFT 0
+
+#define STK_INT_CTRL_MASK 0x80
+#define STK_INT_OUI_MASK 0x10
+#define STK_INT_ALS_MASK 0x08
+#define STK_INT_PS_MASK 0x07
+
+#define STK_INT_ALS 0x08
+
+/* Define flag reg */
+#define STK_FLG_ALSDR_SHIFT 7
+#define STK_FLG_PSDR_SHIFT 6
+#define STK_FLG_ALSINT_SHIFT 5
+#define STK_FLG_PSINT_SHIFT 4
+#define STK_FLG_OUI_SHIFT 2
+#define STK_FLG_IR_RDY_SHIFT 1
+#define STK_FLG_NF_SHIFT 0
+
+#define STK_FLG_ALSDR_MASK 0x80
+#define STK_FLG_PSDR_MASK 0x40
+#define STK_FLG_ALSINT_MASK 0x20
+#define STK_FLG_PSINT_MASK 0x10
+#define STK_FLG_OUI_MASK 0x04
+#define STK_FLG_IR_RDY_MASK 0x02
+#define STK_FLG_NF_MASK 0x01
+
+/* misc define */
+#define MIN_ALS_POLL_DELAY_NS 110000000
+
+#define DEVICE_NAME "stk_ps"
+#define ALS_NAME "lightsensor-level"
+#define PS_NAME "proximity"
+
+struct stk3x1x_data {
+ struct i2c_client *client;
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+ int32_t irq;
+ struct work_struct stk_work;
+ struct workqueue_struct *stk_wq;
+#endif
+ int int_pin;
+ uint8_t wait_reg;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend stk_early_suspend;
+#endif
+ uint16_t ps_thd_h;
+ uint16_t ps_thd_l;
+ struct mutex io_lock;
+ struct input_dev *ps_input_dev;
+ int32_t ps_distance_last;
+ bool ps_enabled;
+ struct wake_lock ps_wakelock;
+ struct work_struct stk_ps_work;
+ struct workqueue_struct *stk_ps_wq;
+#ifdef STK_POLL_PS
+ struct wake_lock ps_nosuspend_wl;
+#endif
+ struct input_dev *als_input_dev;
+ int32_t als_lux_last;
+ uint32_t als_transmittance;
+ bool als_enabled;
+ struct hrtimer als_timer;
+ struct hrtimer ps_timer;
+ ktime_t als_poll_delay;
+ ktime_t ps_poll_delay;
+#ifdef STK_POLL_ALS
+ struct work_struct stk_als_work;
+ struct workqueue_struct *stk_als_wq;
+#endif
+};
+
+#if( !defined(CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD))
+static uint32_t lux_threshold_table[] =
+{
+ 3,
+ 10,
+ 40,
+ 65,
+ 145,
+ 300,
+ 550,
+ 930,
+ 1250,
+ 1700,
+};
+
+#define LUX_THD_TABLE_SIZE (sizeof(lux_threshold_table)/sizeof(uint32_t)+1)
+static uint16_t code_threshold_table[LUX_THD_TABLE_SIZE+1];
+#endif
+
+static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable);
+static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable);
+static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
+static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
+static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l);
+static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h);
+//static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset);
+
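+/*
+ * The shift/add chain below is fixed-point scaling: alscode + (alscode<<7) +
+ * (alscode<<3) + (alscode>>1) = 137.5 * alscode, and the following <<3
+ * multiplies by 8, i.e. roughly alscode * 1100 before the division by the
+ * panel transmittance. stk_lux2alscode() below applies the inverse mapping,
+ * lux * transmittance / 1100.
+ */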
+inline uint32_t stk_alscode2lux(struct stk3x1x_data *ps_data, uint32_t alscode)
+{
+ alscode += ((alscode<<7)+(alscode<<3)+(alscode>>1));
+ alscode<<=3;
+ alscode/=ps_data->als_transmittance;
+ return alscode;
+}
+
+inline uint32_t stk_lux2alscode(struct stk3x1x_data *ps_data, uint32_t lux)
+{
+ lux*=ps_data->als_transmittance;
+ lux/=1100;
+ if (unlikely(lux>=(1<<16)))
+ lux = (1<<16) -1;
+ return lux;
+}
+
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+static void stk_init_code_threshold_table(struct stk3x1x_data *ps_data)
+{
+ uint32_t i,j;
+ uint32_t alscode;
+
+ code_threshold_table[0] = 0;
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "alscode[0]=%d\n",0);
+#endif
+ for (i=1,j=0;i<LUX_THD_TABLE_SIZE;i++,j++)
+ {
+ alscode = stk_lux2alscode(ps_data, lux_threshold_table[j]);
+ printk(KERN_INFO "alscode[%d]=%d\n",i,alscode);
+ code_threshold_table[i] = (uint16_t)(alscode);
+ }
+ code_threshold_table[i] = 0xffff;
+ printk(KERN_INFO "alscode[%d]=%d\n",i,code_threshold_table[i]);
+}
+
+static uint32_t stk_get_lux_interval_index(uint16_t alscode)
+{
+ uint32_t i;
+ for (i=1;i<=LUX_THD_TABLE_SIZE;i++)
+ {
+ if ((alscode>=code_threshold_table[i-1])&&(alscode<code_threshold_table[i]))
+ {
+ return i;
+ }
+ }
+ return LUX_THD_TABLE_SIZE;
+}
+#else
+inline void stk_als_set_new_thd(struct stk3x1x_data *ps_data, uint16_t alscode)
+{
+ int32_t high_thd,low_thd;
+ high_thd = alscode + stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
+ low_thd = alscode - stk_lux2alscode(ps_data, STK_ALS_CHANGE_THD);
+ if (high_thd >= (1<<16))
+ high_thd = (1<<16) -1;
+ if (low_thd <0)
+ low_thd = 0;
+ stk3x1x_set_als_thd_h(ps_data, (uint16_t)high_thd);
+ stk3x1x_set_als_thd_l(ps_data, (uint16_t)low_thd);
+}
+#endif // CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+
+
+static int32_t stk3x1x_init_all_reg(struct stk3x1x_data *ps_data, struct stk3x1x_platform_data *plat_data)
+{
+ int32_t ret;
+ uint8_t w_reg;
+
+ w_reg = plat_data->state_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ ps_data->ps_thd_h = plat_data->ps_thd_h;
+ ps_data->ps_thd_l = plat_data->ps_thd_l;
+
+ w_reg = plat_data->psctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_PSCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = plat_data->alsctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_ALSCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = plat_data->ledctrl_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_LEDCTRL_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ ps_data->wait_reg = plat_data->wait_reg;
+
+ if(ps_data->wait_reg < 2)
+ {
+ printk(KERN_WARNING "%s: wait_reg should be larger than 2, force to write 2\n", __func__);
+ ps_data->wait_reg = 2;
+ }
+ else if (ps_data->wait_reg > 0xFF)
+ {
+ printk(KERN_WARNING "%s: wait_reg should be less than 0xFF, force to write 0xFF\n", __func__);
+ ps_data->wait_reg = 0xFF;
+ }
+ w_reg = plat_data->wait_reg;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_WAIT_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ stk3x1x_set_ps_thd_h(ps_data, ps_data->ps_thd_h);
+ stk3x1x_set_ps_thd_l(ps_data, ps_data->ps_thd_l);
+
+ w_reg = 0;
+#ifndef STK_POLL_PS
+ w_reg |= STK_INT_PS_MODE;
+#else
+ w_reg |= 0x01;
+#endif
+
+#if (!defined(STK_POLL_ALS) && (STK_INT_PS_MODE != 0x02) && (STK_INT_PS_MODE != 0x03))
+ w_reg |= STK_INT_ALS;
+#endif
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_INT_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ return 0;
+}
+
+static int32_t stk3x1x_check_pid(struct stk3x1x_data *ps_data)
+{
+ int32_t err1, err2;
+
+ err1 = i2c_smbus_read_byte_data(ps_data->client,STK_PDT_ID_REG);
+ if (err1 < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err1);
+ return err1;
+ }
+
+ err2 = i2c_smbus_read_byte_data(ps_data->client,STK_RSRVD_REG);
+ if (err2 < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, err=%d\n", __func__, err2);
+ return -1;
+ }
+ printk(KERN_INFO "%s: PID=0x%x, RID=0x%x\n", __func__, err1, err2);
+ if(err2 == 0xC0)
+ printk(KERN_INFO "%s: RID=0xC0!!!!!!!!!!!!!\n", __func__);
+
+ return 0;
+}
+
+
+static int32_t stk3x1x_software_reset(struct stk3x1x_data *ps_data)
+{
+ int32_t r;
+ uint8_t w_reg;
+
+ w_reg = 0x7F;
+ r = i2c_smbus_write_byte_data(ps_data->client,STK_WAIT_REG,w_reg);
+ if (r<0)
+ {
+ printk(KERN_ERR "%s: software reset: write i2c error, ret=%d\n", __func__, r);
+ return r;
+ }
+ r = i2c_smbus_read_byte_data(ps_data->client,STK_WAIT_REG);
+ if (w_reg != r)
+ {
+ printk(KERN_ERR "%s: software reset: read-back value is not the same\n", __func__);
+ return -1;
+ }
+
+ r = i2c_smbus_write_byte_data(ps_data->client,STK_SW_RESET_REG,0);
+ if (r<0)
+ {
+ printk(KERN_ERR "%s: software reset: read error after reset\n", __func__);
+ return r;
+ }
+ msleep(1);
+ return 0;
+}
+
+
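+/*
+ * The 16-bit threshold registers take the high byte first, while
+ * i2c_smbus_write_word_data() transmits the low byte first; the
+ * setters below therefore swap the two bytes before writing.
+ */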
+static int32_t stk3x1x_set_als_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_l;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_ALS_REG,thd_l);
+}
+static int32_t stk3x1x_set_als_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_h;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_ALS_REG,thd_h);
+}
+
+static int32_t stk3x1x_set_ps_thd_l(struct stk3x1x_data *ps_data, uint16_t thd_l)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_l;
+
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ps_data->ps_thd_l = thd_l;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDL1_PS_REG,thd_l);
+}
+
+static int32_t stk3x1x_set_ps_thd_h(struct stk3x1x_data *ps_data, uint16_t thd_h)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&thd_h;
+
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ps_data->ps_thd_h = thd_h;
+ return i2c_smbus_write_word_data(ps_data->client,STK_THDH1_PS_REG,thd_h);
+}
+
+/*
+static int32_t stk3x1x_set_ps_foffset(struct stk3x1x_data *ps_data, uint16_t offset)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&offset;
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ return i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset);
+}
+
+static int32_t stk3x1x_set_ps_aoffset(struct stk3x1x_data *ps_data, uint16_t offset)
+{
+ uint8_t temp;
+ uint8_t* pSrc = (uint8_t*)&offset;
+ int ret;
+ uint8_t w_state_reg;
+ uint8_t re_en;
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ re_en = (ret & STK_STATE_EN_AK_MASK) ? 1: 0;
+ if(re_en)
+ {
+ w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_AK_MASK));
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ msleep(1);
+ }
+ temp = *pSrc;
+ *pSrc = *(pSrc+1);
+ *(pSrc+1) = temp;
+ ret = i2c_smbus_write_word_data(ps_data->client,0x0E,offset);
+ if(!re_en)
+ return ret;
+
+ w_state_reg |= STK_STATE_EN_AK_MASK;
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+*/
+
+static inline int32_t stk3x1x_get_ps_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client,STK_DATA1_PS_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
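+	/* the sensor returns the high byte first; swap to host order */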
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return word_data;
+}
+
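+/*
+ * Write back the flag register with every interrupt flag bit kept set
+ * except the bits in 'clr', so that only the handled interrupts are
+ * cleared.
+ */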
+static int32_t stk3x1x_set_flag(struct stk3x1x_data *ps_data, uint8_t org_flag_reg, uint8_t clr)
+{
+ uint8_t w_flag;
+ w_flag = org_flag_reg | (STK_FLG_ALSINT_MASK | STK_FLG_PSINT_MASK | STK_FLG_OUI_MASK | STK_FLG_IR_RDY_MASK);
+ w_flag &= (~clr);
+ //printk(KERN_INFO "%s: org_flag_reg=0x%x, w_flag = 0x%x\n", __func__, org_flag_reg, w_flag);
+ return i2c_smbus_write_byte_data(ps_data->client,STK_FLAG_REG, w_flag);
+}
+
+static int32_t stk3x1x_get_flag(struct stk3x1x_data *ps_data)
+{
+ return i2c_smbus_read_byte_data(ps_data->client,STK_FLAG_REG);
+}
+
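+/*
+ * Enable/disable the proximity engine. In polling mode this starts or
+ * stops the PS poll timer; in interrupt mode it gates the shared IRQ
+ * and reports the initial near/far state.
+ */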
+static int32_t stk3x1x_enable_ps(struct stk3x1x_data *ps_data, uint8_t enable)
+{
+ int32_t ret;
+ uint8_t w_state_reg;
+	uint8_t curr_ps_enable;
+#ifndef STK_POLL_PS
+	int32_t near_far_state;
+	uint32_t reading;
+#endif
+	curr_ps_enable = ps_data->ps_enabled?1:0;
+ if(curr_ps_enable == enable)
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+ w_state_reg = ret;
+ w_state_reg &= ~(STK_STATE_EN_PS_MASK | STK_STATE_EN_WAIT_MASK | 0x60);
+ if(enable)
+ {
+ w_state_reg |= STK_STATE_EN_PS_MASK;
+ if(!(ps_data->als_enabled))
+ w_state_reg |= STK_STATE_EN_WAIT_MASK;
+ }
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ if(enable)
+ {
+#ifdef STK_POLL_PS
+ hrtimer_start(&ps_data->ps_timer, ps_data->ps_poll_delay, HRTIMER_MODE_REL);
+ ps_data->ps_distance_last = -1;
+#endif
+ ps_data->ps_enabled = true;
+#ifndef STK_POLL_PS
+#ifndef STK_POLL_ALS
+ if(!(ps_data->als_enabled))
+#endif /* #ifndef STK_POLL_ALS */
+ enable_irq(ps_data->irq);
+ msleep(1);
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: read i2c error, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ near_far_state = ret & STK_FLG_NF_MASK;
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+ printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading);
+#endif /* #ifndef STK_POLL_PS */
+ }
+ else
+ {
+#ifdef STK_POLL_PS
+ hrtimer_cancel(&ps_data->ps_timer);
+#else
+#ifndef STK_POLL_ALS
+ if(!(ps_data->als_enabled))
+#endif
+ disable_irq(ps_data->irq);
+#endif
+ ps_data->ps_enabled = false;
+ }
+ return ret;
+}
+
+static int32_t stk3x1x_enable_als(struct stk3x1x_data *ps_data, uint8_t enable)
+{
+ int32_t ret;
+ uint8_t w_state_reg;
+ uint8_t curr_als_enable = (ps_data->als_enabled)?1:0;
+
+ if(curr_als_enable == enable)
+ return 0;
+
+#ifndef STK_POLL_ALS
+ if (enable)
+ {
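+		/* program inverted thresholds (high=0, low=0xFFFF) so the
+		 * first ALS reading immediately raises an interrupt */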
+ stk3x1x_set_als_thd_h(ps_data, 0x0000);
+ stk3x1x_set_als_thd_l(ps_data, 0xFFFF);
+ }
+#endif
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_state_reg = (uint8_t)(ret & (~(STK_STATE_EN_ALS_MASK | STK_STATE_EN_WAIT_MASK)));
+ if(enable)
+ w_state_reg |= STK_STATE_EN_ALS_MASK;
+ else if (ps_data->ps_enabled)
+ w_state_reg |= STK_STATE_EN_WAIT_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ if (enable)
+ {
+ ps_data->als_enabled = true;
+#ifdef STK_POLL_ALS
+ hrtimer_start(&ps_data->als_timer, ps_data->als_poll_delay, HRTIMER_MODE_REL);
+#else
+#ifndef STK_POLL_PS
+ if(!(ps_data->ps_enabled))
+#endif
+ enable_irq(ps_data->irq);
+#endif
+ }
+ else
+ {
+ ps_data->als_enabled = false;
+#ifdef STK_POLL_ALS
+ hrtimer_cancel(&ps_data->als_timer);
+#else
+#ifndef STK_POLL_PS
+ if(!(ps_data->ps_enabled))
+#endif
+ disable_irq(ps_data->irq);
+#endif
+ }
+ return ret;
+}
+
+static inline int32_t stk3x1x_get_als_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_ALS_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return word_data;
+}
+
+static int32_t stk3x1x_get_ir_reading(struct stk3x1x_data *ps_data)
+{
+ int32_t word_data, tmp_word_data;
+ int32_t ret;
+ uint8_t w_reg, retry = 0;
+
+ if(ps_data->ps_enabled)
+ {
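+		/* pause PS during the IR measurement but keep the enabled
+		 * flag set so it can be restored below */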
+ stk3x1x_enable_ps(ps_data, 0);
+ ps_data->ps_enabled = true;
+ }
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_reg = (uint8_t)(ret & (~STK_STATE_EN_IRS_MASK));
+ w_reg |= STK_STATE_EN_IRS_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ msleep(100);
+
+ do
+ {
+ msleep(50);
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ retry++;
+ }while(retry < 5 && ((ret&STK_FLG_IR_RDY_MASK) == 0));
+
+ if(retry == 5)
+ {
+ printk(KERN_ERR "%s: ir data is not ready for 300ms\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = stk3x1x_get_flag(ps_data);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ ret = stk3x1x_set_flag(ps_data, ret, STK_FLG_IR_RDY_MASK);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_IR_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+
+ if(ps_data->ps_enabled)
+ stk3x1x_enable_ps(ps_data, 1);
+ return word_data;
+}
+
+
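+/* sysfs show/store handlers, exposed under the input devices'
+ * "driver" attribute group for debugging and calibration */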
+static ssize_t stk_als_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t reading;
+
+ reading = stk3x1x_get_als_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+
+static ssize_t stk_als_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t enable, ret;
+
+ mutex_lock(&ps_data->io_lock);
+ enable = (ps_data->als_enabled)?1:0;
+ mutex_unlock(&ps_data->io_lock);
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_ALS_MASK)?1:0;
+
+ if(enable != ret)
+ printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_als_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable ALS : %d\n", __func__, en);
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_enable_als(ps_data, en);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_lux_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t als_reading;
+ uint32_t als_lux;
+ als_reading = stk3x1x_get_als_reading(ps_data);
+ mutex_lock(&ps_data->io_lock);
+ als_lux = stk_alscode2lux(ps_data, als_reading);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d lux\n", als_lux);
+}
+
+static ssize_t stk_als_lux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 16, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_lux_last = value;
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, value);
+ input_sync(ps_data->als_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ printk(KERN_INFO "%s: als input event %ld lux\n",__func__, value);
+
+ return size;
+}
+
+
+static ssize_t stk_als_transmittance_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t transmittance;
+ mutex_lock(&ps_data->io_lock);
+ transmittance = ps_data->als_transmittance;
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", transmittance);
+}
+
+
+static ssize_t stk_als_transmittance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_transmittance = value;
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%lld\n", ktime_to_ns(ps_data->als_poll_delay));
+}
+
+
+static ssize_t stk_als_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ uint64_t value = 0;
+ int ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ ret = strict_strtoull(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoull failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: set als poll delay=%lld\n", __func__, value);
+#endif
+ if(value < MIN_ALS_POLL_DELAY_NS)
+ {
+ printk(KERN_ERR "%s: delay is too small\n", __func__);
+ value = MIN_ALS_POLL_DELAY_NS;
+ }
+ mutex_lock(&ps_data->io_lock);
+ if(value != ktime_to_ns(ps_data->als_poll_delay))
+ ps_data->als_poll_delay = ns_to_ktime(value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_ir_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t reading;
+ reading = stk3x1x_get_ir_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+static ssize_t stk_ps_code_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t reading;
+ reading = stk3x1x_get_ps_reading(ps_data);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reading);
+}
+
+static ssize_t stk_ps_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t enable, ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ mutex_lock(&ps_data->io_lock);
+ enable = (ps_data->ps_enabled)?1:0;
+ mutex_unlock(&ps_data->io_lock);
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_PS_MASK)?1:0;
+
+ if(enable != ret)
+ printk(KERN_ERR "%s: driver and sensor mismatch! driver_enable=0x%x, sensor_enable=%x\n", __func__, enable, ret);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_ps_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable PS : %d\n", __func__, en);
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_enable_ps(ps_data, en);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_ps_enable_aso_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ret;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ ret = i2c_smbus_read_byte_data(ps_data->client,STK_STATE_REG);
+ ret = (ret & STK_STATE_EN_ASO_MASK)?1:0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t stk_ps_enable_aso_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint8_t en;
+ int32_t ret;
+ uint8_t w_state_reg;
+
+ if (sysfs_streq(buf, "1"))
+ en = 1;
+ else if (sysfs_streq(buf, "0"))
+ en = 0;
+ else
+ {
+ printk(KERN_ERR "%s, invalid value %d\n", __func__, *buf);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: Enable PS ASO : %d\n", __func__, en);
+
+ ret = i2c_smbus_read_byte_data(ps_data->client, STK_STATE_REG);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ w_state_reg = (uint8_t)(ret & (~STK_STATE_EN_ASO_MASK));
+ if(en)
+ w_state_reg |= STK_STATE_EN_ASO_MASK;
+
+ ret = i2c_smbus_write_byte_data(ps_data->client, STK_STATE_REG, w_state_reg);
+ if (ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+
+ return size;
+}
+
+
+static ssize_t stk_ps_offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t word_data, tmp_word_data;
+
+ tmp_word_data = i2c_smbus_read_word_data(ps_data->client, STK_DATA1_OFFSET_REG);
+ if(tmp_word_data < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, tmp_word_data);
+ return tmp_word_data;
+ }
+ word_data = ((tmp_word_data & 0xFF00) >> 8) | ((tmp_word_data & 0x00FF) << 8) ;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", word_data);
+}
+
+static ssize_t stk_ps_offset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ uint16_t offset;
+
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ if(value > 65535)
+ {
+ printk(KERN_ERR "%s: invalid value, offset=%ld\n", __func__, value);
+ return -EINVAL;
+ }
+
+	offset = (uint16_t)(((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8));
+ ret = i2c_smbus_write_word_data(ps_data->client,STK_DATA1_OFFSET_REG,offset);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s: write i2c error\n", __func__);
+ return ret;
+ }
+ return size;
+}
+
+
+static ssize_t stk_ps_distance_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ int32_t dist=1, ret;
+
+ mutex_lock(&ps_data->io_lock);
+ ret = stk3x1x_get_flag(ps_data);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s: stk3x1x_get_flag failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ dist = (ret & STK_FLG_NF_MASK)?1:0;
+
+ ps_data->ps_distance_last = dist;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, dist);
+ input_sync(ps_data->ps_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ printk(KERN_INFO "%s: ps input event %d cm\n",__func__, dist);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", dist);
+}
+
+
+static ssize_t stk_ps_distance_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ ps_data->ps_distance_last = value;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, value);
+ input_sync(ps_data->ps_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ printk(KERN_INFO "%s: ps input event %ld cm\n",__func__, value);
+ return size;
+}
+
+
+static ssize_t stk_ps_code_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_thd_l1_reg, ps_thd_l2_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ ps_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_PS_REG);
+ if(ps_thd_l1_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l1_reg);
+ return -EINVAL;
+ }
+ ps_thd_l2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_PS_REG);
+ if(ps_thd_l2_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_l2_reg);
+ return -EINVAL;
+ }
+ mutex_unlock(&ps_data->io_lock);
+ ps_thd_l1_reg = ps_thd_l1_reg<<8 | ps_thd_l2_reg;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_l1_reg);
+}
+
+
+static ssize_t stk_ps_code_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_set_ps_thd_l(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_ps_code_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_thd_h1_reg, ps_thd_h2_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ ps_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_PS_REG);
+ if(ps_thd_h1_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h1_reg);
+ return -EINVAL;
+ }
+ ps_thd_h2_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_PS_REG);
+ if(ps_thd_h2_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, ps_thd_h2_reg);
+ return -EINVAL;
+ }
+ mutex_unlock(&ps_data->io_lock);
+ ps_thd_h1_reg = ps_thd_h1_reg<<8 | ps_thd_h2_reg;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ps_thd_h1_reg);
+}
+
+
+static ssize_t stk_ps_code_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ stk3x1x_set_ps_thd_h(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+#if 0
+static ssize_t stk_als_lux_thd_l_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t als_thd_l0_reg,als_thd_l1_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t als_lux;
+
+ mutex_lock(&ps_data->io_lock);
+ als_thd_l0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL1_ALS_REG);
+ als_thd_l1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDL2_ALS_REG);
+ if(als_thd_l0_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l0_reg);
+ return -EINVAL;
+ }
+ if(als_thd_l1_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_l1_reg);
+ return -EINVAL;
+ }
+ als_thd_l0_reg|=(als_thd_l1_reg<<8);
+ als_lux = stk_alscode2lux(ps_data, als_thd_l0_reg);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux);
+}
+
+
+static ssize_t stk_als_lux_thd_l_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ value = stk_lux2alscode(ps_data, value);
+ stk3x1x_set_als_thd_l(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+
+static ssize_t stk_als_lux_thd_h_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t als_thd_h0_reg,als_thd_h1_reg;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ uint32_t als_lux;
+
+ mutex_lock(&ps_data->io_lock);
+ als_thd_h0_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH1_ALS_REG);
+ als_thd_h1_reg = i2c_smbus_read_byte_data(ps_data->client,STK_THDH2_ALS_REG);
+ if(als_thd_h0_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h0_reg);
+ return -EINVAL;
+ }
+ if(als_thd_h1_reg < 0)
+ {
+ printk(KERN_ERR "%s fail, err=0x%x", __func__, als_thd_h1_reg);
+ return -EINVAL;
+ }
+ als_thd_h0_reg|=(als_thd_h1_reg<<8);
+ als_lux = stk_alscode2lux(ps_data, als_thd_h0_reg);
+ mutex_unlock(&ps_data->io_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", als_lux);
+}
+
+
+static ssize_t stk_als_lux_thd_h_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+ ret = strict_strtoul(buf, 10, &value);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ mutex_lock(&ps_data->io_lock);
+ value = stk_lux2alscode(ps_data, value);
+ stk3x1x_set_als_thd_h(ps_data, value);
+ mutex_unlock(&ps_data->io_lock);
+ return size;
+}
+#endif
+
+
+static ssize_t stk_all_reg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int32_t ps_reg[27];
+ uint8_t cnt;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+ mutex_lock(&ps_data->io_lock);
+ for(cnt=0;cnt<25;cnt++)
+ {
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, (cnt));
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk(KERN_ERR "stk_all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ else
+ {
+ printk(KERN_INFO "reg[0x%2X]=0x%2X\n", cnt, ps_reg[cnt]);
+ }
+ }
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_PDT_ID_REG);
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ printk( KERN_INFO "reg[0x%x]=0x%2X\n", STK_PDT_ID_REG, ps_reg[cnt]);
+ cnt++;
+ ps_reg[cnt] = i2c_smbus_read_byte_data(ps_data->client, STK_RSRVD_REG);
+ if(ps_reg[cnt] < 0)
+ {
+ mutex_unlock(&ps_data->io_lock);
+ printk( KERN_ERR "all_reg_show:i2c_smbus_read_byte_data fail, ret=%d", ps_reg[cnt]);
+ return -EINVAL;
+ }
+ printk( KERN_INFO "reg[0x%x]=0x%2X\n", STK_RSRVD_REG, ps_reg[cnt]);
+ mutex_unlock(&ps_data->io_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X %2X %2X %2X,%2X %2X\n",
+ ps_reg[0], ps_reg[1], ps_reg[2], ps_reg[3], ps_reg[4], ps_reg[5], ps_reg[6], ps_reg[7], ps_reg[8],
+ ps_reg[9], ps_reg[10], ps_reg[11], ps_reg[12], ps_reg[13], ps_reg[14], ps_reg[15], ps_reg[16], ps_reg[17],
+ ps_reg[18], ps_reg[19], ps_reg[20], ps_reg[21], ps_reg[22], ps_reg[23], ps_reg[24], ps_reg[25], ps_reg[26]);
+}
+
+static ssize_t stk_recv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+
+static ssize_t stk_recv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+ unsigned long value = 0;
+ int ret;
+ int32_t recv_data;
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ if((ret = strict_strtoul(buf, 16, &value)) < 0)
+ {
+ printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+ return ret;
+ }
+ recv_data = i2c_smbus_read_byte_data(ps_data->client,value);
+ printk("%s: reg 0x%x=0x%x\n", __func__, (int)value, recv_data);
+ return size;
+}
+
+
+static ssize_t stk_send_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+
+static ssize_t stk_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long addr, cmd;
+ u8 addr_u8, cmd_u8;
+ int32_t ret, i;
+ char *token[10];
+ struct stk3x1x_data *ps_data = dev_get_drvdata(dev);
+
+ for (i = 0; i < 2; i++)
+ token[i] = strsep((char **)&buf, " ");
+	if((ret = strict_strtoul(token[0], 16, &addr)) < 0)
+	{
+		printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+		return ret;
+	}
+	if((ret = strict_strtoul(token[1], 16, &cmd)) < 0)
+	{
+		printk(KERN_ERR "%s:strict_strtoul failed, ret=0x%x\n", __func__, ret);
+		return ret;
+	}
+	printk(KERN_INFO "%s: write reg 0x%lx=0x%lx\n", __func__, addr, cmd);
+
+ addr_u8 = (u8) addr;
+ cmd_u8 = (u8) cmd;
+ //mutex_lock(&ps_data->io_lock);
+ ret = i2c_smbus_write_byte_data(ps_data->client,addr_u8,cmd_u8);
+ //mutex_unlock(&ps_data->io_lock);
+ if (0 != ret)
+ {
+ printk(KERN_ERR "%s: i2c_smbus_write_byte_data fail\n", __func__);
+ return ret;
+ }
+
+ return size;
+}
+
+
+static struct device_attribute als_enable_attribute = __ATTR(enable,0664,stk_als_enable_show,stk_als_enable_store);
+static struct device_attribute als_lux_attribute = __ATTR(lux,0664,stk_als_lux_show,stk_als_lux_store);
+static struct device_attribute als_code_attribute = __ATTR(code, 0444, stk_als_code_show, NULL);
+static struct device_attribute als_transmittance_attribute = __ATTR(transmittance,0664,stk_als_transmittance_show,stk_als_transmittance_store);
+static struct device_attribute als_poll_delay_attribute = __ATTR(delay,0664,stk_als_delay_show,stk_als_delay_store);
+static struct device_attribute als_ir_code_attribute = __ATTR(ircode,0444,stk_als_ir_code_show,NULL);
+
+
+static struct attribute *stk_als_attrs [] =
+{
+ &als_enable_attribute.attr,
+ &als_lux_attribute.attr,
+ &als_code_attribute.attr,
+ &als_transmittance_attribute.attr,
+ &als_poll_delay_attribute.attr,
+ &als_ir_code_attribute.attr,
+ NULL
+};
+
+static struct attribute_group stk_als_attribute_group = {
+ .name = "driver",
+ .attrs = stk_als_attrs,
+};
+
+
+static struct device_attribute ps_enable_attribute = __ATTR(enable,0664,stk_ps_enable_show,stk_ps_enable_store);
+static struct device_attribute ps_enable_aso_attribute = __ATTR(enableaso,0664,stk_ps_enable_aso_show,stk_ps_enable_aso_store);
+static struct device_attribute ps_distance_attribute = __ATTR(distance,0664,stk_ps_distance_show, stk_ps_distance_store);
+static struct device_attribute ps_offset_attribute = __ATTR(offset,0664,stk_ps_offset_show, stk_ps_offset_store);
+static struct device_attribute ps_code_attribute = __ATTR(code, 0444, stk_ps_code_show, NULL);
+static struct device_attribute ps_code_thd_l_attribute = __ATTR(codethdl,0664,stk_ps_code_thd_l_show,stk_ps_code_thd_l_store);
+static struct device_attribute ps_code_thd_h_attribute = __ATTR(codethdh,0664,stk_ps_code_thd_h_show,stk_ps_code_thd_h_store);
+static struct device_attribute recv_attribute = __ATTR(recv,0664,stk_recv_show,stk_recv_store);
+static struct device_attribute send_attribute = __ATTR(send,0664,stk_send_show, stk_send_store);
+static struct device_attribute all_reg_attribute = __ATTR(allreg, 0444, stk_all_reg_show, NULL);
+
+static struct attribute *stk_ps_attrs [] =
+{
+ &ps_enable_attribute.attr,
+ &ps_enable_aso_attribute.attr,
+ &ps_distance_attribute.attr,
+ &ps_offset_attribute.attr,
+ &ps_code_attribute.attr,
+ &ps_code_thd_l_attribute.attr,
+ &ps_code_thd_h_attribute.attr,
+ &recv_attribute.attr,
+ &send_attribute.attr,
+ &all_reg_attribute.attr,
+ NULL
+};
+
+static struct attribute_group stk_ps_attribute_group = {
+ .name = "driver",
+ .attrs = stk_ps_attrs,
+};
+
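+/*
+ * Polling mode: a hrtimer periodically queues work that reads the
+ * sensor and reports input events. In interrupt mode the work is
+ * queued from the IRQ handler instead.
+ */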
+#ifdef STK_POLL_ALS
+static enum hrtimer_restart stk_als_timer_func(struct hrtimer *timer)
+{
+ struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, als_timer);
+ queue_work(ps_data->stk_als_wq, &ps_data->stk_als_work);
+ hrtimer_forward_now(&ps_data->als_timer, ps_data->als_poll_delay);
+ return HRTIMER_RESTART;
+}
+
+static void stk_als_work_func(struct work_struct *work)
+{
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_als_work);
+ int32_t reading;
+
+ mutex_lock(&ps_data->io_lock);
+ reading = stk3x1x_get_als_reading(ps_data);
+ if(reading < 0)
+ return;
+ ps_data->als_lux_last = stk_alscode2lux(ps_data, reading);
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last);
+ input_sync(ps_data->als_input_dev);
+ mutex_unlock(&ps_data->io_lock);
+ //printk(KERN_INFO "%s: als input event %d lux\n",__func__, ps_data->als_lux_last);
+}
+#endif
+
+static enum hrtimer_restart stk_ps_timer_func(struct hrtimer *timer)
+{
+ struct stk3x1x_data *ps_data = container_of(timer, struct stk3x1x_data, ps_timer);
+ queue_work(ps_data->stk_ps_wq, &ps_data->stk_ps_work);
+#ifdef STK_POLL_PS
+ hrtimer_forward_now(&ps_data->ps_timer, ps_data->ps_poll_delay);
+ return HRTIMER_RESTART;
+#else
+ hrtimer_cancel(&ps_data->ps_timer);
+ return HRTIMER_NORESTART;
+#endif
+}
+
+static void stk_ps_work_func(struct work_struct *work)
+{
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_ps_work);
+ uint32_t reading;
+ int32_t near_far_state;
+	int32_t org_flag_reg;
+ int32_t ret;
+ uint8_t disable_flag = 0;
+ mutex_lock(&ps_data->io_lock);
+
+ org_flag_reg = stk3x1x_get_flag(ps_data);
+ if(org_flag_reg < 0)
+ {
+ printk(KERN_ERR "%s: get_status_reg fail, ret=%d", __func__, org_flag_reg);
+ goto err_i2c_rw;
+ }
+ near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0;
+ reading = stk3x1x_get_ps_reading(ps_data);
+ if(ps_data->ps_distance_last != near_far_state)
+ {
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+ }
+ ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:stk3x1x_set_flag fail, ret=%d\n", __func__, ret);
+ goto err_i2c_rw;
+ }
+
+ mutex_unlock(&ps_data->io_lock);
+ return;
+
+err_i2c_rw:
+ mutex_unlock(&ps_data->io_lock);
+ msleep(30);
+ return;
+}
+
+
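+/*
+ * Interrupt bottom half: reads the flag register, reports whichever of
+ * ALS/PS fired, clears the handled flag bits and re-enables the IRQ.
+ */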
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static void stk_work_func(struct work_struct *work)
+{
+	int32_t reading;
+#if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02))
+ int32_t ret;
+ uint8_t disable_flag = 0;
+	int32_t org_flag_reg;
+#endif /* #if ((STK_INT_PS_MODE != 0x03) && (STK_INT_PS_MODE != 0x02)) */
+
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ uint32_t nLuxIndex;
+#endif
+ struct stk3x1x_data *ps_data = container_of(work, struct stk3x1x_data, stk_work);
+ int32_t near_far_state;
+
+ mutex_lock(&ps_data->io_lock);
+
+#if (STK_INT_PS_MODE == 0x03)
+ near_far_state = gpio_get_value(ps_data->int_pin);
+#elif (STK_INT_PS_MODE == 0x02)
+ near_far_state = !(gpio_get_value(ps_data->int_pin));
+#endif
+
+#if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02))
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event %d cm, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+#else
+ /* mode 0x01 or 0x04 */
+ org_flag_reg = stk3x1x_get_flag(ps_data);
+ if(org_flag_reg < 0)
+ {
+ printk(KERN_ERR "%s: get_status_reg fail, org_flag_reg=%d", __func__, org_flag_reg);
+ goto err_i2c_rw;
+ }
+
+ if (org_flag_reg & STK_FLG_ALSINT_MASK)
+ {
+ disable_flag |= STK_FLG_ALSINT_MASK;
+ reading = stk3x1x_get_als_reading(ps_data);
+ if(reading < 0)
+ {
+ printk(KERN_ERR "%s: stk3x1x_get_als_reading fail, ret=%d", __func__, reading);
+ goto err_i2c_rw;
+ }
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ nLuxIndex = stk_get_lux_interval_index(reading);
+ stk3x1x_set_als_thd_h(ps_data, code_threshold_table[nLuxIndex]);
+ stk3x1x_set_als_thd_l(ps_data, code_threshold_table[nLuxIndex-1]);
+#else
+ stk_als_set_new_thd(ps_data, reading);
+#endif /* CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD */
+ ps_data->als_lux_last = stk_alscode2lux(ps_data, reading);
+ input_report_abs(ps_data->als_input_dev, ABS_MISC, ps_data->als_lux_last);
+ input_sync(ps_data->als_input_dev);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: als input event %d lux\n",__func__, ps_data->als_lux_last);
+#endif
+ }
+ if (org_flag_reg & STK_FLG_PSINT_MASK)
+ {
+ disable_flag |= STK_FLG_PSINT_MASK;
+ near_far_state = (org_flag_reg & STK_FLG_NF_MASK)?1:0;
+
+ ps_data->ps_distance_last = near_far_state;
+ input_report_abs(ps_data->ps_input_dev, ABS_DISTANCE, near_far_state);
+ input_sync(ps_data->ps_input_dev);
+ wake_lock_timeout(&ps_data->ps_wakelock, 3*HZ);
+ reading = stk3x1x_get_ps_reading(ps_data);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: ps input event=%d, ps code = %d\n",__func__, near_far_state, reading);
+#endif
+ }
+
+ ret = stk3x1x_set_flag(ps_data, org_flag_reg, disable_flag);
+ if(ret < 0)
+ {
+ printk(KERN_ERR "%s:reset_int_flag fail, ret=%d\n", __func__, ret);
+ goto err_i2c_rw;
+ }
+#endif
+
+ msleep(1);
+ enable_irq(ps_data->irq);
+ mutex_unlock(&ps_data->io_lock);
+ return;
+
+err_i2c_rw:
+ mutex_unlock(&ps_data->io_lock);
+ msleep(30);
+ enable_irq(ps_data->irq);
+ return;
+}
+#endif
+
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static irqreturn_t stk_oss_irq_handler(int irq, void *data)
+{
+ struct stk3x1x_data *pData = data;
+ disable_irq_nosync(irq);
+ queue_work(pData->stk_wq,&pData->stk_work);
+ return IRQ_HANDLED;
+}
+#endif /* #if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS)) */
+static int32_t stk3x1x_init_all_setting(struct i2c_client *client, struct stk3x1x_platform_data *plat_data)
+{
+ int32_t ret;
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+
+ mutex_lock(&ps_data->io_lock);
+ ps_data->als_enabled = false;
+ ps_data->ps_enabled = false;
+ mutex_unlock(&ps_data->io_lock);
+
+ ret = stk3x1x_software_reset(ps_data);
+ if(ret < 0)
+ return ret;
+
+	ret = stk3x1x_check_pid(ps_data);
+ if(ret < 0)
+ return ret;
+
+ ret = stk3x1x_init_all_reg(ps_data, plat_data);
+ if(ret < 0)
+ return ret;
+#ifndef CONFIG_STK_PS_ALS_USE_CHANGE_THRESHOLD
+ stk_init_code_threshold_table(ps_data);
+#endif
+ return 0;
+}
+
+#if (!defined(STK_POLL_PS) || !defined(STK_POLL_ALS))
+static int stk3x1x_setup_irq(struct i2c_client *client)
+{
+ int irq, err = -EIO;
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+
+ irq = gpio_to_irq(ps_data->int_pin);
+#ifdef STK_DEBUG_PRINTF
+ printk(KERN_INFO "%s: int pin #=%d, irq=%d\n",__func__, ps_data->int_pin, irq);
+#endif
+ if (irq <= 0)
+ {
+ printk(KERN_ERR "irq number is not specified, irq # = %d, int pin=%d\n",irq, ps_data->int_pin);
+ return irq;
+ }
+ ps_data->irq = irq;
+ err = gpio_request(ps_data->int_pin,"stk-int");
+ if(err < 0)
+ {
+ printk(KERN_ERR "%s: gpio_request, err=%d", __func__, err);
+ return err;
+ }
+ err = gpio_direction_input(ps_data->int_pin);
+ if(err < 0)
+ {
+ printk(KERN_ERR "%s: gpio_direction_input, err=%d", __func__, err);
+ return err;
+ }
+#if ((STK_INT_PS_MODE == 0x03) || (STK_INT_PS_MODE == 0x02))
+ err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING, DEVICE_NAME, ps_data);
+#else
+ err = request_any_context_irq(irq, stk_oss_irq_handler, IRQF_TRIGGER_LOW, DEVICE_NAME, ps_data);
+#endif
+ if (err < 0)
+ {
+ printk(KERN_WARNING "%s: request_any_context_irq(%d) failed for (%d)\n", __func__, irq, err);
+ goto err_request_any_context_irq;
+ }
+ disable_irq(irq);
+
+ return 0;
+err_request_any_context_irq:
+ gpio_free(ps_data->int_pin);
+ return err;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void stk3x1x_early_suspend(struct early_suspend *h)
+{
+ struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend);
+#ifndef STK_POLL_PS
+ int err;
+#endif
+
+ printk(KERN_INFO "%s", __func__);
+ mutex_lock(&ps_data->io_lock);
+ if(ps_data->als_enabled)
+ {
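+		/* turn ALS off for suspend but leave the flag set so
+		 * late_resume knows to re-enable it */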
+ stk3x1x_enable_als(ps_data, 0);
+ ps_data->als_enabled = true;
+ }
+ if(ps_data->ps_enabled)
+ {
+#ifdef STK_POLL_PS
+ wake_lock(&ps_data->ps_nosuspend_wl);
+#else
+ err = enable_irq_wake(ps_data->irq);
+ if (err)
+ printk(KERN_WARNING "%s: set_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err);
+#endif
+ }
+ mutex_unlock(&ps_data->io_lock);
+ return;
+}
+
+static void stk3x1x_late_resume(struct early_suspend *h)
+{
+ struct stk3x1x_data *ps_data = container_of(h, struct stk3x1x_data, stk_early_suspend);
+#ifndef STK_POLL_PS
+ int err;
+#endif
+
+ printk(KERN_INFO "%s", __func__);
+ mutex_lock(&ps_data->io_lock);
+ if(ps_data->als_enabled)
+ stk3x1x_enable_als(ps_data, 1);
+
+ if(ps_data->ps_enabled)
+ {
+#ifdef STK_POLL_PS
+ wake_lock(&ps_data->ps_nosuspend_wl);
+#else
+ err = disable_irq_wake(ps_data->irq);
+ if (err)
+ printk(KERN_WARNING "%s: disable_irq_wake(%d) failed, err=(%d)\n", __func__, ps_data->irq, err);
+#endif
+ }
+ mutex_unlock(&ps_data->io_lock);
+ return;
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+
+static int stk3x1x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = -ENODEV;
+ struct stk3x1x_data *ps_data;
+ struct stk3x1x_platform_data *plat_data;
+ printk(KERN_INFO "%s: driver version = %s\n", __func__, DRIVER_VERSION);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ {
+ printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_BYTE_DATA\n", __func__);
+ return -ENODEV;
+ }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ {
+ printk(KERN_ERR "%s: No Support for I2C_FUNC_SMBUS_WORD_DATA\n", __func__);
+ return -ENODEV;
+ }
+
+ ps_data = kzalloc(sizeof(struct stk3x1x_data),GFP_KERNEL);
+ if(!ps_data)
+ {
+ printk(KERN_ERR "%s: failed to allocate stk3x1x_data\n", __func__);
+ return -ENOMEM;
+ }
+ ps_data->client = client;
+ i2c_set_clientdata(client,ps_data);
+ mutex_init(&ps_data->io_lock);
+ wake_lock_init(&ps_data->ps_wakelock,WAKE_LOCK_SUSPEND, "stk_input_wakelock");
+
+#ifdef STK_POLL_PS
+ wake_lock_init(&ps_data->ps_nosuspend_wl,WAKE_LOCK_SUSPEND, "stk_nosuspend_wakelock");
+#endif
+ if(client->dev.platform_data != NULL)
+ {
+ plat_data = client->dev.platform_data;
+ ps_data->als_transmittance = plat_data->transmittance;
+ ps_data->int_pin = plat_data->int_pin;
+ if(ps_data->als_transmittance == 0)
+ {
+ printk(KERN_ERR "%s: Please set als_transmittance in platform data\n", __func__);
+ goto err_als_input_allocate;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "%s: no stk3x1x platform data!\n", __func__);
+ goto err_als_input_allocate;
+ }
+
+ ps_data->als_input_dev = input_allocate_device();
+ if (ps_data->als_input_dev==NULL)
+ {
+ printk(KERN_ERR "%s: could not allocate als device\n", __func__);
+ err = -ENOMEM;
+ goto err_als_input_allocate;
+ }
+ ps_data->ps_input_dev = input_allocate_device();
+ if (ps_data->ps_input_dev==NULL)
+ {
+ printk(KERN_ERR "%s: could not allocate ps device\n", __func__);
+ err = -ENOMEM;
+ goto err_ps_input_allocate;
+ }
+ ps_data->als_input_dev->name = ALS_NAME;
+ ps_data->ps_input_dev->name = PS_NAME;
+ set_bit(EV_ABS, ps_data->als_input_dev->evbit);
+ set_bit(EV_ABS, ps_data->ps_input_dev->evbit);
+ input_set_abs_params(ps_data->als_input_dev, ABS_MISC, 0, stk_alscode2lux(ps_data, (1<<16)-1), 0, 0);
+ input_set_abs_params(ps_data->ps_input_dev, ABS_DISTANCE, 0,1, 0, 0);
+ err = input_register_device(ps_data->als_input_dev);
+ if (err<0)
+ {
+ printk(KERN_ERR "%s: can not register als input device\n", __func__);
+ goto err_als_input_register;
+ }
+ err = input_register_device(ps_data->ps_input_dev);
+ if (err<0)
+ {
+ printk(KERN_ERR "%s: can not register ps input device\n", __func__);
+ goto err_ps_input_register;
+ }
+
+ err = sysfs_create_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+ if (err < 0)
+ {
+ printk(KERN_ERR "%s:could not create sysfs group for als\n", __func__);
+ goto err_als_sysfs_create_group;
+ }
+ err = sysfs_create_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+ if (err < 0)
+ {
+ printk(KERN_ERR "%s:could not create sysfs group for ps\n", __func__);
+ goto err_ps_sysfs_create_group;
+ }
+ input_set_drvdata(ps_data->als_input_dev, ps_data);
+ input_set_drvdata(ps_data->ps_input_dev, ps_data);
+
+#ifdef STK_POLL_ALS
+ ps_data->stk_als_wq = create_singlethread_workqueue("stk_als_wq");
+ INIT_WORK(&ps_data->stk_als_work, stk_als_work_func);
+ hrtimer_init(&ps_data->als_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ps_data->als_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC);
+ ps_data->als_timer.function = stk_als_timer_func;
+#endif
+
+ ps_data->stk_ps_wq = create_singlethread_workqueue("stk_ps_wq");
+ INIT_WORK(&ps_data->stk_ps_work, stk_ps_work_func);
+ hrtimer_init(&ps_data->ps_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ps_data->ps_poll_delay = ns_to_ktime(110 * NSEC_PER_MSEC);
+ ps_data->ps_timer.function = stk_ps_timer_func;
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ ps_data->stk_wq = create_singlethread_workqueue("stk_wq");
+ INIT_WORK(&ps_data->stk_work, stk_work_func);
+ err = stk3x1x_setup_irq(client);
+ if(err < 0)
+ goto err_stk3x1x_setup_irq;
+#endif
+
+ err = stk3x1x_init_all_setting(client, plat_data);
+ if(err < 0)
+ goto err_init_all_setting;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ps_data->stk_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ps_data->stk_early_suspend.suspend = stk3x1x_early_suspend;
+ ps_data->stk_early_suspend.resume = stk3x1x_late_resume;
+ register_early_suspend(&ps_data->stk_early_suspend);
+#endif
+ printk(KERN_INFO "%s: probe successfully", __func__);
+ return 0;
+
+err_init_all_setting:
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ free_irq(ps_data->irq, ps_data);
+ gpio_free(plat_data->int_pin);
+#endif
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+err_stk3x1x_setup_irq:
+#endif
+#ifdef STK_POLL_ALS
+ hrtimer_try_to_cancel(&ps_data->als_timer);
+ destroy_workqueue(ps_data->stk_als_wq);
+#endif
+ destroy_workqueue(ps_data->stk_ps_wq);
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ destroy_workqueue(ps_data->stk_wq);
+#endif
+ sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+err_ps_sysfs_create_group:
+ sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+err_als_sysfs_create_group:
+ input_unregister_device(ps_data->ps_input_dev);
+err_ps_input_register:
+ input_unregister_device(ps_data->als_input_dev);
+err_als_input_register:
+ input_free_device(ps_data->ps_input_dev);
+err_ps_input_allocate:
+ input_free_device(ps_data->als_input_dev);
+err_als_input_allocate:
+#ifdef STK_POLL_PS
+ wake_lock_destroy(&ps_data->ps_nosuspend_wl);
+#endif
+ wake_lock_destroy(&ps_data->ps_wakelock);
+ mutex_destroy(&ps_data->io_lock);
+ kfree(ps_data);
+ return err;
+}
+
+
+static int stk3x1x_remove(struct i2c_client *client)
+{
+ struct stk3x1x_data *ps_data = i2c_get_clientdata(client);
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ free_irq(ps_data->irq, ps_data);
+ gpio_free(ps_data->int_pin);
+#endif
+#ifdef STK_POLL_ALS
+ hrtimer_try_to_cancel(&ps_data->als_timer);
+ destroy_workqueue(ps_data->stk_als_wq);
+#endif
+ destroy_workqueue(ps_data->stk_ps_wq);
+#if (!defined(STK_POLL_ALS) || !defined(STK_POLL_PS))
+ destroy_workqueue(ps_data->stk_wq);
+#endif
+ sysfs_remove_group(&ps_data->ps_input_dev->dev.kobj, &stk_ps_attribute_group);
+ sysfs_remove_group(&ps_data->als_input_dev->dev.kobj, &stk_als_attribute_group);
+ input_unregister_device(ps_data->ps_input_dev);
+ input_unregister_device(ps_data->als_input_dev);
+ input_free_device(ps_data->ps_input_dev);
+ input_free_device(ps_data->als_input_dev);
+#ifdef STK_POLL_PS
+ wake_lock_destroy(&ps_data->ps_nosuspend_wl);
+#endif
+ wake_lock_destroy(&ps_data->ps_wakelock);
+ mutex_destroy(&ps_data->io_lock);
+ kfree(ps_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id stk_ps_id[] =
+{
+ { "stk_ps", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, stk_ps_id);
+
+static struct i2c_driver stk_ps_driver =
+{
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = stk3x1x_probe,
+ .remove = stk3x1x_remove,
+ .id_table = stk_ps_id,
+};
+
+
+static int __init stk3x1x_init(void)
+{
+	return i2c_add_driver(&stk_ps_driver);
+}
+
+static void __exit stk3x1x_exit(void)
+{
+ i2c_del_driver(&stk_ps_driver);
+}
+
+module_init(stk3x1x_init);
+module_exit(stk3x1x_exit);
+MODULE_AUTHOR("Lex Hsieh <lex_hsieh@sitronix.com.tw>");
+MODULE_DESCRIPTION("Sensortek stk3x1x Proximity Sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index b9c4cae..53c7c30 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -692,7 +692,6 @@
if (ret)
goto fail;
- ret = __flush_iotlb_va(domain, va);
fail:
mutex_unlock(&msm_iommu_lock);
return ret;
@@ -742,7 +741,6 @@
if (ret)
goto fail;
- __flush_iotlb(domain);
fail:
mutex_unlock(&msm_iommu_lock);
return ret;
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 474efdf..78fffb2 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -371,7 +371,7 @@
map.info.ctx_id = ctx_drvdata->num;
map.info.va = va;
map.info.size = len;
- map.flags = IOMMU_TLBINVAL_FLAG;
+ map.flags = 0;
flush_va = &pa;
flush_pa = virt_to_phys(&pa);
@@ -421,7 +421,7 @@
map.info.ctx_id = ctx_drvdata->num;
map.info.va = va;
map.info.size = len;
- map.flags = IOMMU_TLBINVAL_FLAG;
+ map.flags = 0;
if (sg->length == len) {
pa = get_phys_addr(sg);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 692a04e..9d606a1 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -128,6 +128,20 @@
DMX_IDX_H264_NON_IDR_START
};
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+ {0x00, 0x00, 0x01, 0x09},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+ {0x00, 0x00, 0x01, 0x06},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SEI
+};
+
static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
{0x00, 0x00, 0x01, 0x0F},
{0xFF, 0xFF, 0xFF, 0xFF},
@@ -1791,6 +1805,12 @@
case DMX_IDX_H264_NON_IDR_START:
return &h264_non_idr;
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ return &h264_non_access_unit_del;
+
+ case DMX_IDX_H264_SEI:
+ return &h264_non_sei;
+
case DMX_IDX_VC1_SEQ_HEADER:
return &vc1_seq_hdr;
@@ -1913,6 +1933,20 @@
}
if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_SEI)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
(feed->idx_params.types &
(DMX_IDX_VC1_SEQ_HEADER |
DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index d3618c0..c70e151 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -328,6 +328,10 @@
pr_debug("%s: num planes :%c\n", __func__,
user_fmt->num_planes);
+	/* num_planes must be bounds-checked; otherwise the
+	 * loop below could read past plane_sizes[] */
+ if (WARN_ON(user_fmt->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
for (i = 0; i < user_fmt->num_planes; i++)
pr_debug("%s: plane size[%d]\n", __func__,
user_fmt->plane_sizes[i]);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f1f4c17..8c42ed2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -90,7 +90,8 @@
void (*enable_wm) (struct vfe_device *vfe_dev,
uint8_t wm_idx, uint8_t enable);
void (*cfg_io_format) (struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info);
+ enum msm_vfe_axi_stream_src stream_src,
+ uint32_t io_format);
void (*cfg_framedrop) (struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
void (*clear_framedrop) (struct vfe_device *vfe_dev,
@@ -289,6 +290,7 @@
enum msm_vfe_inputmux input_mux;
uint32_t width;
long pixel_clock;
+ uint32_t input_format;
};
enum msm_wm_ub_cfg_type {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 07a66e6..aac973e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -144,7 +144,7 @@
/* CGC_OVERRIDE */
msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
/* BUS_CFG */
- msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
+ msm_camera_io_w(0x00000009, vfe_dev->vfe_base + 0x3C);
msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
@@ -481,11 +481,11 @@
}
static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
{
int bpp, bpp_reg = 0;
uint32_t io_format_reg;
- bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ bpp = msm_isp_get_bit_per_pixel(io_format);
switch (bpp) {
case 8:
@@ -499,7 +499,9 @@
break;
}
io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
- switch (stream_info->stream_src) {
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
case CAMIF_RAW:
io_format_reg &= 0xFFFFCFFF;
io_format_reg |= bpp_reg << 12;
@@ -508,8 +510,6 @@
io_format_reg &= 0xFFFFFFC8;
io_format_reg |= bpp_reg << 4;
break;
- case PIX_ENCODER:
- case PIX_VIEWFINDER:
case RDI_INTF_0:
case RDI_INTF_1:
case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 2db25a6..84b95f1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -688,11 +688,11 @@
}
static void msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
{
int bpp, bpp_reg = 0;
uint32_t io_format_reg;
- bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ bpp = msm_isp_get_bit_per_pixel(io_format);
switch (bpp) {
case 8:
@@ -706,7 +706,9 @@
break;
}
io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
- switch (stream_info->stream_src) {
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
case CAMIF_RAW:
io_format_reg &= 0xFFFFCFFF;
io_format_reg |= bpp_reg << 12;
@@ -715,8 +717,6 @@
io_format_reg &= 0xFFFFFFC8;
io_format_reg |= bpp_reg << 4;
break;
- case PIX_ENCODER:
- case PIX_VIEWFINDER:
case RDI_INTF_0:
case RDI_INTF_1:
case RDI_INTF_2:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index d3138ed..5b7658d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -474,6 +474,7 @@
int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
+ uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
@@ -497,10 +498,20 @@
stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
- if (stream_cfg_cmd->stream_src == CAMIF_RAW ||
- stream_cfg_cmd->stream_src == IDEAL_RAW)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_io_format(vfe_dev, stream_info);
+ if (stream_info->stream_src < RDI_INTF_0) {
+ io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_info->stream_src == CAMIF_RAW &&
+ io_format != stream_info->output_format)
+ pr_warn("%s: Overriding input format\n",
+ __func__);
+
+ io_format = stream_info->output_format;
+ }
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+ vfe_dev, stream_info->stream_src, io_format);
+ }
msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
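
The new request-path logic above reads as a small pure function: pixel-interface streams inherit the CAMIF input format, raw streams substitute their own output format, and a CAMIF_RAW mismatch only warrants a warning. A hedged restatement in plain C (enum values are placeholders; only their order relative to RDI_INTF_0 matters):

    #include <stdint.h>
    #include <stdio.h>

    enum stream_src { PIX_ENCODER, PIX_VIEWFINDER, CAMIF_RAW, IDEAL_RAW,
                      RDI_INTF_0 };

    /* Mirrors the request path: returns the io_format to program, or 0
     * when the source is an RDI and no format programming happens. */
    static uint32_t pick_io_format(enum stream_src src, uint32_t input_format,
                                   uint32_t output_format, int *warn)
    {
        *warn = 0;
        if (src >= RDI_INTF_0)
            return 0;
        if (src == CAMIF_RAW || src == IDEAL_RAW) {
            if (src == CAMIF_RAW && input_format != output_format)
                *warn = 1;  /* the CAMIF input format gets overridden */
            return output_format;
        }
        return input_format;
    }

    int main(void)
    {
        int warn;
        uint32_t fmt = pick_io_format(CAMIF_RAW, 10, 12, &warn);

        printf("fmt=%u warn=%d\n", fmt, warn);
        return 0;
    }
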
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index d857a14..33f63b3 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -150,6 +150,12 @@
stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
get_stats_idx(stream_req_cmd->stats_type);
+ if ((stats_idx >= MSM_ISP_STATS_MAX) ||
+ (stats_idx == -EINVAL)) {
+ pr_err("%s: Invalid stats idx %d\n", __func__, stats_idx);
+ return rc;
+ }
+
stream_info = &stats_data->stream_info[stats_idx];
if (stream_info->state != STATS_AVALIABLE) {
pr_err("%s: Stats already requested\n", __func__);
@@ -188,7 +194,7 @@
int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
- int rc = 0;
+ int rc = -EINVAL;
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
@@ -202,6 +208,11 @@
}
stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+ if (stats_idx >= MSM_ISP_STATS_MAX) {
+ pr_err("%s: Invalid stats idx %d\n", __func__, stats_idx);
+ return rc;
+ }
+
stream_info = &stats_data->stream_info[stats_idx];
framedrop_period = msm_isp_get_framedrop_period(
@@ -228,9 +239,14 @@
struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
- struct msm_vfe_stats_stream *stream_info =
- &stats_data->stream_info[stats_idx];
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ if (stats_idx >= MSM_ISP_STATS_MAX) {
+ pr_err("%s: Invalid stats idx %d\n", __func__, stats_idx);
+ return rc;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
if (stream_info->state == STATS_AVALIABLE) {
pr_err("%s: stream already release\n", __func__);
return rc;
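
The same guard now appears at three call sites, so it is worth stating once. Assuming stream_info[] is sized MSM_ISP_STATS_MAX, valid indices are 0..MAX-1, which is why the checks above use >= rather than >; a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    #define MSM_ISP_STATS_MAX 8 /* placeholder; the real value is in msm_isp.h */

    /* Valid indices into stream_info[MSM_ISP_STATS_MAX] are 0..MAX-1, so
     * both negative error codes and idx == MAX must be rejected. */
    static bool stats_idx_valid(int idx)
    {
        return idx >= 0 && idx < MSM_ISP_STATS_MAX;
    }

    int main(void)
    {
        printf("%d %d %d\n", stats_idx_valid(-22) /* -EINVAL */,
               stats_idx_valid(MSM_ISP_STATS_MAX), stats_idx_valid(3));
        return 0;
    }
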
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 3806213..590b636 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -273,6 +273,9 @@
return rc;
}
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+ input_cfg->d.pix_cfg.input_format;
+
vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(
vfe_dev, &input_cfg->d.pix_cfg);
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index a4eb274..822c0c8 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1219,6 +1219,14 @@
goto ERROR1;
}
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ rc = -EINVAL;
+ goto ERROR1;
+ }
+
cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
GFP_KERNEL);
if (!cpp_frame_msg) {
@@ -1380,7 +1388,10 @@
pr_err("ioctl_ptr is null\n");
return -EINVAL;
}
-
+ if (cpp_dev == NULL) {
+ pr_err("cpp_dev is null\n");
+ return -EINVAL;
+ }
mutex_lock(&cpp_dev->mutex);
CPP_DBG("E cmd: %d\n", cmd);
switch (cmd) {
@@ -1396,8 +1407,16 @@
case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
if (cpp_dev->is_firmware_loaded == 0) {
- kfree(cpp_dev->fw_name_bin);
- cpp_dev->fw_name_bin = NULL;
+ if (cpp_dev->fw_name_bin != NULL) {
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ }
+ if ((ioctl_ptr->len == 0) ||
+ (ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
+ pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
GFP_KERNEL);
if (!cpp_dev->fw_name_bin) {
@@ -1406,13 +1425,9 @@
mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
-
if (ioctl_ptr->ioctl_ptr == NULL) {
pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
- return -EINVAL;
- }
- if (ioctl_ptr->len == 0) {
- pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
rc = (copy_from_user(cpp_dev->fw_name_bin,
@@ -1426,11 +1441,6 @@
return -EINVAL;
}
*(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
- if (cpp_dev == NULL) {
- pr_err("cpp_dev is null\n");
- return -EINVAL;
- }
-
disable_irq(cpp_dev->irq->start);
cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
enable_irq(cpp_dev->irq->start);
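
Both CPP fixes above follow one rule: bound a user-controlled length before it feeds an allocation or a copy. A userspace sketch of that ordering, with calloc/memcpy standing in for kzalloc/copy_from_user:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define MSM_CPP_MAX_FW_NAME_LEN 32

    /* Validate len first, allocate len+1 second, copy third, terminate last. */
    static char *dup_fw_name(const char *user_buf, uint32_t len)
    {
        char *name;

        if (len == 0 || len > MSM_CPP_MAX_FW_NAME_LEN)
            return NULL;            /* reject before touching the heap */
        name = calloc(1, len + 1);  /* kzalloc() stand-in */
        if (!name)
            return NULL;
        memcpy(name, user_buf, len);    /* copy_from_user() stand-in */
        name[len] = '\0';
        return name;
    }

    int main(void)
    {
        char *n = dup_fw_name("cpp_firmware_v1_1_1.fw", 22);
        int ok = n != NULL;

        free(n);
        return ok ? 0 : 1;
    }
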
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 8de8997..2bc460b 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -198,7 +198,8 @@
patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
- *patterns_num = 4;
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ *patterns_num = 5;
break;
case DMX_VIDEO_CODEC_VC1:
diff --git a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
index 0908a6e..9ddb9b7 100644
--- a/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
+++ b/drivers/media/platform/msm/dvb/video/mpq_dvb_video.c
@@ -134,6 +134,8 @@
case DMX_IDX_H264_SPS:
case DMX_IDX_MPEG_SEQ_HEADER:
case DMX_IDX_VC1_SEQ_HEADER:
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ case DMX_IDX_H264_SEI:
DBG("SPS FOUND\n");
frame_found = false;
break;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index a63ee61..653ba46 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -45,6 +45,7 @@
case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+ case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
vidc_err = VIDC_ERR_NOT_SUPPORTED;
break;
case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
@@ -786,6 +787,8 @@
data_done.input_done.offset = pkt->offset;
data_done.input_done.filled_len = pkt->filled_len;
data_done.input_done.packet_buffer = pkt->packet_buffer;
+ data_done.input_done.status =
+ hfi_map_err_status((u32) pkt->error_type);
callback(SESSION_ETB_DONE, &data_done);
}
@@ -1003,7 +1006,6 @@
struct hfi_msg_sys_session_end_done_packet *pkt)
{
struct msm_vidc_cb_cmd_done cmd_done;
- struct hal_session *sess_close;
dprintk(VIDC_DBG, "RECEIVED:SESSION_END_DONE");
@@ -1021,12 +1023,6 @@
cmd_done.status = hfi_map_err_status((u32)pkt->error_type);
cmd_done.data = NULL;
cmd_done.size = 0;
- sess_close = (struct hal_session *)pkt->session_id;
- dprintk(VIDC_INFO, "deleted the session: 0x%x",
- sess_close->session_id);
- list_del(&sess_close->list);
- kfree(sess_close);
- sess_close = NULL;
callback(SESSION_END_DONE, &cmd_done);
}
@@ -1035,7 +1031,6 @@
struct hfi_msg_sys_session_abort_done_packet *pkt)
{
struct msm_vidc_cb_cmd_done cmd_done;
- struct hal_session *sess_close;
dprintk(VIDC_DBG, "RECEIVED:SESSION_ABORT_DONE");
@@ -1053,16 +1048,6 @@
cmd_done.data = NULL;
cmd_done.size = 0;
- sess_close = (struct hal_session *)pkt->session_id;
- if (!sess_close) {
- dprintk(VIDC_ERR, "%s: invalid session pointer\n", __func__);
- return;
- }
- dprintk(VIDC_ERR, "deleted the session: 0x%x",
- sess_close->session_id);
- list_del(&sess_close->list);
- kfree(sess_close);
- sess_close = NULL;
callback(SESSION_ABORT_DONE, &cmd_done);
}
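
With the new status field, the ETB completion carries a driver error code derived from the firmware error via a table lookup. A sketch of the shape of that mapping (constants are placeholders; the real sets live in the HFI headers):

    #include <stdio.h>

    /* Placeholder constants standing in for the HFI/VIDC enums. */
    enum { HFI_ERR_NONE, HFI_ERR_SESSION_UNSUPPORTED_STREAM, HFI_ERR_BITSTREAM };
    enum { VIDC_ERR_NONE, VIDC_ERR_NOT_SUPPORTED, VIDC_ERR_BITSTREAM_ERR,
           VIDC_ERR_FAIL };

    static int map_err_status(unsigned int hfi_err)
    {
        switch (hfi_err) {
        case HFI_ERR_NONE:
            return VIDC_ERR_NONE;
        case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
            return VIDC_ERR_NOT_SUPPORTED;  /* the case added above */
        case HFI_ERR_BITSTREAM:
            return VIDC_ERR_BITSTREAM_ERR;
        default:
            return VIDC_ERR_FAIL;
        }
    }

    int main(void)
    {
        printf("%d\n", map_err_status(HFI_ERR_SESSION_UNSUPPORTED_STREAM));
        return 0;
    }
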
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 57b98dc..f94b6f1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -627,6 +627,8 @@
if (inst->core)
hdev = inst->core->device;
if (hdev && inst->session) {
+ dprintk(VIDC_DBG,
+ "cleaning up inst: 0x%p", inst);
rc = call_hfi_op(hdev, session_clean,
(void *) inst->session);
if (rc)
@@ -693,10 +695,24 @@
{
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_inst *inst;
+ struct hfi_device *hdev = NULL;
+
if (response) {
inst = (struct msm_vidc_inst *)response->session_id;
- signal_session_msg_receipt(cmd, inst);
+ if (!inst || !inst->core || !inst->core->device) {
+ dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+ return;
+ }
+ hdev = inst->core->device;
+ mutex_lock(&inst->lock);
+ if (inst->session) {
+ dprintk(VIDC_DBG, "cleaning up inst: 0x%p", inst);
+ call_hfi_op(hdev, session_clean,
+ (void *) inst->session);
+ }
inst->session = NULL;
+ mutex_unlock(&inst->lock);
+ signal_session_msg_receipt(cmd, inst);
show_stats(inst);
} else {
dprintk(VIDC_ERR,
@@ -737,22 +753,45 @@
struct msm_vidc_cb_data_done *response = data;
struct vb2_buffer *vb;
struct msm_vidc_inst *inst;
+ struct vidc_hal_ebd *empty_buf_done;
+
if (!response) {
dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
return;
}
vb = response->clnt_data;
inst = (struct msm_vidc_inst *)response->session_id;
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s Invalid response from vidc_hal\n",
+ __func__);
+ return;
+ }
if (vb) {
vb->v4l2_planes[0].bytesused = response->input_done.filled_len;
vb->v4l2_planes[0].data_offset = response->input_done.offset;
if (vb->v4l2_planes[0].data_offset > vb->v4l2_planes[0].length)
- dprintk(VIDC_ERR, "Error: data_offset overflow\n");
+ dprintk(VIDC_INFO, "data_offset overflow length\n");
if (vb->v4l2_planes[0].bytesused > vb->v4l2_planes[0].length)
- dprintk(VIDC_ERR, "Error: buffer overflow\n");
+ dprintk(VIDC_INFO, "bytesused overflow length\n");
if ((u8 *)vb->v4l2_planes[0].m.userptr !=
response->input_done.packet_buffer)
- dprintk(VIDC_ERR, "Error: unexpected buffer address\n");
+ dprintk(VIDC_INFO, "Unexpected buffer address\n");
+ vb->v4l2_buf.flags = 0;
+ empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+ if (empty_buf_done) {
+ if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+ dprintk(VIDC_INFO,
+ "Failed : Unsupported input stream\n");
+ vb->v4l2_buf.flags |=
+ V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+ }
+ if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+ dprintk(VIDC_INFO,
+ "Failed : Corrupted input stream\n");
+ vb->v4l2_buf.flags |=
+ V4L2_QCOM_BUF_DATA_CORRUPT;
+ }
+ }
mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 5c22552..010f15d 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -936,6 +936,7 @@
u32 timestamp_hi;
u32 timestamp_lo;
u32 flags;
+ u32 status;
u32 mark_target;
u32 mark_data;
u32 stats;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bd838fc..b750602 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2647,6 +2647,11 @@
struct qseecom_dev_handle *data = file->private_data;
void __user *argp = (void __user *) arg;
+ if (!data) {
+ pr_err("Invalid/uninitialized device handle\n");
+ return -EINVAL;
+ }
+
if (data->abort) {
pr_err("Aborting qseecom driver\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/msm/msm_rmnet_bam.c b/drivers/net/ethernet/msm/msm_rmnet_bam.c
index 83f486c..3f3d76a 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_bam.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_bam.c
@@ -55,7 +55,7 @@
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
/* Configure device instances */
-#define RMNET_DEVICE_COUNT (8)
+#define RMNET_DEVICE_COUNT 9
/* allow larger frames */
#define RMNET_DATA_LEN 2000
@@ -85,6 +85,7 @@
u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
uint8_t device_up;
uint8_t in_reset;
+ struct platform_driver *bam_pdev;
};
#ifdef CONFIG_MSM_RMNET_DEBUG
@@ -401,6 +402,14 @@
__func__, p->ch_id, r);
return -ENODEV;
}
+
+ r = platform_driver_register(p->bam_pdev);
+ if (r) {
+ pr_err("%s: bam pdev registration failed n=%d rc=%d\n",
+ __func__, p->ch_id, r);
+ msm_bam_dmux_close(p->ch_id);
+ return r;
+ }
}
p->device_up = DEVICE_ACTIVE;
@@ -711,6 +720,11 @@
break;
}
+ if (i >= RMNET_DEVICE_COUNT) {
+ pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
+ return -ENODEV;
+ }
+
p = netdev_priv(netdevs[i]);
if (p->in_reset) {
p->in_reset = 0;
@@ -766,7 +780,7 @@
if (i >= RMNET_REV_DEVICE_COUNT) {
pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
- return 0;
+ return -ENODEV;
}
p = netdev_priv(netdevs_rev[i]);
@@ -871,8 +885,13 @@
#endif
for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+ const char *dev_name = "rmnet%d";
+
+ if (n == BAM_DMUX_USB_RMNET_0)
+ dev_name = "rmnet_usb%d";
+
dev = alloc_netdev(sizeof(struct rmnet_private),
- "rmnet%d", rmnet_setup);
+ dev_name, rmnet_setup);
if (!dev) {
pr_err("%s: no memory for netdev %d\n", __func__, n);
@@ -898,6 +917,7 @@
if (ret) {
pr_err("%s: unable to register netdev"
" %d rc=%d\n", __func__, n, ret);
+ netdevs[n] = NULL;
free_netdev(dev);
return ret;
}
@@ -921,18 +941,16 @@
bam_rmnet_drivers[n].probe = bam_rmnet_probe;
bam_rmnet_drivers[n].remove = bam_rmnet_remove;
tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
- if (tempname == NULL)
- return -ENOMEM;
+ if (tempname == NULL) {
+ netdevs[n] = NULL;
+ ret = -ENOMEM;
+ goto error;
+ }
scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
n);
bam_rmnet_drivers[n].driver.name = tempname;
bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
- ret = platform_driver_register(&bam_rmnet_drivers[n]);
- if (ret) {
- pr_err("%s: registration failed n=%d rc=%d\n",
- __func__, n, ret);
- return ret;
- }
+ p->bam_pdev = &bam_rmnet_drivers[n];
}
/*Support for new rmnet ports */
for (n = 0; n < RMNET_REV_DEVICE_COUNT; n++) {
@@ -960,6 +978,7 @@
if (ret) {
pr_err("%s: unable to register rev netdev %d rc=%d\n",
__func__, n, ret);
+ netdevs_rev[n] = NULL;
free_netdev(dev);
return ret;
}
@@ -968,20 +987,23 @@
bam_rmnet_rev_drivers[n].probe = bam_rmnet_rev_probe;
bam_rmnet_rev_drivers[n].remove = bam_rmnet_rev_remove;
tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
- if (tempname == NULL)
- return -ENOMEM;
+ if (tempname == NULL) {
+ netdevs_rev[n] = NULL;
+ ret = -ENOMEM;
+ goto error;
+ }
scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
(n+BAM_DMUX_DATA_REV_RMNET_0));
bam_rmnet_rev_drivers[n].driver.name = tempname;
bam_rmnet_rev_drivers[n].driver.owner = THIS_MODULE;
- ret = platform_driver_register(&bam_rmnet_rev_drivers[n]);
- if (ret) {
- pr_err("%s: new rev driver registration failed n=%d rc=%d\n",
- __func__, n, ret);
- return ret;
- }
+ p->bam_pdev = &bam_rmnet_rev_drivers[n];
}
return 0;
+
+error:
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return ret;
}
module_init(rmnet_init);
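
The rmnet_bam change defers platform_driver_register() from module init to the first successful channel open, and unwinds the open if registration fails. A mocked sketch of that defer-and-unwind shape:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mocked transport hooks; the real ones are msm_bam_dmux_open/close()
     * and platform_driver_register(). */
    static int ch_open(int id) { (void)id; return 0; }
    static void ch_close(int id) { (void)id; }
    static int drv_register(void) { return -1; /* force the failure path */ }

    static int netdev_open(int ch_id, bool *registered)
    {
        int rc = ch_open(ch_id);

        if (rc)
            return rc;
        if (!*registered) {
            rc = drv_register();
            if (rc) {
                ch_close(ch_id);    /* unwind the open on failure */
                return rc;
            }
            *registered = true;
        }
        return 0;
    }

    int main(void)
    {
        bool registered = false;

        printf("open rc=%d\n", netdev_open(0, &registered));
        return 0;
    }
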
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index b518f1f..c246036 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -1970,6 +1970,11 @@
pr_debug("new delta ocv = %d\n", delta_ocv_uv);
}
+ if (wake_lock_active(&chip->low_voltage_wake_lock)) {
+ pr_debug("Low Voltage, apply only ibat limited corrections\n");
+ goto skip_limiting_corrections;
+ }
+
if (chip->last_ocv_uv > 3800000)
correction_limit_uv = the_chip->high_ocv_correction_limit_uv;
else
@@ -1986,6 +1991,7 @@
pr_debug("new delta ocv = %d\n", delta_ocv_uv);
}
+skip_limiting_corrections:
chip->last_ocv_uv -= delta_ocv_uv;
if (chip->last_ocv_uv >= chip->max_voltage_uv)
@@ -2278,7 +2284,6 @@
int new_calculated_soc;
static int firsttime = 1;
- calib_hkadc_check(chip, batt_temp);
calculate_soc_params(chip, raw, batt_temp, chargecycles,
&fcc_uah,
&unusable_charge_uah,
@@ -2426,6 +2431,7 @@
get_batt_temp(chip, &batt_temp);
mutex_lock(&chip->last_ocv_uv_mutex);
+ calib_hkadc_check(chip, batt_temp);
read_soc_params_raw(chip, &raw, batt_temp);
soc = calculate_state_of_charge(chip, &raw,
@@ -2762,6 +2768,7 @@
get_batt_temp(the_chip, &batt_temp);
mutex_lock(&the_chip->last_ocv_uv_mutex);
+ calib_hkadc_check(the_chip, batt_temp);
read_soc_params_raw(the_chip, &raw, batt_temp);
mutex_unlock(&the_chip->last_ocv_uv_mutex);
@@ -2907,6 +2914,7 @@
mutex_lock(&the_chip->last_ocv_uv_mutex);
+ calib_hkadc_check(the_chip, batt_temp);
read_soc_params_raw(the_chip, &raw, batt_temp);
calculate_cc_uah(the_chip, raw.cc, &bms_end_cc_uah);
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index d5b2cc6..f3f59e6 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -298,6 +298,7 @@
bool disable_aicl;
int usb_type;
bool disable_chg_rmvl_wrkarnd;
+ struct msm_xo_voter *voter;
};
/* user space parameter to limit usb current */
@@ -4034,6 +4035,7 @@
int err;
u8 temp;
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
temp = 0xD1;
err = pm_chg_write(chip, CHG_TEST, temp);
if (err) {
@@ -4092,6 +4094,8 @@
pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
return;
}
+
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
}
static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
@@ -4099,6 +4103,7 @@
int err;
u8 temp;
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_ON);
temp = 0xD1;
err = pm_chg_write(chip, CHG_TEST, temp);
if (err) {
@@ -4112,6 +4117,7 @@
pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
return;
}
+ msm_xo_mode_vote(chip->voter, MSM_XO_MODE_OFF);
}
#define VREF_BATT_THERM_FORCE_ON BIT(7)
@@ -4800,6 +4806,7 @@
chip->ibatmax_max_adj_ma = find_ibat_max_adj_ma(
chip->max_bat_chg_current);
+ chip->voter = msm_xo_get(MSM_XO_TCXO_D0, "pm8921_charger");
rc = pm8921_chg_hw_init(chip);
if (rc) {
pr_err("couldn't init hardware rc=%d\n", rc);
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index e93d085..950b88a 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -306,8 +306,8 @@
unsigned int maxinput_dc_ma;
unsigned int hot_batt_p;
unsigned int cold_batt_p;
- unsigned int warm_bat_decidegc;
- unsigned int cool_bat_decidegc;
+ int warm_bat_decidegc;
+ int cool_bat_decidegc;
unsigned int safe_current;
unsigned int revision;
unsigned int type;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index c9b2476..25b4b5e 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -2919,15 +2919,6 @@
goto err_probe_reqmem;
}
- if (pdata && pdata->ver_reg_exists) {
- enum msm_spi_qup_version ver =
- msm_spi_get_qup_hw_ver(&pdev->dev, dd);
- if (dd->qup_ver != ver)
- dev_warn(&pdev->dev,
- "%s: HW version different then initially assumed by probe",
- __func__);
- }
-
if (pdata && pdata->rsl_id) {
struct remote_mutex_id rmid;
rmid.r_spinlock_id = pdata->rsl_id;
@@ -2986,6 +2977,16 @@
}
pclk_enabled = 1;
+
+ if (pdata && pdata->ver_reg_exists) {
+ enum msm_spi_qup_version ver =
+ msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+ if (dd->qup_ver != ver)
+ dev_warn(&pdev->dev,
+ "%s: HW version different then initially assumed by probe",
+ __func__);
+ }
+
/* GSBI does not exist on B-family MSM chips */
if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
rc = msm_spi_configure_gsbi(dd, pdev);
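
The spi_qsd.c move is purely an ordering fix: the QUP version register is only readable once the core clocks are enabled, so the check migrates to after pclk_enabled is set. A sketch of the constraint:

    #include <stdio.h>

    static int clocks_on;

    static int enable_clocks(void) { clocks_on = 1; return 0; }

    /* Reading the version register with clocks off would hang the bus or
     * return garbage on real hardware; the mock just reports failure. */
    static int read_hw_version(int *ver)
    {
        if (!clocks_on)
            return -1;
        *ver = 2;   /* pretend QUP v2 */
        return 0;
    }

    int main(void)
    {
        int ver;

        if (enable_clocks() || read_hw_version(&ver))
            return 1;
        printf("qup_ver=%d\n", ver);
        return 0;
    }
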
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index a775459..b0b2f56 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -127,6 +127,8 @@
struct work_struct connect_w;
struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
};
static struct bam_portmaster {
@@ -542,7 +544,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -551,6 +555,7 @@
status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_ul);
}
static void gbam_start_endless_tx(struct gbam_port *port)
@@ -558,7 +563,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -567,6 +574,8 @@
status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
if (status)
pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_dl);
}
static void gbam_stop_endless_rx(struct gbam_port *port)
@@ -574,7 +583,9 @@
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_ul);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_ul);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -583,14 +594,17 @@
status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
-
+ spin_unlock(&port->port_lock_ul);
}
+
static void gbam_stop_endless_tx(struct gbam_port *port)
{
struct bam_ch_info *d = &port->data_ch;
int status;
+ spin_lock(&port->port_lock_dl);
if (!port->port_usb) {
+ spin_unlock(&port->port_lock_dl);
pr_err("%s: port->port_usb is NULL", __func__);
return;
}
@@ -599,6 +613,7 @@
status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
if (status)
pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
+ spin_unlock(&port->port_lock_dl);
}
static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
@@ -893,6 +908,46 @@
pr_debug("%s: done\n", __func__);
}
+static int gbam_wake_cb(void *param)
+{
+ struct gbam_port *port = (struct gbam_port *)param;
+ struct bam_ch_info *d;
+ struct f_rmnet *dev;
+
+ dev = port_to_rmnet(port->gr);
+ d = &port->data_ch;
+
+ pr_debug("%s: woken up by peer\n", __func__);
+
+ return usb_gadget_wakeup(dev->cdev->gadget);
+}
+
+static void gbam2bam_suspend_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
+ usb_bam_suspend(&d->ipa_params);
+ }
+}
+
+static void gbam2bam_resume_work(struct work_struct *w)
+{
+ struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
+ struct bam_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: resume work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+ usb_bam_resume(&d->ipa_params);
+}
+
static int gbam_peer_reset_cb(void *param)
{
struct gbam_port *port = (struct gbam_port *)param;
@@ -1122,6 +1177,8 @@
INIT_WORK(&port->connect_w, gbam2bam_connect_work);
INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
+ INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
+ INIT_WORK(&port->resume_w, gbam2bam_resume_work);
/* data ch */
d = &port->data_ch;
@@ -1446,20 +1503,6 @@
return ret;
}
-static int gbam_wake_cb(void *param)
-{
- struct gbam_port *port = (struct gbam_port *)param;
- struct bam_ch_info *d;
- struct f_rmnet *dev;
-
- dev = port_to_rmnet(port->gr);
- d = &port->data_ch;
-
- pr_debug("%s: woken up by peer\n", __func__);
-
- return usb_gadget_wakeup(dev->cdev->gadget);
-}
-
void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
struct gbam_port *port;
@@ -1474,11 +1517,7 @@
pr_debug("%s: suspended port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
- if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
- usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
- usb_bam_suspend(&d->ipa_params);
- }
+ queue_work(gbam_wq, &port->suspend_w);
}
void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
@@ -1495,7 +1534,5 @@
pr_debug("%s: resumed port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
- if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
- usb_bam_resume(&d->ipa_params);
+ queue_work(gbam_wq, &port->resume_w);
}
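
gbam_suspend()/gbam_resume() run in a context that must not sleep, while the BAM/IPA suspend path can, so the hunk moves that work into work items. A compact userspace analogue of queue_work(), using a pthread as the worker:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for usb_bam_suspend(); it may sleep, so it must not run in
     * the (notionally atomic) suspend callback itself. */
    static void *suspend_work(void *arg)
    {
        (void)arg;
        usleep(1000);
        puts("suspend work done");
        return NULL;
    }

    /* The callback only queues; queue_work() is modeled by spawning. */
    static void on_suspend(pthread_t *worker)
    {
        pthread_create(worker, NULL, suspend_work, NULL);
        puts("suspend callback returned immediately");
    }

    int main(void)
    {
        pthread_t worker;

        on_suspend(&worker);
        pthread_join(worker, NULL);
        return 0;
    }
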
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 577a4fe..b315605 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -62,10 +62,15 @@
struct work_struct connect_w;
struct work_struct disconnect_w;
+ struct work_struct suspend_w;
+ struct work_struct resume_w;
};
struct bam_data_port *bam2bam_data_ports[BAM2BAM_DATA_N_PORTS];
+static void bam2bam_data_suspend_work(struct work_struct *w);
+static void bam2bam_data_resume_work(struct work_struct *w);
+
/*------------data_path----------------------------*/
static void bam_data_endless_rx_complete(struct usb_ep *ep,
@@ -351,6 +356,8 @@
INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
+ INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work);
+ INIT_WORK(&port->resume_w, bam2bam_data_resume_work);
/* data ch */
d = &port->data_ch;
@@ -578,12 +585,8 @@
d = &port->data_ch;
pr_debug("%s: suspended port %d\n", __func__, port_num);
- usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
- if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
- usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
- port);
- usb_bam_suspend(&d->ipa_params);
- }
+
+ queue_work(bam_data_wq, &port->suspend_w);
}
void bam_data_resume(u8 port_num)
@@ -596,6 +599,34 @@
d = &port->data_ch;
pr_debug("%s: resumed port %d\n", __func__, port_num);
+
+ queue_work(bam_data_wq, &port->resume_w);
+}
+
+static void bam2bam_data_suspend_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, suspend_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: suspend work started\n", __func__);
+
+ usb_bam_register_wake_cb(d->dst_connection_idx, bam_data_wake_cb, port);
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_start_stop_cbs(bam_data_start, bam_data_stop,
+ port);
+ usb_bam_suspend(&d->ipa_params);
+ }
+}
+
+static void bam2bam_data_resume_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, resume_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+
+ pr_debug("%s: resume work started\n", __func__);
+
usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
usb_bam_resume(&d->ipa_params);
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 253658e..cfc5961 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -1122,10 +1122,16 @@
u32 cmd;
unsigned long flags;
int retries = 0, ret, cnt = RESET_SIGNAL_TIME_USEC;
+ s32 next_latency = 0;
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->swfi_latency + 1);
+ if (pdata && pdata->swfi_latency) {
+ next_latency = pdata->swfi_latency + 1;
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+ if (pdata->standalone_latency)
+ next_latency = pdata->standalone_latency + 1;
+ else
+ next_latency = PM_QOS_DEFAULT_VALUE;
+ }
mehci->bus_reset = 1;
@@ -1196,9 +1202,8 @@
pr_debug("reset completed\n");
fail:
mehci->bus_reset = 0;
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- PM_QOS_DEFAULT_VALUE);
+ if (next_latency)
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
}
static int ehci_hsic_bus_suspend(struct usb_hcd *hcd)
@@ -1229,19 +1234,27 @@
int retry_cnt = 0;
int tight_resume = 0;
struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
+ s32 next_latency = 0;
dbg_log_event(NULL, "Resume RH", 0);
+ if (pdata && pdata->swfi_latency) {
+ next_latency = pdata->swfi_latency + 1;
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
+ if (pdata->standalone_latency)
+ next_latency = pdata->standalone_latency + 1;
+ else
+ next_latency = PM_QOS_DEFAULT_VALUE;
+ }
+
/* keep delay between bus states */
if (time_before(jiffies, ehci->next_statechange))
usleep_range(5000, 5000);
spin_lock_irq(&ehci->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
- spin_unlock_irq(&ehci->lock);
mehci->resume_status = -ESHUTDOWN;
- complete(&mehci->rt_completion);
- return 0;
+ goto exit;
}
if (unlikely(ehci->debug)) {
@@ -1313,13 +1326,7 @@
&mehci->timer->gptimer1_ctrl);
spin_unlock_irq(&ehci->lock);
- if (pdata && pdata->swfi_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->swfi_latency + 1);
wait_for_completion(&mehci->gpt0_completion);
- if (pdata && pdata->standalone_latency)
- pm_qos_update_request(&mehci->pm_qos_req_dma,
- pdata->standalone_latency + 1);
spin_lock_irq(&ehci->lock);
} else {
dbg_log_event(NULL, "FPR: Tightloop", 0);
@@ -1357,9 +1364,11 @@
dbg_log_event(NULL, "FPR: RT-Done", 0);
mehci->resume_status = 1;
+exit:
spin_unlock_irq(&ehci->lock);
-
complete(&mehci->rt_completion);
+ if (next_latency)
+ pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
return 0;
}
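
The HSIC rework computes the exit latency once per operation: raise the pm_qos request for the time-critical window, then restore either the standalone latency or the default, with next_latency == 0 doubling as "no request was made". A sketch of that tri-state:

    #include <stdio.h>

    #define PM_QOS_DEFAULT_VALUE (-1)

    static void pm_qos_update(int v) { printf("pm_qos -> %d\n", v); }

    /* swfi/standalone of 0 mean "not provided", matching the pdata checks. */
    static void bus_op(int swfi_latency, int standalone_latency)
    {
        int next_latency = 0;

        if (swfi_latency) {
            pm_qos_update(swfi_latency + 1);
            next_latency = standalone_latency ?
                           standalone_latency + 1 : PM_QOS_DEFAULT_VALUE;
        }

        /* ... time-critical bus reset/resume work happens here ... */

        if (next_latency)
            pm_qos_update(next_latency);
    }

    int main(void)
    {
        bus_op(100, 0);
        bus_op(0, 0);   /* no pdata latencies: no QoS traffic at all */
        return 0;
    }
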
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b5a5383..f44ebaf 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1337,7 +1337,7 @@
* writeback block
*/
head[len] = head[len - 1];
- head[len].num = -1;
+ head[len].num = head[len - 1].num;
}
mdata->ctl_off = head;
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
index d0f93cf..e7cd1be 100644
--- a/drivers/video/msm/mdss/mhl_msc.c
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -74,6 +74,22 @@
}
}
+static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
+{
+ bool rc = false;
+
+ if (!mhl_ctrl)
+ return rc;
+
+ if (mhl_ctrl->tmds_en_state ||
+ /* Identify sink with non-standard INT STAT SIZE */
+ (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
+ mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
+ rc = true;
+
+ return rc;
+}
+
void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
{
if (ctrl)
@@ -253,6 +269,11 @@
mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
mhl_drive_hpd(mhl_ctrl, HPD_UP);
break;
+ case DEVCAP_OFFSET_MHL_VERSION:
+ case DEVCAP_OFFSET_INT_STAT_SIZE:
+ if (mhl_qualify_path_enable(mhl_ctrl))
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ break;
}
break;
case MHL_WRITE_BURST:
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index add15a4..c66d50d 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -794,10 +794,14 @@
void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
{
struct i2c_client *client = mhl_ctrl->i2c_handle;
- if (on)
+
+ if (on) {
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
- else
+ mhl_ctrl->tmds_en_state = true;
+ } else {
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+ mhl_ctrl->tmds_en_state = false;
+ }
}
void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index dd675f3..02ecb0c 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -152,6 +152,8 @@
#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000
#define DMX_IDX_VC1_FRAME_START 0x01000000
#define DMX_IDX_VC1_FRAME_END 0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL 0x04000000
+#define DMX_IDX_H264_SEI 0x08000000
struct dmx_pes_filter_params
{
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index f0a54eb..d1ee11c 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -149,6 +149,7 @@
uint16_t devcap_state;
uint8_t status[2];
uint8_t path_en_state;
+ uint8_t tmds_en_state;
void *hdmi_mhl_ops;
struct work_struct mhl_msc_send_work;
struct list_head list_cmd;
diff --git a/include/linux/stk3x1x.h b/include/linux/stk3x1x.h
new file mode 100644
index 0000000..c34116a
--- /dev/null
+++ b/include/linux/stk3x1x.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * $Id: stk3x1x.h
+ *
+ * Copyright (C) 2012 Lex Hsieh <lex_hsieh@sitronix.com.tw>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+#ifndef __STK3X1X_H__
+#define __STK3X1X_H__
+
+/* platform data */
+struct stk3x1x_platform_data {
+ uint8_t state_reg;
+ uint8_t psctrl_reg;
+ uint8_t alsctrl_reg;
+ uint8_t ledctrl_reg;
+ uint8_t wait_reg;
+ uint16_t ps_thd_h;
+ uint16_t ps_thd_l;
+ int int_pin;
+ uint32_t transmittance;
+};
+
+#endif /* __STK3X1X_H__ */
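
For reference, a board file would hand this structure to the stk3x1x driver through platform_data. A hypothetical fill-in (every value below is made up for illustration, not taken from a real board):

    #include <stdint.h>
    #include <stdio.h>

    struct stk3x1x_platform_data {
        uint8_t state_reg;
        uint8_t psctrl_reg;
        uint8_t alsctrl_reg;
        uint8_t ledctrl_reg;
        uint8_t wait_reg;
        uint16_t ps_thd_h;
        uint16_t ps_thd_l;
        int int_pin;
        uint32_t transmittance;
    };

    int main(void)
    {
        /* All values hypothetical; a real board derives them from tuning. */
        struct stk3x1x_platform_data pdata = {
            .state_reg = 0x00,
            .psctrl_reg = 0x71,
            .alsctrl_reg = 0x38,
            .ledctrl_reg = 0xFF,
            .wait_reg = 0x07,
            .ps_thd_h = 1700,
            .ps_thd_l = 1500,
            .int_pin = 115,     /* hypothetical GPIO number */
            .transmittance = 500,
        };

        printf("ps window: [%u, %u]\n", pdata.ps_thd_l, pdata.ps_thd_h);
        return 0;
    }
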
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 2424518..101325e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -704,6 +704,7 @@
#define V4L2_QCOM_BUF_FLAG_DECODEONLY 0x40000
#define V4L2_QCOM_BUF_DATA_CORRUPT 0x80000
#define V4L2_QCOM_BUF_DROP_FRAME 0x100000
+#define V4L2_QCOM_BUF_INPUT_UNSUPPORTED 0x200000
/*
* O V E R L A Y P R E V I E W
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 5ae852a..ec8ec9a 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -98,6 +98,7 @@
struct msm_vfe_camif_cfg camif_cfg;
enum msm_vfe_inputmux input_mux;
enum ISP_START_PIXEL_PATTERN pixel_pattern;
+ uint32_t input_format;
};
struct msm_vfe_rdi_cfg {
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index 162729a..de42c38 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -13,6 +13,8 @@
#define MAX_NUM_CPP_STRIPS 8
#define MSM_CPP_MAX_NUM_PLANES 3
+#define MSM_CPP_MAX_FRAME_LENGTH 1024
+#define MSM_CPP_MAX_FW_NAME_LEN 32
enum msm_cpp_frame_type {
MSM_CPP_OFFLINE_FRAME,
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index a6390dd..ff190cd 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -38,6 +38,9 @@
#include "wcd9xxx-resmgr.h"
#include "wcd9xxx-common.h"
+#define TAPAN_HPH_PA_SETTLE_COMP_ON 3000
+#define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
+
static atomic_t kp_tapan_priv;
static int spkr_drv_wrnd_param_set(const char *val,
const struct kernel_param *kp);
@@ -224,8 +227,8 @@
};
static const u32 comp_shift[] = {
- 4, /* Compander 0's clock source is on interpolator 7 */
0,
+ 1,
2,
};
@@ -234,47 +237,44 @@
COMPANDER_1,
COMPANDER_2,
COMPANDER_2,
- COMPANDER_2,
- COMPANDER_2,
- COMPANDER_0,
COMPANDER_MAX,
};
static const struct comp_sample_dependent_params comp_samp_params[] = {
{
/* 8 Khz */
- .peak_det_timeout = 0x02,
+ .peak_det_timeout = 0x06,
.rms_meter_div_fact = 0x09,
.rms_meter_resamp_fact = 0x06,
},
{
/* 16 Khz */
- .peak_det_timeout = 0x03,
+ .peak_det_timeout = 0x07,
.rms_meter_div_fact = 0x0A,
.rms_meter_resamp_fact = 0x0C,
},
{
/* 32 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x08,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x1E,
},
{
/* 48 Khz */
- .peak_det_timeout = 0x05,
+ .peak_det_timeout = 0x09,
.rms_meter_div_fact = 0x0B,
.rms_meter_resamp_fact = 0x28,
},
{
/* 96 Khz */
- .peak_det_timeout = 0x06,
+ .peak_det_timeout = 0x0A,
.rms_meter_div_fact = 0x0C,
.rms_meter_resamp_fact = 0x50,
},
{
/* 192 Khz */
- .peak_det_timeout = 0x07,
- .rms_meter_div_fact = 0xD,
+ .peak_det_timeout = 0x0B,
+ .rms_meter_div_fact = 0xC,
.rms_meter_resamp_fact = 0xA0,
},
};
@@ -673,6 +673,37 @@
dev_dbg(codec->dev, "%s: Compander %d enable current %d, new %d\n",
__func__, comp, tapan->comp_enabled[comp], value);
tapan->comp_enabled[comp] = value;
+
+ if (comp == COMPANDER_1 &&
+ tapan->comp_enabled[comp] == 1) {
+ /* Wavegen to 5 msec */
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDA);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x15);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x2A);
+
+ /* Enable Chopper */
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
+
+ snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x20);
+ pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
+ __func__);
+ } else if (comp == COMPANDER_1 &&
+ tapan->comp_enabled[comp] == 0) {
+ /* Wavegen to 20 msec */
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_CTL, 0xDB);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_CNP_WG_TIME, 0x58);
+ snd_soc_write(codec, TAPAN_A_RX_HPH_BIAS_WG_OCP, 0x1A);
+
+ /* Disable CHOPPER block */
+ snd_soc_update_bits(codec,
+ TAPAN_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
+
+ snd_soc_write(codec, TAPAN_A_NCP_DTEST, 0x10);
+ pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
+ __func__);
+ }
+
return 0;
}
@@ -708,26 +739,52 @@
static void tapan_discharge_comp(struct snd_soc_codec *codec, int comp)
{
- /* Update RSM to 1, DIVF to 5 */
- snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 1);
+ /* Level meter DIV factor to 5 */
snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
- 1 << 5);
- /* Wait for 1ms */
- usleep_range(1000, 1000);
+ 0x05 << 4);
+ /* RMS meter Sampling to 0x01 */
+ snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
+
+ /* Worst case wait for the compander CnP sleep to complete */
+ usleep_range(3000, 3000);
+}
+
+static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
+ struct snd_soc_codec *codec)
+{
+ int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
+ if (!strncmp(pdata->regulator[i].name,
+ WCD9XXX_SUPPLY_BUCK_NAME,
+ sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
+ if ((pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_1P8) ||
+ (pdata->regulator[i].min_uV ==
+ WCD9XXX_CDC_BUCK_MV_2P15))
+ buck_volt = pdata->regulator[i].min_uV;
+ break;
+ }
+ }
+ pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
+ return buck_volt;
}
static int tapan_config_compander(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- int mask, emask;
- bool timedout;
- unsigned long timeout;
+ int mask, enable_mask;
+ u8 rdac5_mux;
struct snd_soc_codec *codec = w->codec;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
const int comp = w->shift;
const u32 rate = tapan->comp_fs[comp];
const struct comp_sample_dependent_params *comp_params =
&comp_samp_params[rate];
+ enum wcd9xxx_buck_volt buck_mv;
dev_dbg(codec->dev, "%s: %s event %d compander %d, enabled %d",
__func__, w->name, event, comp, tapan->comp_enabled[comp]);
@@ -737,72 +794,105 @@
/* Compander 0 has single channel */
mask = (comp == COMPANDER_0 ? 0x01 : 0x03);
- emask = (comp == COMPANDER_0 ? 0x02 : 0x03);
+ buck_mv = tapan_codec_get_buck_mv(codec);
+
+ rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+ rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
+ if (comp == COMPANDER_0) { /* SPK compander */
+ enable_mask = 0x02;
+ } else if (comp == COMPANDER_1) { /* HPH compander */
+ enable_mask = 0x03;
+ } else if (comp == COMPANDER_2) { /* LO compander */
+
+ if (rdac5_mux == 0) { /* DEM4 */
+
+ /* for LO Stereo SE, enable Compander 2 left
+ * channel on RX3 interpolator Path and Compander 2
+ * right channel on RX4 interpolator Path.
+ */
+ enable_mask = 0x03;
+ } else if (rdac5_mux == 1) { /* DEM3_INV */
+
+ /* for LO mono differential, only enable Compander 2
+ * left channel on RX3 interpolator Path.
+ */
+ enable_mask = 0x02;
+ } else {
+ dev_err(codec->dev, "%s: invalid rdac5_mux val %d",
+ __func__, rdac5_mux);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(codec->dev, "%s: invalid compander %d", __func__, comp);
+ return -EINVAL;
+ }
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ /* Set compander Sample rate */
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
+ 0x07, rate);
+ /* Set the static gain offset for HPH Path */
+ if (comp == COMPANDER_1) {
+ if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15)
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x00);
+ else
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B4_CTL + (comp * 8),
+ 0x80, 0x80);
+ }
+ /* Enable RX interpolation path compander clocks */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
+ 0x01 << comp_shift[comp],
+ 0x01 << comp_shift[comp]);
+
+ /* Toggle compander reset bits */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ 0x01 << comp_shift[comp],
+ 0x01 << comp_shift[comp]);
+ snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
+ 0x01 << comp_shift[comp], 0);
+
/* Set gain source to compander */
tapan_config_gain_compander(codec, comp, true);
- /* Enable RX interpolation path clocks */
- snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
- mask << comp_shift[comp],
- mask << comp_shift[comp]);
+
+ /* Compander enable */
+ snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
+ (comp * 8), enable_mask, enable_mask);
tapan_discharge_comp(codec, comp);
- /* Clear compander halt */
- snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
- (comp * 8),
- 1 << 2, 0);
+ /* Set sample rate dependent parameter */
+ snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
+ comp_params->rms_meter_resamp_fact);
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0xF0, comp_params->rms_meter_div_fact << 4);
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
+ 0x0F, comp_params->peak_det_timeout);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Disable compander */
+ snd_soc_update_bits(codec,
+ TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
+ enable_mask, 0x00);
+
/* Toggle compander reset bits */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp],
mask << comp_shift[comp]);
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_OTHR_RESET_B2_CTL,
mask << comp_shift[comp], 0);
- break;
- case SND_SOC_DAPM_POST_PMU:
- /* Set sample rate dependent paramater */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_FS_CFG + (comp * 8),
- 0x07, rate);
- snd_soc_write(codec, TAPAN_A_CDC_COMP0_B3_CTL + (comp * 8),
- comp_params->rms_meter_resamp_fact);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0x0F, comp_params->peak_det_timeout);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0xF0, comp_params->rms_meter_div_fact << 4);
- /* Compander enable */
- snd_soc_update_bits(codec, TAPAN_A_CDC_COMP0_B1_CTL +
- (comp * 8), emask, emask);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- /* Halt compander */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
- 1 << 2, 1 << 2);
- /* Wait up to a second for shutdown complete */
- timeout = jiffies + HZ;
- do {
- if ((snd_soc_read(codec,
- TAPAN_A_CDC_COMP0_SHUT_DOWN_STATUS +
- (comp * 8)) & mask) == mask)
- break;
- } while (!(timedout = time_after(jiffies, timeout)));
- dev_dbg(codec->dev, "%s: Compander %d shutdown %s in %dms\n",
- __func__, comp, timedout ? "timedout" : "completed",
- jiffies_to_msecs(timeout - HZ - jiffies));
- break;
- case SND_SOC_DAPM_POST_PMD:
- /* Disable compander */
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_COMP0_B1_CTL + (comp * 8),
- emask, 0x00);
+
/* Turn off the clock for compander in pair */
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_RX_B2_CTL,
mask << comp_shift[comp], 0);
+
/* Set gain source to register */
tapan_config_gain_compander(codec, comp, false);
break;
@@ -2267,6 +2357,7 @@
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
enum wcd9xxx_notify_event e_pre_on, e_post_off;
u8 req_clsh_state;
+ u32 pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_OFF;
dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
@@ -2282,23 +2373,32 @@
return -EINVAL;
}
+ if (tapan->comp_enabled[COMPANDER_1])
+ pa_settle_time = TAPAN_HPH_PA_SETTLE_COMP_ON;
+
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Let MBHC module know PA is turning on */
wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
break;
-
case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev, "%s: sleep %d ms after %s PA enable.\n",
+ __func__, pa_settle_time / 1000, w->name);
+ /* Time needed for PA to settle */
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+
wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
req_clsh_state,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- usleep_range(5000, 5010);
break;
-
case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev, "%s: sleep %d ms after %s PA disable.\n",
+ __func__, pa_settle_time / 1000, w->name);
+ /* Time needed for PA to settle */
+ usleep_range(pa_settle_time, pa_settle_time + 1000);
+
/* Let MBHC module know PA turned off */
wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
@@ -2306,10 +2406,6 @@
req_clsh_state,
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
-
- dev_dbg(codec->dev, "%s: sleep 10 ms after %s PA disable.\n",
- __func__, w->name);
- usleep_range(5000, 5010);
break;
}
return 0;
@@ -2549,6 +2645,7 @@
{"RX1 MIX1", NULL, "COMP1_CLK"},
{"RX2 MIX1", NULL, "COMP1_CLK"},
{"RX3 MIX1", NULL, "COMP2_CLK"},
+ {"RX4 MIX1", NULL, "COMP0_CLK"},
{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
@@ -3019,6 +3116,7 @@
u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
u16 rx_fs_reg;
u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
+ u8 rdac5_mux;
struct snd_soc_codec *codec = dai->codec;
struct wcd9xxx_ch *ch;
struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
@@ -3036,6 +3134,9 @@
rx_mix_1_reg_1 = TAPAN_A_CDC_CONN_RX1_B1_CTL;
+ rdac5_mux = snd_soc_read(codec, TAPAN_A_CDC_CONN_MISC);
+ rdac5_mux = (rdac5_mux & 0x04) >> 2;
+
for (j = 0; j < NUM_INTERPOLATORS; j++) {
rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
@@ -3060,9 +3161,14 @@
snd_soc_update_bits(codec, rx_fs_reg,
0xE0, rx_fs_rate_reg_val);
- if (comp_rx_path[j] < COMPANDER_MAX)
- tapan->comp_fs[comp_rx_path[j]]
- = compander_fs;
+ if (comp_rx_path[j] < COMPANDER_MAX) {
+ if ((j == 3) && (rdac5_mux == 1))
+ tapan->comp_fs[COMPANDER_0] =
+ compander_fs;
+ else
+ tapan->comp_fs[comp_rx_path[j]]
+ = compander_fs;
+ }
}
if (j <= 1)
rx_mix_1_reg_1 += 3;
@@ -3893,13 +3999,13 @@
SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
tapan_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", TAPAN_A_MICB_1_CTL, 7, 0,
@@ -4239,12 +4345,11 @@
TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0xF4),
TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x08),
TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_1, 0x5B),
- TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x60),
+ TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_3, 0x6F),
/* TODO: Check below reg writes conflict with above */
/* PROGRAM_THE_0P85V_VBG_REFERENCE = V_0P858V */
TAPAN_REG_VAL(TAPAN_A_BIAS_CURR_CTL_2, 0x04),
- TAPAN_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_4, 0x54),
TAPAN_REG_VAL(TAPAN_A_RX_HPH_CHOP_CTL, 0x74),
TAPAN_REG_VAL(TAPAN_A_RX_BUCK_BIAS1, 0x62),
@@ -4414,6 +4519,15 @@
{TAPAN_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
{TAPAN_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
{TAPAN_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
+
+ /*
+ * Setup wavegen timer to 20msec and disable chopper
+ * as default. This corresponds to Compander OFF
+ */
+ {TAPAN_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
+ {TAPAN_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
+ {TAPAN_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
+ {TAPAN_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
};
static void tapan_codec_init_reg(struct snd_soc_codec *codec)
@@ -4596,30 +4710,6 @@
return 0;
}
-static enum wcd9xxx_buck_volt tapan_codec_get_buck_mv(
- struct snd_soc_codec *codec)
-{
- int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
- struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = tapan->resmgr.pdata;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strncmp(pdata->regulator[i].name,
- WCD9XXX_SUPPLY_BUCK_NAME,
- sizeof(WCD9XXX_SUPPLY_BUCK_NAME))) {
- if ((pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_1P8) ||
- (pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_2P15))
- buck_volt = pdata->regulator[i].min_uV;
- break;
- }
- }
- pr_debug("%s: S4 voltage requested is %d\n", __func__, buck_volt);
- return buck_volt;
-}
-
static int tapan_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
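
The compander rework collapses four DAPM events into two: setup (sample rate, clock, reset toggle, gain source, enable, discharge, RMS/peak parameters) all happens at PRE_PMU, and teardown at PRE_PMD, replacing the old shutdown-status polling loop. A skeleton of that two-phase handler with the register I/O reduced to prints:

    #include <stdio.h>

    enum { DAPM_PRE_PMU, DAPM_PRE_PMD };

    /* Order matters: clock and reset before enable, discharge after enable,
     * and on teardown disable before the clock is dropped. */
    static int compander_event(int comp, int event)
    {
        switch (event) {
        case DAPM_PRE_PMU:
            printf("comp%d: set fs, clk on, reset toggle, gain src, enable\n",
                   comp);
            printf("comp%d: discharge, program rms/peak params\n", comp);
            break;
        case DAPM_PRE_PMD:
            printf("comp%d: disable, reset toggle, clk off, gain src=reg\n",
                   comp);
            break;
        default:
            return -1;
        }
        return 0;
    }

    int main(void) { return compander_event(1, DAPM_PRE_PMU); }
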
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index edb24fc..de60430 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -63,6 +63,7 @@
static int lsm_mux_slim_port;
static int slim0_rx_aanc_fb_port;
static int msm_route_ec_ref_rx = 3; /* NONE */
+static uint32_t voc_session_id = ALL_SESSION_VSID;
enum {
MADNONE,
@@ -572,7 +573,7 @@
}
if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
|| (msm_bedais[reg].port_id == VOICE_RECORD_TX))
- voc_start_record(msm_bedais[reg].port_id, set);
+ voc_start_record(msm_bedais[reg].port_id, set, voc_session_id);
mutex_unlock(&routing_lock);
}
@@ -2632,6 +2633,30 @@
msm_routing_put_rms_value_control),
};
+static int msm_voc_session_id_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ voc_session_id = ucontrol->value.integer.value[0];
+
+ pr_debug("%s: voc_session_id=%u\n", __func__, voc_session_id);
+
+ return 0;
+}
+
+static int msm_voc_session_id_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = voc_session_id;
+
+ return 0;
+}
+
+static struct snd_kcontrol_new msm_voc_session_controls[] = {
+ SOC_SINGLE_MULTI_EXT("Voc VSID", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 1, msm_voc_session_id_get,
+ msm_voc_session_id_put),
+};
+
static const struct snd_kcontrol_new eq_enable_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia1 EQ Enable", SND_SOC_NOPM,
MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_eq_enable_mixer,
@@ -3910,6 +3935,10 @@
snd_soc_add_platform_controls(platform,
get_rms_controls,
ARRAY_SIZE(get_rms_controls));
+
+ snd_soc_add_platform_controls(platform, msm_voc_session_controls,
+ ARRAY_SIZE(msm_voc_session_controls));
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 7243f19..056e2dc 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -130,6 +130,28 @@
return ret;
}
+static bool voice_is_valid_session_id(uint32_t session_id)
+{
+ bool ret = false;
+
+ switch (session_id) {
+ case VOICE_SESSION_VSID:
+ case VOICE2_SESSION_VSID:
+ case VOLTE_SESSION_VSID:
+ case VOIP_SESSION_VSID:
+ case QCHAT_SESSION_VSID:
+ case ALL_SESSION_VSID:
+ ret = true;
+ break;
+ default:
+ pr_err("%s: Invalid session_id : %x\n", __func__, session_id);
+
+ break;
+ }
+
+ return ret;
+}
+
static u16 voice_get_mvm_handle(struct voice_data *v)
{
if (v == NULL) {
@@ -2989,6 +3011,22 @@
v->music_info.force = 1;
voice_cvs_stop_playback(v);
voice_cvs_stop_record(v);
+ /* If a voice call is active during VoLTE, SRVCC happens. Start
+ * recording on the voice session if recording was started during
+ * VoLTE.
+ */
+ if (is_volte_session(v->session_id) &&
+ ((common.voice[VOC_PATH_PASSIVE].voc_state == VOC_RUN) ||
+ (common.voice[VOC_PATH_PASSIVE].voc_state == VOC_CHANGE))) {
+ if (v->rec_info.rec_enable) {
+ voice_cvs_start_record(
+ &common.voice[VOC_PATH_PASSIVE],
+ v->rec_info.rec_mode);
+ common.srvcc_rec_flag = true;
+
+ pr_debug("%s: switch recording, srvcc_rec_flag %d\n",
+ __func__, common.srvcc_rec_flag);
+ }
+ }
/* send stop voice cmd */
voice_send_stop_voice_cmd(v);
@@ -3561,17 +3599,35 @@
return ret;
}
-int voc_start_record(uint32_t port_id, uint32_t set)
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id)
{
int ret = 0;
int rec_mode = 0;
u16 cvs_handle;
- int i, rec_set = 0;
+ int rec_set = 0;
+ struct voice_session_itr itr;
+ struct voice_data *v = NULL;
- for (i = 0; i < MAX_VOC_SESSIONS; i++) {
- struct voice_data *v = &common.voice[i];
- pr_debug("%s: i:%d port_id: %d, set: %d\n",
- __func__, i, port_id, set);
+ /* check if session_id is valid */
+ if (!voice_is_valid_session_id(session_id)) {
+ pr_err("%s: Invalid session id:%u\n", __func__,
+ session_id);
+
+ return -EINVAL;
+ }
+
+ voice_itr_init(&itr, session_id);
+ pr_debug("%s: session_id:%u\n", __func__, session_id);
+
+ while (voice_itr_get_next_session(&itr, &v)) {
+ if (v == NULL) {
+ pr_err("%s: v is NULL, sessionid:%u\n", __func__,
+ session_id);
+
+ break;
+ }
+ pr_debug("%s: port_id: %d, set: %d, v: %p\n",
+ __func__, port_id, set, v);
mutex_lock(&v->lock);
rec_mode = v->rec_info.rec_mode;
@@ -3579,13 +3635,11 @@
if (set) {
if ((v->rec_route_state.ul_flag != 0) &&
(v->rec_route_state.dl_flag != 0)) {
- pr_debug("%s: i=%d, rec mode already set.\n",
- __func__, i);
+ pr_debug("%s: rec mode already set.\n",
+ __func__);
+
mutex_unlock(&v->lock);
- if (i < MAX_VOC_SESSIONS)
- continue;
- else
- return 0;
+ continue;
}
if (port_id == VOICE_RECORD_TX) {
@@ -3615,13 +3669,10 @@
} else {
if ((v->rec_route_state.ul_flag == 0) &&
(v->rec_route_state.dl_flag == 0)) {
- pr_debug("%s: i=%d, rec already stops.\n",
- __func__, i);
+ pr_debug("%s: rec already stops.\n",
+ __func__);
mutex_unlock(&v->lock);
- if (i < MAX_VOC_SESSIONS)
- continue;
- else
- return 0;
+ continue;
}
if (port_id == VOICE_RECORD_TX) {
@@ -3650,8 +3701,8 @@
}
}
}
- pr_debug("%s: i=%d, mode =%d, set =%d\n", __func__,
- i, rec_mode, rec_set);
+ pr_debug("%s: mode =%d, set =%d\n", __func__,
+ rec_mode, rec_set);
cvs_handle = voice_get_cvs_handle(v);
if (cvs_handle != 0) {
@@ -3661,6 +3712,18 @@
ret = voice_cvs_stop_record(v);
}
+ /* During SRVCC, recording switches from the VoLTE session to the
+ * voice session, so stopping recording must also stop it on the
+ * voice session.
+ */
+ if ((!rec_set) && common.srvcc_rec_flag) {
+ pr_debug("%s, srvcc_rec_flag:%d\n", __func__,
+ common.srvcc_rec_flag);
+
+ voice_cvs_stop_record(&common.voice[VOC_PATH_PASSIVE]);
+ common.srvcc_rec_flag = false;
+ }
+
/* Cache the value */
v->rec_info.rec_enable = rec_set;
v->rec_info.rec_mode = rec_mode;
@@ -4590,6 +4653,9 @@
c->voice[i].shmem_info.mem_handle = 0;
}
}
+ /* clean up srvcc rec flag */
+ c->srvcc_rec_flag = false;
+
return 0;
}
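
voc_start_record() now walks only the sessions selected by the caller's VSID instead of every slot, reusing the existing voice_session_itr helpers. A sketch of the iterate-or-broadcast idea (the VSID values below are hypothetical placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VOC_SESSIONS 5
    #define ALL_SESSION_VSID 0xFFFFFFFF

    struct voice_data { uint32_t session_id; };

    static struct voice_data sessions[MAX_VOC_SESSIONS] = {
        { 0x10C01000 }, { 0x10DC1000 }, { 0x10C02000 },
        { 0x10C03000 }, { 0x10C04000 }, /* hypothetical VSIDs */
    };

    /* Returns the next matching session, or NULL when done. Matching every
     * session for the broadcast VSID mirrors voice_itr_get_next_session(). */
    static struct voice_data *next_session(uint32_t vsid, int *cursor)
    {
        while (*cursor < MAX_VOC_SESSIONS) {
            struct voice_data *v = &sessions[(*cursor)++];

            if (vsid == ALL_SESSION_VSID || v->session_id == vsid)
                return v;
        }
        return NULL;
    }

    int main(void)
    {
        int cursor = 0;
        struct voice_data *v;

        while ((v = next_session(0x10DC1000, &cursor)))
            printf("record on session %#x\n", (unsigned)v->session_id);
        return 0;
    }
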
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 1e9c813..20f2857 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1338,6 +1338,8 @@
struct dtmf_driver_info dtmf_info;
struct voice_data voice[MAX_VOC_SESSIONS];
+
+ bool srvcc_rec_flag;
};
struct voice_session_itr {
@@ -1438,7 +1440,7 @@
uint32_t voc_get_session_id(char *name);
int voc_start_playback(uint32_t set, uint16_t port_id);
-int voc_start_record(uint32_t port_id, uint32_t set);
+int voc_start_record(uint32_t port_id, uint32_t set, uint32_t session_id);
int voice_get_idx_for_session(u32 session_id);
#endif