Merge "SSR: Make smem ramdump devices unique" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..6928611
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,16 @@
+* TZLOG (TrustZone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (TrustZone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg : Offset and size of the register set for the device
+
+Example:
+
+ qcom,tz-log@fe805720 {
+ compatible = "qcom,tz-log";
+ reg = <0xfe805720 0x1000>;
+ };
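
For orientation, here is a minimal sketch of a platform driver that could bind
to this node and expose the region through debugfs. The tzlog_* names are
illustrative, not the actual tz_log implementation:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/of.h>
    #include <linux/io.h>
    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static void __iomem *tzlog_base;
    static size_t tzlog_len;

    /* Dump the diagnostic region as raw words; the real format is richer. */
    static int tzlog_show(struct seq_file *s, void *unused)
    {
            size_t i;

            for (i = 0; i < tzlog_len; i += 4)
                    seq_printf(s, "%08x\n", readl_relaxed(tzlog_base + i));
            return 0;
    }

    static int tzlog_open(struct inode *inode, struct file *file)
    {
            return single_open(file, tzlog_show, NULL);
    }

    static const struct file_operations tzlog_fops = {
            .open    = tzlog_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int tzlog_probe(struct platform_device *pdev)
    {
            struct resource *res;

            /* The "reg" property supplies offset and size */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENODEV;
            tzlog_len = resource_size(res);
            tzlog_base = devm_ioremap(&pdev->dev, res->start, tzlog_len);
            if (!tzlog_base)
                    return -ENOMEM;
            debugfs_create_file("tz_log", 0444, NULL, NULL, &tzlog_fops);
            return 0;
    }

    static const struct of_device_id tzlog_match[] = {
            { .compatible = "qcom,tz-log" },
            {}
    };

    static struct platform_driver tzlog_driver = {
            .probe  = tzlog_probe,
            .driver = {
                    .name           = "tz_log",
                    .of_match_table = tzlog_match,
            },
    };
    module_platform_driver(tzlog_driver);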
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index 0c1762d..35ac0ec 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -10,6 +10,8 @@
- qcom,sdcc-clk-rates : specifies supported SDCC clock frequencies, Units - Hz.
- qcom,sdcc-sup-voltages: specifies supported voltage ranges for card. Should always be
specified in pairs (min, max), Units - mV.
+ - <supply-name>-supply: phandle to the regulator device tree node
+ "supply-name" examples are "vdd", "vdd-io".
Optional Properties:
- cell-index - defines slot ID.
@@ -21,7 +23,17 @@
- qcom,sdcc-nonremovable - specifies whether the card in slot is
hot pluggable or hard wired.
- qcom,sdcc-disable_cmd23 - disable sending CMD23 to card when controller can't support it.
- - qcom,sdcc-hs200 - enable eMMC4.5 HS200 bus speed mode
+ - qcom,sdcc-bus-speed-mode - specifies the bus speed modes supported by the host.
+ - qcom,sdcc-current-limit - specifies the max. current (in mA) the host can drive.
+ - qcom,sdcc-xpc - specifies if the host can supply more than 150mA for SDXC cards.
+
+In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
+ - qcom,sdcc-<supply>-always_on - specifies whether the supply should always be kept on.
+ - qcom,sdcc-<supply>-lpm_sup - specifies whether the supply can be kept in low power mode (lpm).
+ - qcom,sdcc-<supply>-voltage_level - specifies voltage levels for the supply. Should be
+ specified in pairs (min, max), units uV.
+ - qcom,sdcc-<supply>-current_level - specifies load levels for the supply in lpm or
+ high power mode (hpm). Should be specified in pairs (lpm, hpm), units uA.
Example:
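
On the driver side, these supply properties map onto the standard OF and
regulator helpers. A sketch only (not the actual msmsdcc code), assuming
<linux/of.h> and <linux/regulator/consumer.h>; sdcc_setup_vdd is a
hypothetical helper name:

    static int sdcc_setup_vdd(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.of_node;
            struct regulator *vdd;
            u32 lvl[2];
            int ret = 0;

            vdd = regulator_get(&pdev->dev, "vdd");  /* from vdd-supply */
            if (IS_ERR(vdd))
                    return PTR_ERR(vdd);

            /* qcom,sdcc-vdd-voltage_level = <min max>, units uV */
            if (!of_property_read_u32_array(np, "qcom,sdcc-vdd-voltage_level",
                                            lvl, 2))
                    ret = regulator_set_voltage(vdd, lvl[0], lvl[1]);

            /* Boolean properties such as always_on are encoded by presence */
            if (!ret && of_property_read_bool(np, "qcom,sdcc-vdd-always_on"))
                    ret = regulator_enable(vdd);

            if (ret)
                    regulator_put(vdd);
            return ret;
    }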
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
new file mode 100644
index 0000000..b6086e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -0,0 +1,135 @@
+Qualcomm audio devices for ALSA SoC (ASoC)
+
+* msm-pcm
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-dsp"
+
+* msm-pcm-routing
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-routing"
+
+* msm-pcm-lpa
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-lpa"
+
+* msm-voip-dsp
+
+Required properties:
+
+ - compatible : "qcom,msm-voip-dsp"
+
+* msm-stub-codec
+
+Required properties:
+
+ - compatible : "qcom,msm-stub-codec"
+
+* msm-dai-fe
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-fe"
+
+* msm-auxpcm
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-auxpcm-resource"
+
+ - qcom,msm-cpudai-auxpcm-clk: clock for auxpcm
+
+ - qcom,msm-cpudai-auxpcm-mode: mode information
+ 0 - for PCM
+
+ - qcom,msm-cpudai-auxpcm-sync: sync information
+
+ - qcom,msm-cpudai-auxpcm-frame: No. of bytes per frame
+ 5 - 256BPF
+
+ - qcom,msm-cpudai-auxpcm-quant: Type of quantization
+ 2 - Linear quantization
+
+ - qcom,msm-cpudai-auxpcm-slot: Slot number for multichannel scenario
+ Value is 1
+
+ - qcom,msm-cpudai-auxpcm-data: Data field - 0
+
+ - qcom,msm-cpudai-auxpcm-pcm-clk-rate: Clock rate for pcm - 2048000
+
+[Second Level Nodes]
+
+Required Properties:
+
+ - qcom,msm-auxpcm-dev-id: This property specifies the device
+ port id.
+ For Rx device, the port id is 4106
+ and for Tx device, the port id is 4107
+
+ - compatible: "qcom,msm-auxpcm-dev"
+
+* msm-pcm-hostless
+
+Required properties:
+
+ - compatible : "qcom,msm-pcm-hostless"
+
+Example:
+
+ qcom,msm-pcm {
+ compatible = "qcom,msm-pcm-dsp";
+ };
+
+ qcom,msm-pcm-routing {
+ compatible = "qcom,msm-pcm-routing";
+ };
+
+ qcom,msm-pcm-lpa {
+ compatible = "qcom,msm-pcm-lpa";
+ };
+
+ qcom,msm-voip-dsp {
+ compatible = "qcom,msm-voip-dsp";
+ };
+
+ qcom,msm-stub-codec {
+ compatible = "qcom,msm-stub-codec";
+ };
+
+ qcom,msm-dai-fe {
+ compatible = "qcom,msm-dai-fe";
+ };
+
+ qcom,msm-auxpcm {
+ compatible = "qcom,msm-auxpcm-resource";
+ qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
+ qcom,msm-cpudai-auxpcm-mode = <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>;
+ qcom,msm-cpudai-auxpcm-quant = <2>;
+ qcom,msm-cpudai-auxpcm-slot = <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
+
+ qcom,msm-auxpcm-rx {
+ qcom,msm-auxpcm-dev-id = <4106>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
+
+ qcom,msm-auxpcm-tx {
+ qcom,msm-auxpcm-dev-id = <4107>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
+ };
+
+ qcom,msm-pcm-hostless {
+ compatible = "qcom,msm-pcm-hostless";
+ };
+
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 40b3bc3..a2b7dfc 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -3,7 +3,13 @@
Required properties :
- compatible : should be "qcom,dwc-usb3-msm"
- reg : offset and length of the register set in the memory map
-- interrupts: IRQ line
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+ "irq" : Interrupt for DWC3 core
+ "otg_irq" : Interrupt for DWC3 core's OTG Events
+- <supply-name>-supply: phandle to the regulator device tree node
+ Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
+ "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3".
- qcom,dwc-usb3-msm-dbm-eps: Number of endpoints available for
the DBM (Device Bus Manager). The DBM is a HW unit which is part of
the MSM USB3.0 core (which also includes the Synopsys DesignWare
@@ -12,7 +18,13 @@
Example MSM USB3.0 controller device node :
usb@f9200000 {
compatible = "qcom,dwc-usb3-msm";
- reg = <0xf9200000 0xCCFF>;
- interrupts = <0 131 0>
+ reg = <0xF9200000 0xFA000>;
+ interrupts = <0 131 0 0 179 0>;
+ interrupt-names = "irq", "otg_irq";
+ SSUSB_VDDCX-supply = <&pm8841_s2>;
+ SSUSB_1p8-supply = <&pm8941_l6>;
+ HSUSB_VDDCX-supply = <&pm8841_s2>;
+ HSUSB_1p8-supply = <&pm8941_l6>;
+ HSUSB_3p3-supply = <&pm8941_l24>;
qcom,dwc-usb3-msm-dbm-eps = <4>
};
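
As a sketch of the consumer side (not the dwc3-msm driver itself, and with a
hypothetical function name), the named IRQs and supplies above would typically
be resolved like this:

    static int ssusb_get_resources(struct platform_device *pdev)
    {
            struct regulator *vddcx;
            int irq, otg_irq;

            /* Resolved through the interrupt-names strings */
            irq = platform_get_irq_byname(pdev, "irq");
            otg_irq = platform_get_irq_byname(pdev, "otg_irq");
            if (irq < 0 || otg_irq < 0)
                    return -ENODEV;

            /* Resolved through the SSUSB_VDDCX-supply phandle */
            vddcx = regulator_get(&pdev->dev, "SSUSB_VDDCX");
            if (IS_ERR(vddcx))
                    return PTR_ERR(vddcx);

            return 0;
    }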
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 64349f0..9a1c759 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -508,6 +508,11 @@
Also note the kernel might malfunction if you disable
some critical bits.
+ cma=nn[MG] [ARM,KNL]
+ Sets the size of kernel global memory area for contiguous
+ memory allocations. For more information, see
+ include/linux/dma-contiguous.h
+
cmo_free_hint= [PPC] Format: { yes | no }
Specify whether pages are marked as being inactive
when they are freed. This is used in CMO environments
@@ -515,6 +520,10 @@
a hypervisor.
Default: yes
+ coherent_pool=nn[KMG] [ARM,KNL]
+ Sets the size of memory pool for coherent, atomic dma
+ allocations if Contiguous Memory Allocator (CMA) is used.
+
code_bytes [X86] How many bytes of object code to print
in an oops report.
Range: 0 - 8192
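
As a usage sketch for the two new parameters: booting with
"cma=64M coherent_pool=2M" reserves a 64 MiB global CMA area plus a 2 MiB
atomic pool, and a driver can then draw contiguous pages from CMA. The two
dma_*_contiguous() helpers are the ones this patchset introduces in
<linux/dma-contiguous.h>; cma_grab_1m/cma_release_1m are hypothetical
wrappers:

    /* Allocate, then free, 1 MiB of physically contiguous pages. */
    static struct page *cma_grab_1m(struct device *dev)
    {
            int count = 1 << (20 - PAGE_SHIFT);     /* pages in 1 MiB */

            /* The third argument is the requested alignment, as a page order */
            return dma_alloc_from_contiguous(dev, count, 20 - PAGE_SHIFT);
    }

    static void cma_release_1m(struct device *dev, struct page *pages)
    {
            dma_release_from_contiguous(dev, pages, 1 << (20 - PAGE_SHIFT));
    }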
diff --git a/arch/Kconfig b/arch/Kconfig
index bba59d1..0d88760 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -142,6 +142,9 @@
config HAVE_DMA_ATTRS
bool
+config HAVE_DMA_CONTIGUOUS
+ bool
+
config USE_GENERIC_SMP_HELPERS
bool
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5e1cd06..bc0191a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,8 @@
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 6bcc3c2..ad6f99b 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -83,12 +83,21 @@
compatible = "qcom,msm-sdcc";
reg = <0xf9824000 0x1000>;
interrupts = <0 123 0>;
+ vdd-supply = <&pm8941_l20>;
+ vdd-io-supply = <&pm8941_s3>;
+
+ qcom,sdcc-vdd-voltage_level = <2950000 2950000>;
+ qcom,sdcc-vdd-current_level = <800 500000>;
+
+ qcom,sdcc-vdd-io-always_on;
+ qcom,sdcc-vdd-io-voltage_level = <1800000 1800000>;
+ qcom,sdcc-vdd-io-current_level = <250 154000>;
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
qcom,sdcc-sup-voltages = <2950 2950>;
qcom,sdcc-bus-width = <8>;
- qcom,sdcc-hs200;
qcom,sdcc-nonremovable;
+ qcom,sdcc-bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
};
qcom,sdcc@f98a4000 {
@@ -96,10 +105,23 @@
compatible = "qcom,msm-sdcc";
reg = <0xf98a4000 0x1000>;
interrupts = <0 125 0>;
+ vdd-supply = <&pm8941_l21>;
+ vdd-io-supply = <&pm8941_l13>;
+
+ qcom,sdcc-vdd-voltage_level = <2950000 2950000>;
+ qcom,sdcc-vdd-current_level = <9000 800000>;
+
+ qcom,sdcc-vdd-io-always_on;
+ qcom,sdcc-vdd-io-lpm_sup;
+ qcom,sdcc-vdd-io-voltage_level = <1800000 2950000>;
+ qcom,sdcc-vdd-io-current_level = <6 22000>;
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
qcom,sdcc-sup-voltages = <2950 2950>;
qcom,sdcc-bus-width = <4>;
+ qcom,sdcc-xpc;
+ qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ qcom,sdcc-current-limit = <800>;
};
qcom,sdcc@f9864000 {
@@ -111,6 +133,7 @@
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
qcom,sdcc-sup-voltages = <1800 1800>;
qcom,sdcc-bus-width = <4>;
+ qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
status = "disable";
};
@@ -123,6 +146,7 @@
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
qcom,sdcc-sup-voltages = <1800 1800>;
qcom,sdcc-bus-width = <4>;
+ qcom,sdcc-bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
status = "disable";
};
@@ -297,7 +321,8 @@
qcom,ssusb@F9200000 {
compatible = "qcom,dwc-usb3-msm";
reg = <0xF9200000 0xFA000>;
- interrupts = <0 131 0>;
+ interrupts = <0 131 0 0 179 0>;
+ interrupt-names = "irq", "otg_irq";
SSUSB_VDDCX-supply = <&pm8841_s2>;
SSUSB_1p8-supply = <&pm8941_l6>;
HSUSB_VDDCX-supply = <&pm8841_s2>;
@@ -318,52 +343,55 @@
qcom,firmware-name = "adsp";
};
- qcom,msm-pcm {
- compatible = "qcom,msm-pcm-dsp";
- };
+ qcom,msm-pcm {
+ compatible = "qcom,msm-pcm-dsp";
+ };
- qcom,msm-pcm-routing {
- compatible = "qcom,msm-pcm-routing";
- };
+ qcom,msm-pcm-routing {
+ compatible = "qcom,msm-pcm-routing";
+ };
- qcom,msm-pcm-lpa {
- compatible = "qcom,msm-pcm-lpa";
- };
+ qcom,msm-pcm-lpa {
+ compatible = "qcom,msm-pcm-lpa";
+ };
- qcom,msm-voip-dsp {
- compatible = "qcom,msm-voip-dsp";
- };
+ qcom,msm-voip-dsp {
+ compatible = "qcom,msm-voip-dsp";
+ };
- qcom,msm-stub-codec {
- compatible = "qcom,msm-stub-codec";
- };
+ qcom,msm-stub-codec {
+ compatible = "qcom,msm-stub-codec";
+ };
- qcom,msm-dai-fe {
- compatible = "qcom,msm-dai-fe";
- };
+ qcom,msm-dai-fe {
+ compatible = "qcom,msm-dai-fe";
+ };
- qcom,msm-dai-q6 {
- compatible = "qcom,msm-dai-q6";
- qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
- qcom,msm-cpudai-auxpcm-mode = <0>;
- qcom,msm-cpudai-auxpcm-sync = <1>;
- qcom,msm-cpudai-auxpcm-frame = <5>;
- qcom,msm-cpudai-auxpcm-quant = <2>;
- qcom,msm-cpudai-auxpcm-slot = <1>;
- qcom,msm-cpudai-auxpcm-data = <0>;
- qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
+ qcom,msm-auxpcm {
+ compatible = "qcom,msm-auxpcm-resource";
+ qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
+ qcom,msm-cpudai-auxpcm-mode = <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>;
+ qcom,msm-cpudai-auxpcm-quant = <2>;
+ qcom,msm-cpudai-auxpcm-slot = <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
- qcom,msm-dai-q6-rx {
- qcom,msm-dai-q6-id = <4106>;
- };
- qcom,msm-dai-q6-tx {
- qcom,msm-dai-q6-id = <4107>;
- };
- };
+ qcom,msm-auxpcm-rx {
+ qcom,msm-auxpcm-dev-id = <4106>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
- qcom,msm-pcm-hostless {
- compatible = "qcom,msm-pcm-hostless";
- };
+ qcom,msm-auxpcm-tx {
+ qcom,msm-auxpcm-dev-id = <4107>;
+ compatible = "qcom,msm-auxpcm-dev";
+ };
+ };
+
+ qcom,msm-pcm-hostless {
+ compatible = "qcom,msm-pcm-hostless";
+ };
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index a163829..dd9418f 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -304,7 +304,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
CONFIG_LEDS_GPIO=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index d8e2e3c..b332e6b 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -305,7 +305,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
CONFIG_LEDS_MSM_PDM=y
diff --git a/arch/arm/configs/msm7630-perf_defconfig b/arch/arm/configs/msm7630-perf_defconfig
index 7de7fee..262c983 100644
--- a/arch/arm/configs/msm7630-perf_defconfig
+++ b/arch/arm/configs/msm7630-perf_defconfig
@@ -330,7 +330,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
# CONFIG_MMC_MSM_SDC1_SUPPORT is not set
CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm7630_defconfig b/arch/arm/configs/msm7630_defconfig
index bead86e..7eaaf35 100644
--- a/arch/arm/configs/msm7630_defconfig
+++ b/arch/arm/configs/msm7630_defconfig
@@ -328,7 +328,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
# CONFIG_MMC_MSM_SDC1_SUPPORT is not set
CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index fe30dc8..ea4c0f6 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -68,6 +68,7 @@
CONFIG_MSM_PIL_QDSP6V3=y
CONFIG_MSM_PIL_TZAPPS=y
CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
@@ -390,7 +391,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index 45339ee..20557e5 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -67,6 +67,7 @@
CONFIG_MSM_PIL_QDSP6V3=y
CONFIG_MSM_PIL_TZAPPS=y
CONFIG_MSM_PIL_DSPS=y
+CONFIG_MSM_PIL_VIDC=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
@@ -392,7 +393,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC2_8_BIT_SUPPORT=y
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index dd33d76..6c213c3 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -426,7 +426,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
# CONFIG_MMC_MSM_SDC2_SUPPORT is not set
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index ad834a7..9eea6f6 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -428,7 +428,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y
# CONFIG_MMC_MSM_SDC2_SUPPORT is not set
CONFIG_MMC_MSM_SDC3_SUPPORT=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index 37bc416..b4cfe4b 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -244,7 +244,6 @@
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
-CONFIG_MMC_MSM_CARD_HW_DETECTION=y
CONFIG_MMC_MSM_SPS_SUPPORT=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..3ed37b4
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
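
Beyond the global area set by cma=, a board file can reserve a device-private
region at early init. A sketch assuming the dma_declare_contiguous() helper
from this series; example_device is a hypothetical platform_device defined
elsewhere:

    /* Called from a machine's .reserve hook, before paging is finalized. */
    static void __init example_reserve(void)
    {
            /*
             * 16 MiB private CMA area for the (hypothetical) example_device,
             * placed anywhere in memory (base 0, no upper limit).
             */
            if (dma_declare_contiguous(&example_device.dev, SZ_16M, 0, 0))
                    pr_warn("example: CMA reservation failed\n");
    }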
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 5f731df..cd5be28 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -36,6 +36,7 @@
#define MT_MEMORY_R 15
#define MT_MEMORY_RW 16
#define MT_MEMORY_RX 17
+#define MT_MEMORY_DMA_READY 18
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index a734547..562f13c 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -142,6 +142,8 @@
unsigned int xpc_cap;
/* Supported UHS-I Modes */
unsigned int uhs_caps;
+ /* More capabilities */
+ unsigned int uhs_caps2;
void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
unsigned int status_irq;
unsigned int status_gpio;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e30f1d8..b012f0f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -941,12 +942,8 @@
machine_desc = mdesc;
machine_name = mdesc->name;
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
+ setup_dma_zone(mdesc);
+
if (mdesc->restart_mode)
reboot_setup(&mdesc->restart_mode);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 4f0c261..466d65b 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -136,6 +136,7 @@
select MSM_MULTIMEDIA_USE_ION
select MSM_PM8X60 if PM
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8960
bool "MSM8960"
@@ -169,6 +170,7 @@
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8930
bool "MSM8930"
@@ -199,6 +201,7 @@
select MULTI_IRQ_HANDLER
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
+ select ARM_HAS_SG_CHAIN
config ARCH_APQ8064
bool "APQ8064"
@@ -224,6 +227,7 @@
select QCACHE
select MIGHT_HAVE_PCI
select ARCH_SUPPORTS_MSI
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8974
bool "MSM8974"
@@ -247,6 +251,7 @@
select MSM_QDSP6V2_CODECS
select MSM_AUDIO_QDSP6V2 if SND_SOC
select MSM_RPM_REGULATOR_SMD
+ select ARM_HAS_SG_CHAIN
config ARCH_FSM9XXX
bool "FSM9XXX"
@@ -278,6 +283,7 @@
select MSM_QDSP6_APR
select MSM_AUDIO_QDSP6 if SND_SOC
select FIQ
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8625
bool "MSM8625"
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 6c14efa..9809bdf 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -154,6 +154,8 @@
struct l2_level *l2_vote;
struct vreg vreg[NUM_VREG];
unsigned int *hfpll_vdd_tbl;
+ bool regulators_initialized;
+ bool clocks_initialized;
};
static unsigned int hfpll_vdd_tbl_8960[] = {
@@ -842,6 +844,8 @@
[PVS_FAST] = acpu_freq_tbl_8930_fast,
};
+static struct acpu_level *max_acpu_level;
+
static unsigned long acpuclk_8960_get_rate(int cpu)
{
return scalable[cpu].current_speed->khz;
@@ -1305,7 +1309,7 @@
}
/* Initialize a HFPLL at a given rate and enable it. */
-static void __init hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
+static void __cpuinit hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
{
pr_debug("Initializing HFPLL%d\n", sc - scalable);
@@ -1326,69 +1330,66 @@
}
/* Voltage regulator initialization. */
-static void __init regulator_init(struct acpu_level *lvl)
+static void __cpuinit regulator_init(int cpu, struct acpu_level *lvl)
{
- int cpu, ret;
- struct scalable *sc;
+ int ret;
+ struct scalable *sc = &scalable[cpu];
unsigned int vdd_mem, vdd_dig, vdd_core;
vdd_mem = calculate_vdd_mem(lvl);
vdd_dig = calculate_vdd_dig(lvl);
- for_each_possible_cpu(cpu) {
- sc = &scalable[cpu];
-
- /* Set initial vdd_mem vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_MEM].name, ret);
- BUG();
- }
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
-
- /* Set initial vdd_dig vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_DIG].name, ret);
- BUG();
- }
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
-
- /* Setup Krait CPU regulators and initial core voltage. */
- sc->vreg[VREG_CORE].reg = regulator_get(NULL,
- sc->vreg[VREG_CORE].name);
- if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
- pr_err("regulator_get(%s) failed (%ld)\n",
- sc->vreg[VREG_CORE].name,
- PTR_ERR(sc->vreg[VREG_CORE].reg));
- BUG();
- }
- vdd_core = calculate_vdd_core(lvl);
- ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- BUG();
- }
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- ret = regulator_enable(sc->vreg[VREG_CORE].reg);
- if (ret) {
- pr_err("regulator_enable(%s) failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- BUG();
- }
+ /* Set initial vdd_mem vote. */
+ ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
+ sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
+ sc->vreg[VREG_MEM].max_vdd, 0);
+ if (ret) {
+ pr_err("%s initialization failed (%d)\n",
+ sc->vreg[VREG_MEM].name, ret);
+ BUG();
}
+ sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+
+ /* Set initial vdd_dig vote. */
+ ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
+ sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
+ sc->vreg[VREG_DIG].max_vdd, 0);
+ if (ret) {
+ pr_err("%s initialization failed (%d)\n",
+ sc->vreg[VREG_DIG].name, ret);
+ BUG();
+ }
+ sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+
+ /* Setup Krait CPU regulators and initial core voltage. */
+ sc->vreg[VREG_CORE].reg = regulator_get(NULL,
+ sc->vreg[VREG_CORE].name);
+ if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
+ pr_err("regulator_get(%s) failed (%ld)\n",
+ sc->vreg[VREG_CORE].name,
+ PTR_ERR(sc->vreg[VREG_CORE].reg));
+ BUG();
+ }
+ vdd_core = calculate_vdd_core(lvl);
+ ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
+ sc->vreg[VREG_CORE].max_vdd);
+ if (ret) {
+ pr_err("%s initialization failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ BUG();
+ }
+ sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+ ret = regulator_enable(sc->vreg[VREG_CORE].reg);
+ if (ret) {
+ pr_err("regulator_enable(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ BUG();
+ }
+ sc->regulators_initialized = true;
}
/* Set initial rate for a given core. */
-static void __init init_clock_sources(struct scalable *sc,
+static void __cpuinit init_clock_sources(struct scalable *sc,
struct core_speed *tgt_s)
{
uint32_t regval;
@@ -1412,13 +1413,13 @@
sc->current_speed = tgt_s;
}
-static void __init per_cpu_init(void *data)
+static void __cpuinit per_cpu_init(void *data)
{
- struct acpu_level *max_acpu_level = data;
int cpu = smp_processor_id();
init_clock_sources(&scalable[cpu], &max_acpu_level->speed);
scalable[cpu].l2_vote = max_acpu_level->l2_level;
+ scalable[cpu].clocks_initialized = true;
}
/* Register with bus driver. */
@@ -1502,17 +1503,23 @@
/* Fall through. */
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+ if (scalable[cpu].clocks_initialized)
+ acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ,
+ SETRATE_HOTPLUG);
break;
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (WARN_ON(!prev_khz[cpu]))
- return NOTIFY_BAD;
- acpuclk_8960_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
+ if (scalable[cpu].clocks_initialized)
+ acpuclk_8960_set_rate(cpu, prev_khz[cpu],
+ SETRATE_HOTPLUG);
+ if (!scalable[cpu].regulators_initialized)
+ regulator_init(cpu, max_acpu_level);
break;
case CPU_STARTING:
case CPU_STARTING_FROZEN:
- if (cpu_is_krait_v1() || cpu_is_apq8064()) {
+ if (!scalable[cpu].clocks_initialized) {
+ per_cpu_init(NULL);
+ } else if (cpu_is_krait_v1() || cpu_is_apq8064()) {
set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
}
@@ -1578,9 +1585,9 @@
}
}
-static struct acpu_level * __init select_freq_plan(void)
+static void __init select_freq_plan(void)
{
- struct acpu_level *l, *max_acpu_level = NULL;
+ struct acpu_level *l;
/* Select frequency tables. */
if (cpu_is_msm8960()) {
@@ -1628,8 +1635,6 @@
max_acpu_level = l;
BUG_ON(!max_acpu_level);
pr_info("Max ACPU freq: %u KHz\n", max_acpu_level->speed.khz);
-
- return max_acpu_level;
}
static struct acpuclk_data acpuclk_8960_data = {
@@ -1641,13 +1646,16 @@
static int __init acpuclk_8960_probe(struct platform_device *pdev)
{
- struct acpu_level *max_acpu_level = select_freq_plan();
+ int cpu;
- regulator_init(max_acpu_level);
+ select_freq_plan();
+
+ for_each_online_cpu(cpu)
+ regulator_init(cpu, max_acpu_level);
bus_init(max_acpu_level->l2_level->bw_level);
init_clock_sources(&scalable[L2], &max_acpu_level->l2_level->speed);
- on_each_cpu(per_cpu_init, max_acpu_level, true);
+ on_each_cpu(per_cpu_init, NULL, true);
cpufreq_table_init();
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index a53f771..a33b62b 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -251,6 +251,7 @@
.pin_data = &mmc_slot_pin_data[SDCC1],
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.uhs_caps = MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
+ .uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 2721f66..0c5ae5f 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -16,6 +16,7 @@
#include <linux/msm_kgsl.h>
#include <mach/msm_bus_board.h>
#include <mach/board.h>
+#include <mach/socinfo.h>
#include "devices.h"
#include "board-8930.h"
@@ -160,5 +161,8 @@
void __init msm8930_init_gpu(void)
{
+ if (cpu_is_msm8627())
+ kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 400000000;
+
platform_device_register(&device_kgsl_3d0);
}
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index 5c0a84c..7280b22 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -21,6 +21,7 @@
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/gpiomux.h>
+#include <mach/socinfo.h>
#include "devices.h"
#include "board-8930.h"
@@ -245,6 +246,7 @@
.pin_data = &mmc_slot_pin_data[SDCC1],
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+ .uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
};
#endif
@@ -265,7 +267,6 @@
#endif
.vreg_data = &mmc_slot_vreg_data[SDCC3],
.pin_data = &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
/*TODO: Insert right replacement for PM8038 */
#ifndef MSM8930_PHASE_2
.status_gpio = PM8921_GPIO_PM_TO_SYS(26),
@@ -276,7 +277,6 @@
#endif
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
.is_status_gpio_active_low = true,
-#endif
.xpc_cap = 1,
.uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
@@ -303,11 +303,27 @@
msm_add_sdcc(1, &msm8960_sdc1_data);
#endif
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
+ /*
+ * All 8930 platform boards using the 1.2 SoC have been reworked so that
+ * the SD card detect line's ESD circuit is no longer powered by the SD
+ * card's voltage regulator, which means the regulator can be turned off
+ * to save power without affecting SD card detection. This also holds
+ * for boards with newer versions of the SoC.
+ */
+ if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 1 &&
+ SOCINFO_VERSION_MINOR(socinfo_get_version()) >= 2) ||
+ machine_is_msm8930_cdp()) {
+ msm8960_sdc3_data.vreg_data->vdd_data->always_on = false;
+ msm8960_sdc3_data.vreg_data->vdd_data->reset_at_init = false;
+ }
+
/* SDC3: External card slot */
if (!machine_is_msm8930_cdp()) {
msm8960_sdc3_data.wpswitch_gpio = 0;
msm8960_sdc3_data.is_wpswitch_active_low = false;
}
+
msm_add_sdcc(3, &msm8960_sdc3_data);
#endif
}
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 6bd1b7d..2664d6b 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -246,7 +246,7 @@
REGULATOR_SUPPLY("ext_3p3v", NULL),
REGULATOR_SUPPLY("vdd_ana", "3-005b"),
REGULATOR_SUPPLY("vdd_lvds_3p3v", "mipi_dsi.1"),
- REGULATOR_SUPPLY("mhl_ext_3p3v", "msm_otg"),
+ REGULATOR_SUPPLY("mhl_usb_hs_switch", "msm_otg"),
};
VREG_CONSUMERS(EXT_OTG_SW) = {
REGULATOR_SUPPLY("ext_otg_sw", NULL),
diff --git a/arch/arm/mach-msm/board-8960-storage.c b/arch/arm/mach-msm/board-8960-storage.c
index 4b09f82..67f44aa 100644
--- a/arch/arm/mach-msm/board-8960-storage.c
+++ b/arch/arm/mach-msm/board-8960-storage.c
@@ -295,6 +295,7 @@
.pin_data = &mmc_slot_pin_data[SDCC1],
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+ .uhs_caps2 = MMC_CAP2_HS200_1_8V_SDR,
};
#endif
@@ -326,12 +327,10 @@
#endif
.vreg_data = &mmc_slot_vreg_data[SDCC3],
.pin_data = &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
.status_gpio = PM8921_GPIO_PM_TO_SYS(26),
.status_irq = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, 26),
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
.is_status_gpio_active_low = true,
-#endif
.xpc_cap = 1,
.uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 6fd2b4d..251c1de 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1292,6 +1292,7 @@
.soft_reset_inverted = 1,
.peripheral_platform_device = NULL,
.ramdump_timeout_ms = 600000,
+ .no_powerdown_after_ramdumps = 1,
};
#define MSM_TSIF0_PHYS (0x18200000)
diff --git a/arch/arm/mach-msm/board-9615-storage.c b/arch/arm/mach-msm/board-9615-storage.c
index 2025bd0..6cb34f8 100644
--- a/arch/arm/mach-msm/board-9615-storage.c
+++ b/arch/arm/mach-msm/board-9615-storage.c
@@ -178,11 +178,9 @@
.sup_clk_cnt = ARRAY_SIZE(sdc1_sup_clk_rates),
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.pin_data = &mmc_slot_pin_data[SDCC1],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
.status_gpio = GPIO_SDC1_HW_DET,
.status_irq = MSM_GPIO_TO_INT(GPIO_SDC1_HW_DET),
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
.xpc_cap = 1,
.uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_MAX_CURRENT_400),
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 3da68ad..9259161 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -1155,14 +1155,14 @@
if (rc < 0)
return rc;
- gpio_reg_2p85v = regulator_get(&msm8625_mipi_dsi_device.dev,
+ gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
"lcd_vdd");
if (IS_ERR(gpio_reg_2p85v)) {
pr_err("%s:ext_2p85v regulator get failed", __func__);
return -EINVAL;
}
- gpio_reg_1p8v = regulator_get(&msm8625_mipi_dsi_device.dev,
+ gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
"lcd_vddi");
if (IS_ERR(gpio_reg_1p8v)) {
pr_err("%s:ext_1p8v regulator get failed", __func__);
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index e2184f4..49ff393 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -233,8 +233,7 @@
return rc;
}
-#if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) \
- && defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
static unsigned int msm7627a_sdcc_slot_status(struct device *dev)
{
int status;
@@ -266,9 +265,7 @@
}
return status;
}
-#endif
-#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
static struct mmc_platform_data sdc1_plat_data = {
.ocr_mask = MMC_VDD_28_29,
.translate_vdd = msm_sdcc_setup_power,
@@ -276,10 +273,8 @@
.msmsdcc_fmin = 144000,
.msmsdcc_fmid = 24576000,
.msmsdcc_fmax = 49152000,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
.status = msm7627a_sdcc_slot_status,
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
};
#endif
@@ -382,13 +377,11 @@
gpio_sdc1_config();
if (mmc_regulator_init(1, "mmc", 2850000))
return;
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
/* 8x25 EVT does not use hw detector */
if (!(machine_is_msm8625_evt()))
sdc1_plat_data.status_irq = MSM_GPIO_TO_INT(gpio_sdc1_hw_det);
if (machine_is_msm8625_evt())
sdc1_plat_data.status = NULL;
-#endif
msm_add_sdcc(1, &sdc1_plat_data);
#endif
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 2834f24..bb94474 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -255,7 +255,6 @@
}
};
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
struct pm8xxx_gpio_init_info sdcc_det = {
PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1),
{
@@ -275,7 +274,6 @@
pr_err("%s PMIC_GPIO_SD_DET config failed\n", __func__);
return rc;
}
-#endif
if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa() ||
machine_is_msm7x30_fluid())
@@ -6206,14 +6204,12 @@
#endif
#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
static unsigned int msm7x30_sdcc_slot_status(struct device *dev)
{
return (unsigned int)
gpio_get_value_cansleep(
PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1));
}
-#endif
static int msm_sdcc_get_wpswitch(struct device *dv)
{
@@ -6302,11 +6298,9 @@
.ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
.translate_vdd = msm_sdcc_setup_power,
.mmc_bus_width = MMC_CAP_4_BIT_DATA,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
.status = msm7x30_sdcc_slot_status,
.status_irq = PM8058_GPIO_IRQ(PMIC8058_IRQ_BASE, PMIC_GPIO_SD_DET - 1),
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
.wpswitch = msm_sdcc_get_wpswitch,
.msmsdcc_fmin = 144000,
.msmsdcc_fmid = 24576000,
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 4e50ce5..0459240 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5216,6 +5216,7 @@
&msm_pil_modem,
&msm_pil_tzapps,
&msm_pil_dsps,
+ &msm_pil_vidc,
&qseecom_device,
#ifdef CONFIG_I2C_QUP
&msm_gsbi3_qup_i2c_device,
@@ -5768,7 +5769,6 @@
.inv_int_pol = 0,
},
},
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
{
PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC3_DET - 1),
{
@@ -5779,7 +5779,6 @@
.inv_int_pol = 0,
},
},
-#endif
{ /* core&surf gpio expander */
PM8058_GPIO_PM_TO_SYS(UI_INT1_N),
{
@@ -8429,7 +8428,6 @@
}
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
static unsigned int msm8x60_sdcc_slot_status(struct device *dev)
{
int status;
@@ -8451,7 +8449,6 @@
}
#endif
#endif
-#endif
#define MSM_MPM_PIN_SDC3_DAT1 21
#define MSM_MPM_PIN_SDC4_DAT1 23
@@ -8497,12 +8494,10 @@
.translate_vdd = msm_sdcc_setup_power,
.mmc_bus_width = MMC_CAP_4_BIT_DATA,
.wpswitch = msm_sdc3_get_wpswitch,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
.status = msm8x60_sdcc_slot_status,
.status_irq = PM8058_GPIO_IRQ(PM8058_IRQ_BASE,
PMIC_GPIO_SDC3_DET - 1),
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
.msmsdcc_fmin = 400000,
.msmsdcc_fmid = 24000000,
.msmsdcc_fmax = 48000000,
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 8eb961c..5b9ea36 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -686,7 +686,7 @@
};
/* GPIO regulator */
-static struct platform_device qrd_msm8625_vreg_gpio_ext_2p85v __devinitdata = {
+static struct platform_device qrd_vreg_gpio_ext_2p85v __devinitdata = {
.name = GPIO_REGULATOR_DEV_NAME,
.id = 35,
.dev = {
@@ -695,7 +695,7 @@
},
};
-static struct platform_device qrd_msm8625_vreg_gpio_ext_1p8v __devinitdata = {
+static struct platform_device qrd_vreg_gpio_ext_1p8v __devinitdata = {
.name = GPIO_REGULATOR_DEV_NAME,
.id = 40,
.dev = {
@@ -732,6 +732,8 @@
&msm_device_otg,
&msm_device_gadget_peripheral,
&msm_kgsl_3d0,
+ &qrd_vreg_gpio_ext_2p85v,
+ &qrd_vreg_gpio_ext_1p8v,
};
static struct platform_device *qrd3_devices[] __initdata = {
@@ -748,8 +750,8 @@
&msm8625_device_otg,
&msm8625_device_gadget_peripheral,
&msm8625_kgsl_3d0,
- &qrd_msm8625_vreg_gpio_ext_2p85v,
- &qrd_msm8625_vreg_gpio_ext_1p8v,
+ &qrd_vreg_gpio_ext_2p85v,
+ &qrd_vreg_gpio_ext_1p8v,
};
static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE;
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index 225ea2b..f3ac7d7 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -2328,60 +2328,53 @@
},
};
-static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(codec_ssbi_clk, CODEC_SSBI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN |
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gp_clk, GP_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, 0);
+static DEFINE_CLK_PCOM(codec_ssbi_clk, CODEC_SSBI_CLK, 0);
+static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi1_fixed_clk, EBI1_FIXED_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, 0);
+static DEFINE_CLK_PCOM(gp_clk, GP_CLK, 0);
static DEFINE_CLK_PCOM(uart3_clk, UART3_CLK, 0);
static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_2d_clk, GRP_2D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_2d_p_clk, GRP_2D_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_hdmi_clk, HDMI_CLK, 0);
static DEFINE_CLK_PCOM(p_jpeg_clk, JPEG_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(p_jpeg_p_clk, JPEG_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_lpa_codec_clk, LPA_CODEC_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_core_clk, LPA_CORE_CLK, 0);
+static DEFINE_CLK_PCOM(p_lpa_p_clk, LPA_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_m_clk, MI2S_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_s_clk, MI2S_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_m_clk, MI2S_CODEC_RX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_rx_s_clk, MI2S_CODEC_RX_S_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_m_clk, MI2S_CODEC_TX_M_CLK, 0);
+static DEFINE_CLK_PCOM(p_mi2s_codec_tx_s_clk, MI2S_CODEC_TX_S_CLK, 0);
static DEFINE_CLK_PCOM(p_sdac_clk, SDAC_CLK, 0);
static DEFINE_CLK_PCOM(p_sdac_m_clk, SDAC_M_CLK, 0);
-static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_vfe_clk, VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_camif_clk, VFE_CAMIF_CLK, 0);
+static DEFINE_CLK_PCOM(p_vfe_mdc_clk, VFE_MDC_CLK, 0);
static DEFINE_CLK_PCOM(p_vfe_p_clk, VFE_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_grp_3d_clk, GRP_3D_CLK, 0);
+static DEFINE_CLK_PCOM(p_grp_3d_p_clk, GRP_3D_P_CLK, 0);
static DEFINE_CLK_PCOM(p_imem_clk, IMEM_CLK, 0);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdp_p_clk, MDP_P_CLK, 0);
static DEFINE_CLK_PCOM(p_mdp_vsync_clk, MDP_VSYNC_CLK, 0);
-static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_tsif_ref_clk, TSIF_REF_CLK, 0);
+static DEFINE_CLK_PCOM(p_tsif_p_clk, TSIF_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_dac_clk, TV_DAC_CLK, 0);
+static DEFINE_CLK_PCOM(p_tv_enc_clk, TV_ENC_CLK, 0);
static DEFINE_CLK_PCOM(p_emdh_clk, EMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
static DEFINE_CLK_PCOM(p_emdh_p_clk, EMDH_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_i2c_clk, I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_i2c_2_clk, I2C_2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mdc_clk, MDC_CLK, 0);
static DEFINE_CLK_PCOM(p_pmdh_clk, PMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_pmdh_p_clk, PMDH_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc1_clk, SDC1_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc1_p_clk, SDC1_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc2_clk, SDC2_CLK, 0);
@@ -2390,36 +2383,35 @@
static DEFINE_CLK_PCOM(p_sdc3_p_clk, SDC3_P_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc4_clk, SDC4_CLK, 0);
static DEFINE_CLK_PCOM(p_sdc4_p_clk, SDC4_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2_clk, UART2_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_clk, USB_HS2_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_core_clk, USB_HS2_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs2_p_clk, USB_HS2_P_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_clk, USB_HS3_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_core_clk, USB_HS3_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs3_p_clk, USB_HS3_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_qup_i2c_clk, QUP_I2C_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_clk, SPI_CLK, 0);
+static DEFINE_CLK_PCOM(p_spi_p_clk, SPI_P_CLK, 0);
static DEFINE_CLK_PCOM(p_uart1_clk, UART1_CLK, 0);
static DEFINE_CLK_PCOM(p_uart1dm_clk, UART1DM_CLK, 0);
-static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_uart2dm_clk, UART2DM_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_clk, USB_HS_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_core_clk, USB_HS_CORE_CLK, 0);
static DEFINE_CLK_PCOM(p_usb_hs_p_clk, USB_HS_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_cam_m_clk, CAM_M_CLK, 0);
static DEFINE_CLK_PCOM(p_camif_pad_p_clk, CAMIF_PAD_P_CLK, 0);
-static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_csi0_clk, CSI0_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_vfe_clk, CSI0_VFE_CLK, 0);
+static DEFINE_CLK_PCOM(p_csi0_p_clk, CSI0_P_CLK, 0);
static DEFINE_CLK_PCOM(p_mdp_clk, MDP_CLK, CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(p_mfc_clk, MFC_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_div2_clk, MFC_DIV2_CLK, 0);
+static DEFINE_CLK_PCOM(p_mfc_p_clk, MFC_P_CLK, 0);
+static DEFINE_CLK_PCOM(p_vpe_clk, VPE_CLK, 0);
+static DEFINE_CLK_PCOM(p_adm_clk, ADM_CLK, 0);
+static DEFINE_CLK_PCOM(p_ce_clk, CE_CLK, 0);
+static DEFINE_CLK_PCOM(p_axi_rotator_clk, AXI_ROTATOR_CLK, 0);
static DEFINE_CLK_PCOM(p_rotator_imem_clk, ROTATOR_IMEM_CLK, 0);
static DEFINE_CLK_PCOM(p_rotator_p_clk, ROTATOR_P_CLK, 0);
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index fb0e168..3f4eb8e 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -2990,7 +2990,6 @@
static struct clk_ops clk_ops_pix_rdi_8960 = {
.enable = pix_rdi_clk_enable,
.disable = pix_rdi_clk_disable,
- .auto_off = pix_rdi_clk_disable,
.handoff = pix_rdi_clk_handoff,
.set_rate = pix_rdi_clk_set_rate,
.get_rate = pix_rdi_clk_get_rate,
@@ -6078,18 +6077,19 @@
*/
/*
* Initialize MM AHB registers: Enable the FPB clock and disable HW
- * gating on non-8960 for all clocks. Also set VFE_AHB's
+ * gating on 8627 for all clocks. Also set VFE_AHB's
* FORCE_CORE_ON bit to prevent its memory from being collapsed when
* the clock is halted. The sleep and wake-up delays are set to safe
* values.
*/
- if (cpu_is_msm8960() || cpu_is_apq8064()) {
- rmwreg(0x44000000, AHB_EN_REG, 0x6C000103);
- writel_relaxed(0x3C7097F9, AHB_EN2_REG);
- } else {
+ if (cpu_is_msm8627()) {
rmwreg(0x00000003, AHB_EN_REG, 0x6C000103);
writel_relaxed(0x000007F9, AHB_EN2_REG);
+ } else {
+ rmwreg(0x44000000, AHB_EN_REG, 0x6C000103);
+ writel_relaxed(0x3C7097F9, AHB_EN2_REG);
}
+
if (cpu_is_apq8064())
rmwreg(0x00000001, AHB_EN3_REG, 0x00000001);
@@ -6101,25 +6101,26 @@
* support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
* delays to safe values. */
if ((cpu_is_msm8960() &&
- SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) ||
- cpu_is_apq8064()) {
- rmwreg(0x0003AFF9, MAXI_EN_REG, 0x0803FFFF);
- rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
- rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
- } else {
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
+ cpu_is_msm8627()) {
rmwreg(0x000007F9, MAXI_EN_REG, 0x0803FFFF);
rmwreg(0x3027FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
- rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+ } else {
+ rmwreg(0x0003AFF9, MAXI_EN_REG, 0x0803FFFF);
+ rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
}
+
rmwreg(0x0027FCFF, MAXI_EN3_REG, 0x003FFFFF);
+ rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+
if (cpu_is_apq8064())
rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
if (cpu_is_msm8930())
rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
- if (cpu_is_msm8960() || cpu_is_apq8064())
- rmwreg(0x00003C38, SAXI_EN_REG, 0x00003FFF);
- else
+ if (cpu_is_msm8627())
rmwreg(0x000003C7, SAXI_EN_REG, 0x00003FFF);
+ else
+ rmwreg(0x00003C38, SAXI_EN_REG, 0x00003FFF);
/* Enable IMEM's clk_on signal */
imem_reg = ioremap(0x04b00040, 4);
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 5c7cddd..2660d5e 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -5019,6 +5019,12 @@
clk_set_rate(&axi_clk_src.c, 333330000);
clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
+ /*
+ * Hold an active set vote for CXO; this is because CXO is expected
+ * to remain on whenever CPUs aren't power collapsed.
+ */
+ clk_prepare_enable(&cxo_a_clk_src.c);
+
/* Set rates for single-rate clocks. */
clk_set_rate(&usb30_master_clk_src.c,
usb30_master_clk_src.freq_tbl[0].freq_hz);
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 15cdacb..4493ddc 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -602,7 +602,6 @@
.c = {
.dbg_name = "smi_2x_axi_clk",
.ops = &clk_ops_branch,
- .flags = CLKFLAG_SKIP_AUTO_OFF,
CLK_INIT(smi_2x_axi_clk.c),
},
};
@@ -3724,6 +3723,8 @@
CLK_LOOKUP("vcodec_iommu0_clk", vcodec_axi_clk.c, "msm_vidc.0"),
CLK_LOOKUP("vcodec_iommu1_clk", vcodec_axi_clk.c, "msm_vidc.0"),
CLK_LOOKUP("smmu_iface_clk", smmu_p_clk.c, "msm_vidc.0"),
+ CLK_LOOKUP("core_clk", vcodec_axi_clk.c, "pil_vidc"),
+ CLK_LOOKUP("smmu_iface_clk", smmu_p_clk.c, "pil_vidc"),
CLK_LOOKUP("dfab_dsps_clk", dfab_dsps_clk.c, NULL),
CLK_LOOKUP("core_clk", dfab_usb_hs_clk.c, "msm_otg"),
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index 57b7a76..f5ce5a7 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -250,7 +250,6 @@
static struct clk_ops clk_ops_pll_acpu_vote = {
.enable = pll_acpu_vote_clk_enable,
.disable = pll_acpu_vote_clk_disable,
- .auto_off = pll_acpu_vote_clk_disable,
.is_enabled = pll_vote_clk_is_enabled,
.get_parent = pll_vote_clk_get_parent,
};
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index e8c3e05..7263512 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -169,16 +169,23 @@
static int clock_debug_print_clock(struct clk *c)
{
- size_t ln = 0;
- char s[128];
+ char *start = "";
if (!c || !c->count)
return 0;
- ln += snprintf(s, sizeof(s), "\t%s", c->dbg_name);
- while (ln < sizeof(s) && (c = clk_get_parent(c)))
- ln += snprintf(s + ln, sizeof(s) - ln, " -> %s", c->dbg_name);
- pr_info("%s\n", s);
+ pr_info("\t");
+ do {
+ if (c->vdd_class)
+ pr_cont("%s%s [%ld, %lu]", start, c->dbg_name, c->rate,
+ c->vdd_class->cur_level);
+ else
+ pr_cont("%s%s [%ld]", start, c->dbg_name, c->rate);
+ start = " -> ";
+ } while ((c = clk_get_parent(c)));
+
+ pr_cont("\n");
+
return 1;
}
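
With this change each enabled clock prints its full parent chain, with the
rate in brackets and, when a vdd_class is attached, the current vdd level as
well. With hypothetical clock names, one printed chain reads:

	foo_clk [96000000] -> bar_pll [768000000, 5]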
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index 0f9404b..ca913dc 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -805,7 +805,6 @@
.enable_hwcg = branch_clk_enable_hwcg,
.disable_hwcg = branch_clk_disable_hwcg,
.in_hwcg_mode = branch_clk_in_hwcg_mode,
- .auto_off = branch_clk_disable,
.is_enabled = branch_clk_is_enabled,
.reset = branch_clk_reset,
.get_parent = branch_clk_get_parent,
@@ -828,7 +827,6 @@
.enable_hwcg = rcg_clk_enable_hwcg,
.disable_hwcg = rcg_clk_disable_hwcg,
.in_hwcg_mode = rcg_clk_in_hwcg_mode,
- .auto_off = rcg_clk_disable,
.handoff = rcg_clk_handoff,
.set_rate = rcg_clk_set_rate,
.list_rate = rcg_clk_list_rate,
@@ -941,7 +939,6 @@
.in_hwcg_mode = cdiv_clk_in_hwcg_mode,
.enable_hwcg = cdiv_clk_enable_hwcg,
.disable_hwcg = cdiv_clk_disable_hwcg,
- .auto_off = cdiv_clk_disable,
.handoff = cdiv_clk_handoff,
.set_rate = cdiv_clk_set_rate,
.get_rate = cdiv_clk_get_rate,
diff --git a/arch/arm/mach-msm/clock-local2.c b/arch/arm/mach-msm/clock-local2.c
index 9fe9591..23b4723 100644
--- a/arch/arm/mach-msm/clock-local2.c
+++ b/arch/arm/mach-msm/clock-local2.c
@@ -589,7 +589,6 @@
struct clk_ops clk_ops_branch = {
.enable = branch_clk_enable,
.disable = branch_clk_disable,
- .auto_off = branch_clk_disable,
.set_rate = branch_clk_set_rate,
.get_rate = branch_clk_get_rate,
.list_rate = branch_clk_list_rate,
@@ -602,7 +601,6 @@
struct clk_ops clk_ops_vote = {
.enable = local_vote_clk_enable,
.disable = local_vote_clk_disable,
- .auto_off = local_vote_clk_disable,
.reset = local_vote_clk_reset,
.handoff = local_vote_clk_handoff,
};
diff --git a/arch/arm/mach-msm/clock-pcom-lookup.c b/arch/arm/mach-msm/clock-pcom-lookup.c
index 83940cf..2a2cc01 100644
--- a/arch/arm/mach-msm/clock-pcom-lookup.c
+++ b/arch/arm/mach-msm/clock-pcom-lookup.c
@@ -22,19 +22,19 @@
#define PLLn_MODE(n) (MSM_CLK_CTL_BASE + 0x300 + 28 * (n))
#define PLL4_MODE (MSM_CLK_CTL_BASE + 0x374)
-static DEFINE_CLK_PCOM(adm_clk, ADM_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_m_clk, AHB_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ahb_s_clk, AHB_S_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(cam_m_clk, CAM_M_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(adm_clk, ADM_CLK, 0);
+static DEFINE_CLK_PCOM(adsp_clk, ADSP_CLK, 0);
+static DEFINE_CLK_PCOM(ahb_m_clk, AHB_M_CLK, 0);
+static DEFINE_CLK_PCOM(ahb_s_clk, AHB_S_CLK, 0);
+static DEFINE_CLK_PCOM(cam_m_clk, CAM_M_CLK, 0);
static DEFINE_CLK_PCOM(axi_rotator_clk, AXI_ROTATOR_CLK, 0);
-static DEFINE_CLK_PCOM(ce_clk, CE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_clk, CSI0_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_p_clk, CSI0_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi0_vfe_clk, CSI0_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_clk, CSI1_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_p_clk, CSI1_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(csi1_vfe_clk, CSI1_VFE_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ce_clk, CE_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_clk, CSI0_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_p_clk, CSI0_P_CLK, 0);
+static DEFINE_CLK_PCOM(csi0_vfe_clk, CSI0_VFE_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_clk, CSI1_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_p_clk, CSI1_P_CLK, 0);
+static DEFINE_CLK_PCOM(csi1_vfe_clk, CSI1_VFE_CLK, 0);
static struct pll_shared_clk pll0_clk = {
.id = PLL_0,
@@ -113,38 +113,36 @@
};
static DEFINE_CLK_PCOM(dsi_ref_clk, DSI_REF_CLK, 0);
-static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK,
- CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(ebi2_clk, EBI2_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(ebi1_clk, EBI1_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(ebi2_clk, EBI2_CLK, 0);
+static DEFINE_CLK_PCOM(ecodec_clk, ECODEC_CLK, 0);
static DEFINE_CLK_PCOM(emdh_clk, EMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
-static DEFINE_CLK_PCOM(gp_clk, GP_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_clk, GRP_2D_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(grp_2d_p_clk, GRP_2D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gp_clk, GP_CLK, 0);
+static DEFINE_CLK_PCOM(grp_2d_clk, GRP_2D_CLK, 0);
+static DEFINE_CLK_PCOM(grp_2d_p_clk, GRP_2D_P_CLK, 0);
static DEFINE_CLK_PCOM(grp_3d_clk, GRP_3D_CLK, 0);
-static DEFINE_CLK_PCOM(grp_3d_p_clk, GRP_3D_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(grp_3d_p_clk, GRP_3D_P_CLK, 0);
static DEFINE_CLK_PCOM(gsbi1_qup_clk, GSBI1_QUP_CLK, 0);
static DEFINE_CLK_PCOM(gsbi1_qup_p_clk, GSBI1_QUP_P_CLK, 0);
static DEFINE_CLK_PCOM(gsbi2_qup_clk, GSBI2_QUP_CLK, 0);
static DEFINE_CLK_PCOM(gsbi2_qup_p_clk, GSBI2_QUP_P_CLK, 0);
-static DEFINE_CLK_PCOM(gsbi_clk, GSBI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(gsbi_p_clk, GSBI_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(hdmi_clk, HDMI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(i2c_clk, I2C_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_rx_clk, ICODEC_RX_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(icodec_tx_clk, ICODEC_TX_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(gsbi_clk, GSBI_CLK, 0);
+static DEFINE_CLK_PCOM(gsbi_p_clk, GSBI_P_CLK, 0);
+static DEFINE_CLK_PCOM(hdmi_clk, HDMI_CLK, 0);
+static DEFINE_CLK_PCOM(i2c_clk, I2C_CLK, 0);
+static DEFINE_CLK_PCOM(icodec_rx_clk, ICODEC_RX_CLK, 0);
+static DEFINE_CLK_PCOM(icodec_tx_clk, ICODEC_TX_CLK, 0);
static DEFINE_CLK_PCOM(imem_clk, IMEM_CLK, 0);
-static DEFINE_CLK_PCOM(mdc_clk, MDC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(mdc_clk, MDC_CLK, 0);
static DEFINE_CLK_PCOM(mdp_clk, MDP_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(mdp_lcdc_pad_pclk_clk, MDP_LCDC_PAD_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+ 0);
static DEFINE_CLK_PCOM(mdp_lcdc_pclk_clk, MDP_LCDC_PCLK_CLK,
- CLKFLAG_SKIP_AUTO_OFF);
+ 0);
static DEFINE_CLK_PCOM(mdp_vsync_clk, MDP_VSYNC_CLK, 0);
static DEFINE_CLK_PCOM(mdp_dsi_p_clk, MDP_DSI_P_CLK, 0);
-static DEFINE_CLK_PCOM(pbus_clk, PBUS_CLK,
- CLKFLAG_SKIP_AUTO_OFF | CLKFLAG_MIN);
-static DEFINE_CLK_PCOM(pcm_clk, PCM_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(pbus_clk, PBUS_CLK, CLKFLAG_MIN);
+static DEFINE_CLK_PCOM(pcm_clk, PCM_CLK, 0);
static DEFINE_CLK_PCOM(pmdh_clk, PMDH_CLK, CLKFLAG_MIN | CLKFLAG_MAX);
static DEFINE_CLK_PCOM(sdac_clk, SDAC_CLK, 0);
static DEFINE_CLK_PCOM(sdc1_clk, SDC1_CLK, 0);
@@ -155,12 +153,12 @@
static DEFINE_CLK_PCOM(sdc3_p_clk, SDC3_P_CLK, 0);
static DEFINE_CLK_PCOM(sdc4_clk, SDC4_CLK, 0);
static DEFINE_CLK_PCOM(sdc4_p_clk, SDC4_P_CLK, 0);
-static DEFINE_CLK_PCOM(spi_clk, SPI_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_clk, TSIF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_p_clk, TSIF_P_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tsif_ref_clk, TSIF_REF_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_dac_clk, TV_DAC_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(tv_enc_clk, TV_ENC_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(spi_clk, SPI_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_clk, TSIF_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_p_clk, TSIF_P_CLK, 0);
+static DEFINE_CLK_PCOM(tsif_ref_clk, TSIF_REF_CLK, 0);
+static DEFINE_CLK_PCOM(tv_dac_clk, TV_DAC_CLK, 0);
+static DEFINE_CLK_PCOM(tv_enc_clk, TV_ENC_CLK, 0);
static DEFINE_CLK_PCOM(uart1_clk, UART1_CLK, 0);
static DEFINE_CLK_PCOM(uart1dm_clk, UART1DM_CLK, 0);
static DEFINE_CLK_PCOM(uart2_clk, UART2_CLK, 0);
@@ -173,8 +171,8 @@
static DEFINE_CLK_PCOM(usb_hs_clk, USB_HS_CLK, 0);
static DEFINE_CLK_PCOM(usb_hs_core_clk, USB_HS_CORE_CLK, 0);
static DEFINE_CLK_PCOM(usb_hs_p_clk, USB_HS_P_CLK, 0);
-static DEFINE_CLK_PCOM(usb_otg_clk, USB_OTG_CLK, CLKFLAG_SKIP_AUTO_OFF);
-static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, CLKFLAG_SKIP_AUTO_OFF);
+static DEFINE_CLK_PCOM(usb_otg_clk, USB_OTG_CLK, 0);
+static DEFINE_CLK_PCOM(usb_phy_clk, USB_PHY_CLK, 0);
static DEFINE_CLK_PCOM(vdc_clk, VDC_CLK, CLKFLAG_MIN);
static DEFINE_CLK_PCOM(vfe_axi_clk, VFE_AXI_CLK, 0);
static DEFINE_CLK_PCOM(vfe_clk, VFE_CLK, 0);
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
index 02c8765..428423a 100644
--- a/arch/arm/mach-msm/clock-pcom.c
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -190,7 +190,6 @@
struct clk_ops clk_ops_pcom = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
- .auto_off = pc_clk_disable,
.reset = pc_reset,
.set_rate = pc_clk_set_rate,
.set_max_rate = pc_clk_set_max_rate,
@@ -205,7 +204,6 @@
struct clk_ops clk_ops_pcom_ext_config = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
- .auto_off = pc_clk_disable,
.reset = pc_reset,
.set_rate = pc_clk_set_ext_config,
.set_max_rate = pc_clk_set_max_rate,
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 8c1f67e..2938135 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -121,7 +121,6 @@
struct clk_ops clk_ops_pll_vote = {
.enable = pll_vote_clk_enable,
.disable = pll_vote_clk_disable,
- .auto_off = pll_vote_clk_disable,
.is_enabled = pll_vote_clk_is_enabled,
.get_parent = pll_vote_clk_get_parent,
.handoff = pll_vote_clk_handoff,
@@ -300,7 +299,6 @@
struct clk_ops clk_ops_local_pll = {
.enable = local_pll_clk_enable,
.disable = local_pll_clk_disable,
- .auto_off = local_pll_clk_disable,
.handoff = local_pll_clk_handoff,
.get_parent = local_pll_clk_get_parent,
};
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 107fb02..ce878ce 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -58,7 +58,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
CLK_INIT(name.c), \
.depends = dep, \
@@ -74,7 +73,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
CLK_INIT(active.c), \
.depends = dep, \
@@ -96,7 +94,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm_branch, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
.rate = (r), \
CLK_INIT(name.c), \
@@ -115,7 +112,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm_branch, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
.rate = (r), \
CLK_INIT(active.c), \
@@ -134,7 +130,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #name, \
CLK_INIT(name.c), \
.warned = true, \
@@ -150,7 +145,6 @@
.rpmrs_data = (rpmrsdata),\
.c = { \
.ops = &clk_ops_rpm, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.dbg_name = #active, \
CLK_INIT(active.c), \
.warned = true, \
diff --git a/arch/arm/mach-msm/clock-voter.h b/arch/arm/mach-msm/clock-voter.h
index c9aebba..407aac6 100644
--- a/arch/arm/mach-msm/clock-voter.h
+++ b/arch/arm/mach-msm/clock-voter.h
@@ -34,7 +34,6 @@
.c = { \
.dbg_name = #clk_name, \
.ops = &clk_ops_voter, \
- .flags = CLKFLAG_SKIP_AUTO_OFF, \
.rate = _default_rate, \
CLK_INIT(clk_name.c), \
}, \
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 8bf98fa..da8c3a9 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -506,34 +506,16 @@
clk_init_data->post_init();
}
-/*
- * The bootloader and/or AMSS may have left various clocks enabled.
- * Disable any clocks that have not been explicitly enabled by a
- * clk_enable() call and don't have the CLKFLAG_SKIP_AUTO_OFF flag.
- */
static int __init clock_late_init(void)
{
- unsigned n, count = 0;
struct handoff_clk *h, *h_temp;
- unsigned long flags;
- int ret = 0;
+ int n, ret = 0;
clock_debug_init(clk_init_data);
- for (n = 0; n < clk_init_data->size; n++) {
- struct clk *clk = clk_init_data->table[n].clk;
+ for (n = 0; n < clk_init_data->size; n++)
+ clock_debug_add(clk_init_data->table[n].clk);
- clock_debug_add(clk);
- spin_lock_irqsave(&clk->lock, flags);
- if (!(clk->flags & CLKFLAG_SKIP_AUTO_OFF)) {
- if (!clk->count && clk->ops->auto_off) {
- count++;
- clk->ops->auto_off(clk);
- }
- }
- spin_unlock_irqrestore(&clk->lock, flags);
- }
- pr_info("clock_late_init() disabled %d unused clocks\n", count);
-
+ pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
clk_disable_unprepare(h->clk);
list_del(&h->list);
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index 51bfa67..56d3c6f 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -33,7 +33,6 @@
#define CLKFLAG_RETAIN 0x00000040
#define CLKFLAG_NORETAIN 0x00000080
#define CLKFLAG_SKIP_HANDOFF 0x00000100
-#define CLKFLAG_SKIP_AUTO_OFF 0x00000200
#define CLKFLAG_MIN 0x00000400
#define CLKFLAG_MAX 0x00000800
@@ -90,7 +89,6 @@
int (*enable)(struct clk *clk);
void (*disable)(struct clk *clk);
void (*unprepare)(struct clk *clk);
- void (*auto_off)(struct clk *clk);
void (*enable_hwcg)(struct clk *clk);
void (*disable_hwcg)(struct clk *clk);
int (*in_hwcg_mode)(struct clk *clk);
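
With .auto_off and CLKFLAG_SKIP_AUTO_OFF gone, boot-enabled clocks are cleaned up solely through the handoff path: any clock found running at boot holds one prepare/enable reference, and clock_late_init() now just drops those references. A minimal sketch of the idea, assuming the handoff list is populated elsewhere (struct handoff_clk and handoff_list mirror the driver; drop_handoff_votes and the late_initcall hook are illustrative):

	#include <linux/init.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/clk.h>

	struct handoff_clk {
		struct list_head list;
		struct clk *clk;
	};

	static LIST_HEAD(handoff_list);	/* filled in when handoff finds an enabled clock */

	static int __init drop_handoff_votes(void)
	{
		struct handoff_clk *h, *tmp;

		list_for_each_entry_safe(h, tmp, &handoff_list, list) {
			clk_disable_unprepare(h->clk);	/* release the boot-time vote */
			list_del(&h->list);
			kfree(h);
		}
		return 0;
	}
	late_initcall(drop_handoff_votes);
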
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 472a87e..614785e 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1192,6 +1192,7 @@
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 18,
+ .fw_addr = 0x9fe00000,
};
struct platform_device apq8064_msm_device_vidc = {
@@ -1804,8 +1805,8 @@
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
- FS_8X60(FS_VFE, "fs_vfe", NULL, &vfe_fs_data),
- FS_8X60(FS_VPE, "fs_vpe", NULL, &vpe_fs_data),
+ FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
+ FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data),
FS_8X60(FS_VCAP, "vdd", "msm_vcap.0", &vcap_fs_data),
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 6ea8d7b..3212364 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -435,8 +435,8 @@
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
- FS_8X60(FS_VFE, "fs_vfe", NULL, &vfe_fs_data),
- FS_8X60(FS_VPE, "fs_vpe", NULL, &vpe_fs_data),
+ FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
+ FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data),
};
@@ -695,6 +695,7 @@
#endif
.disable_dmx = 1,
.disable_fullhd = 0,
+ .fw_addr = 0x9fe00000,
};
struct platform_device apq8930_msm_device_vidc = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 3522e80..369e826 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -729,6 +729,7 @@
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 18,
+ .fw_addr = 0x9fe00000,
};
struct platform_device msm_device_vidc = {
@@ -2155,8 +2156,8 @@
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_IJPEG, "vdd", "msm_gemini.0", &ijpeg_fs_data),
- FS_8X60(FS_VFE, "fs_vfe", NULL, &vfe_fs_data),
- FS_8X60(FS_VPE, "fs_vpe", NULL, &vpe_fs_data),
+ FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
+ FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
FS_8X60(FS_GFX2D0, "vdd", "kgsl-2d0.0", &gfx2d0_fs_data),
FS_8X60(FS_GFX2D1, "vdd", "kgsl-2d1.1", &gfx2d1_fs_data),
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 1c58490..c159926 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1476,7 +1476,7 @@
},
};
-struct platform_device msm8625_mipi_dsi_device = {
+static struct platform_device msm8625_mipi_dsi_device = {
.name = "mipi_dsi",
.id = 1,
.num_resources = ARRAY_SIZE(msm8625_mipi_dsi_resources),
@@ -1504,6 +1504,8 @@
.resource = msm8625_mdp_resources,
};
+struct platform_device mipi_dsi_device;
+
void __init msm_fb_register_device(char *name, void *data)
{
if (!strncmp(name, "mdp", 3)) {
@@ -1512,10 +1514,13 @@
else
msm_register_device(&msm_mdp_device, data);
} else if (!strncmp(name, "mipi_dsi", 8)) {
- if (cpu_is_msm8625())
+ if (cpu_is_msm8625()) {
msm_register_device(&msm8625_mipi_dsi_device, data);
- else
+ mipi_dsi_device = msm8625_mipi_dsi_device;
+ } else {
msm_register_device(&msm_mipi_dsi_device, data);
+ mipi_dsi_device = msm_mipi_dsi_device;
+ }
} else if (!strncmp(name, "lcdc", 4)) {
msm_register_device(&msm_lcdc_device, data);
} else {
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 722575d..4b02f7a 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -1321,7 +1321,7 @@
static struct kgsl_device_platform_data kgsl_2d0_pdata = {
.pwrlevel = {
{
- .gpu_freq = 0,
+ .gpu_freq = 192000000,
.bus_freq = 192000000,
},
},
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index 8bc455e..1a9e27b 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -231,6 +231,11 @@
.dev.platform_data = "dsps",
};
+struct platform_device msm_pil_vidc = {
+ .name = "pil_vidc",
+ .id = -1,
+};
+
static struct resource msm_uart1_dm_resources[] = {
{
.start = MSM_UART1DM_PHYS,
@@ -2286,15 +2291,18 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.memtype = ION_CP_MM_HEAP_ID,
.enable_ion = 1,
- .cp_enabled = 0,
+ .cp_enabled = 1,
+ .secure_wb_heap = 1,
#else
.memtype = MEMTYPE_SMI_KERNEL,
.enable_ion = 0,
+ .secure_wb_heap = 0,
#endif
.disable_dmx = 0,
.disable_fullhd = 0,
.cont_mode_dpb_count = 8,
.disable_turbo = 1,
+ .fw_addr = 0x38000000,
};
struct platform_device msm_device_vidc = {
@@ -2688,8 +2696,8 @@
FS_8X60(FS_MDP, "vdd", "mdp.0", &mdp_fs_data),
FS_8X60(FS_ROT, "vdd", "msm_rotator.0", &rot_fs_data),
FS_8X60(FS_VED, "vdd", "msm_vidc.0", &ved_fs_data),
- FS_8X60(FS_VFE, "fs_vfe", NULL, &vfe_fs_data),
- FS_8X60(FS_VPE, "fs_vpe", NULL, &vpe_fs_data),
+ FS_8X60(FS_VFE, "vdd", "msm_vfe.0", &vfe_fs_data),
+ FS_8X60(FS_VPE, "vdd", "msm_vpe.0", &vpe_fs_data),
FS_8X60(FS_GFX3D, "vdd", "kgsl-3d0.0", &gfx3d_fs_data),
FS_8X60(FS_GFX2D0, "vdd", "kgsl-2d0.0", &gfx2d0_fs_data),
FS_8X60(FS_GFX2D1, "vdd", "kgsl-2d1.1", &gfx2d1_fs_data),
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index d2fede2..4f37e08 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -302,7 +302,7 @@
extern struct platform_device msm_kgsl_2d1;
extern struct platform_device msm_mipi_dsi1_device;
-extern struct platform_device msm8625_mipi_dsi_device;
+extern struct platform_device mipi_dsi_device;
extern struct platform_device msm_lvds_device;
extern struct platform_device msm_ebi2_lcdc_device;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index ef0b517..1aa3814 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -519,11 +519,13 @@
int disable_dmx;
int disable_fullhd;
u32 cp_enabled;
+ u32 secure_wb_heap;
#ifdef CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *vidc_bus_client_pdata;
#endif
int cont_mode_dpb_count;
int disable_turbo;
+ unsigned long fw_addr;
};
struct vcap_platform_data {
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index f851545..6e7086e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -77,6 +77,7 @@
mutex_unlock(&hsic_status_lock);
}
+/* This function can be called from atomic context. */
static void mdm_toggle_soft_reset(struct mdm_modem_drv *mdm_drv)
{
int soft_reset_direction_assert = 0,
@@ -88,11 +89,20 @@
}
gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
soft_reset_direction_assert);
- usleep_range(5000, 10000);
+ /* Use mdelay because this function can be called from atomic
+ * context.
+ */
+ mdelay(10);
gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
soft_reset_direction_de_assert);
}
+/* This function can be called from atomic context. */
+static void mdm_atomic_soft_reset(struct mdm_modem_drv *mdm_drv)
+{
+ mdm_toggle_soft_reset(mdm_drv);
+}
+
static void mdm_power_down_common(struct mdm_modem_drv *mdm_drv)
{
int i;
@@ -242,6 +252,7 @@
static struct mdm_ops mdm_cb = {
.power_on_mdm_cb = mdm_power_on_common,
.reset_mdm_cb = mdm_power_on_common,
+ .atomic_reset_mdm_cb = mdm_atomic_soft_reset,
.power_down_mdm_cb = mdm_power_down_common,
.debug_state_changed_cb = debug_state_changed,
.status_cb = mdm_status_changed,
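
The usleep_range()-to-mdelay() swap is what makes the toggle safe to expose as atomic_reset_mdm_cb: usleep_range() may sleep and is only legal in process context, while mdelay() busy-waits. The constraint in isolation (toggle_reset_gpio and atomic_ctx are illustrative names, not driver symbols):

	#include <linux/delay.h>
	#include <linux/gpio.h>

	/* Sketch: pick the delay primitive to match the calling context. */
	static void toggle_reset_gpio(unsigned int gpio, bool atomic_ctx)
	{
		gpio_direction_output(gpio, 1);		/* assert soft reset */
		if (atomic_ctx)
			mdelay(10);			/* busy-wait; safe with IRQs off */
		else
			usleep_range(5000, 10000);	/* may sleep; process context only */
		gpio_direction_output(gpio, 0);		/* de-assert soft reset */
	}
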
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 1b09c34..5b181e1 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -313,8 +313,8 @@
if (i <= 0) {
pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
/* Reset the modem so that it will go into download mode. */
- if (mdm_drv && mdm_drv->ops->reset_mdm_cb)
- mdm_drv->ops->reset_mdm_cb(mdm_drv);
+ if (mdm_drv && mdm_drv->ops->atomic_reset_mdm_cb)
+ mdm_drv->ops->atomic_reset_mdm_cb(mdm_drv);
}
return NOTIFY_DONE;
}
@@ -351,6 +351,7 @@
static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
{
+ mdm_drv->mdm_ready = 0;
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
if (mdm_drv->pdata->ramdump_delay_ms > 0) {
/* Wait for the external modem to complete
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 53bfaf0..7ac3727 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -18,6 +18,7 @@
struct mdm_ops {
void (*power_on_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
+ void (*atomic_reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*normal_boot_done_cb)(struct mdm_modem_drv *mdm_drv);
void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*debug_state_changed_cb)(int value);
diff --git a/arch/arm/mach-msm/modem-8660.c b/arch/arm/mach-msm/modem-8660.c
index 0b7b768..9c558e4 100644
--- a/arch/arm/mach-msm/modem-8660.c
+++ b/arch/arm/mach-msm/modem-8660.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -275,8 +275,6 @@
{
if (reset_modem == 1)
smsm_reset_modem(SMSM_RESET);
- else if (reset_modem == 2)
- subsystem_restart("lpass");
reset_modem = 0;
schedule_delayed_work(&debug_crash_modem_work, msecs_to_jiffies(1000));
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index a9d6e4f..264afbd 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -70,7 +70,7 @@
int ahb;
int hw_sel;
const char *slaveclk[NUM_CTX];
- const char *memclk;
+ const char *memclk[NUM_CTX];
unsigned int buswidth;
unsigned int ws;
unsigned int mode;
@@ -116,7 +116,7 @@
struct path_node *pnode;
int commit_index;
struct nodeclk nodeclk[NUM_CTX];
- struct nodeclk memclk;
+ struct nodeclk memclk[NUM_CTX];
void *hw_data;
};
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 5d6653a..e035e35 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -163,17 +163,18 @@
info->nodeclk[ctx].enable = false;
info->nodeclk[ctx].dirty = false;
}
- }
- if (info->node_info->memclk) {
- info->memclk.clk = clk_get_sys("msm_bus",
- info->node_info->memclk);
- if (IS_ERR(info->memclk.clk)) {
- MSM_BUS_ERR("Couldn't get clk %s\n",
- info->node_info->memclk);
- err = -EINVAL;
+
+ if (info->node_info->memclk[ctx]) {
+ info->memclk[ctx].clk = clk_get_sys("msm_bus",
+ info->node_info->memclk[ctx]);
+ if (IS_ERR(info->memclk[ctx].clk)) {
+ MSM_BUS_ERR("Couldn't get clk %s\n",
+ info->node_info->memclk[ctx]);
+ err = -EINVAL;
+ }
+ info->memclk[ctx].enable = false;
+ info->memclk[ctx].dirty = false;
}
- info->memclk.enable = false;
- info->memclk.dirty = false;
}
ret = info->node_info->gateway ?
@@ -316,13 +317,13 @@
nodeclk->rate = rate;
}
}
- if (!status && slave->memclk.clk) {
+ if (!status && slave->memclk[ctx].clk) {
rate = *slave->link_info.sel_clk;
- if (slave->memclk.rate != rate) {
- slave->memclk.rate = rate;
- slave->memclk.dirty = true;
+ if (slave->memclk[ctx].rate != rate) {
+ slave->memclk[ctx].rate = rate;
+ slave->memclk[ctx].dirty = true;
}
- slave->memclk.rate = rate;
+ slave->memclk[ctx].rate = rate;
fabric->clk_dirty = true;
}
}
@@ -361,10 +362,19 @@
static int msm_bus_fabric_clk_set(int enable, struct msm_bus_inode_info *info)
{
int i, status = 0;
- for (i = 0; i < NUM_CTX; i++)
+ long rounded_rate;
+
+ for (i = 0; i < NUM_CTX; i++) {
if (info->nodeclk[i].dirty) {
- status = clk_set_rate(info->nodeclk[i].clk, info->
- nodeclk[i].rate);
+ if (info->nodeclk[i].rate != 0) {
+ rounded_rate = clk_round_rate(info->
+ nodeclk[i].clk, info->nodeclk[i].rate);
+ status = clk_set_rate(info->nodeclk[i].clk,
+ rounded_rate);
+ MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+ info->node_info->id, rounded_rate);
+ }
+
if (enable && !(info->nodeclk[i].enable)) {
clk_prepare_enable(info->nodeclk[i].clk);
info->nodeclk[i].dirty = false;
@@ -377,17 +387,26 @@
}
}
- if (info->memclk.dirty) {
- status = clk_set_rate(info->memclk.clk, info->memclk.rate);
- if (enable && !(info->memclk.enable)) {
- clk_prepare_enable(info->memclk.clk);
- info->memclk.dirty = false;
- info->memclk.enable = true;
- } else if (info->memclk.rate == 0 && (!enable) &&
- (info->memclk.enable)) {
- clk_disable_unprepare(info->memclk.clk);
- info->memclk.dirty = false;
- info->memclk.enable = false;
+ if (info->memclk[i].dirty) {
+ if (info->memclk[i].rate != 0) {
+ rounded_rate = clk_round_rate(info->
+ memclk[i].clk, info->memclk[i].rate);
+ status = clk_set_rate(info->memclk[i].clk,
+ rounded_rate);
+ MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+ info->node_info->id, rounded_rate);
+ }
+
+ if (enable && !(info->memclk[i].enable)) {
+ clk_prepare_enable(info->memclk[i].clk);
+ info->memclk[i].dirty = false;
+ info->memclk[i].enable = true;
+ } else if (info->memclk[i].rate == 0 && (!enable) &&
+ (info->memclk[i].enable)) {
+ clk_disable_unprepare(info->memclk[i].clk);
+ info->memclk[i].dirty = false;
+ info->memclk[i].enable = false;
+ }
}
}
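
Routing every rate request through clk_round_rate() before clk_set_rate() lets the clock framework settle on the nearest frequency it can actually generate instead of failing an arbitrary AXI request outright. The pattern as a standalone sketch (set_bus_clk is a made-up helper):

	#include <linux/clk.h>

	/* Sketch: commit a requested bus rate via the nearest supported rate. */
	static int set_bus_clk(struct clk *c, unsigned long rate)
	{
		long rounded;

		if (!rate)
			return 0;		/* zero means "no request" in this scheme */

		rounded = clk_round_rate(c, rate);
		if (rounded < 0)
			return (int)rounded;	/* the clock cannot satisfy the request */

		return clk_set_rate(c, rounded);
	}
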
diff --git a/arch/arm/mach-msm/msm_rq_stats.c b/arch/arm/mach-msm/msm_rq_stats.c
index 738819f..2ea7ed3 100644
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -44,7 +44,7 @@
sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}
-static ssize_t show_run_queue_avg(struct kobject *kobj,
+static ssize_t run_queue_avg_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
unsigned int val = 0;
@@ -59,6 +59,8 @@
return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -93,6 +95,10 @@
return count;
}
+static struct kobj_attribute run_queue_poll_ms_attr =
+ __ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+ store_run_queue_poll_ms);
+
static ssize_t show_def_timer_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -111,67 +117,33 @@
return count;
}
-#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
- struct attribute *attrib = NULL; \
- struct kobj_attribute *ptr = NULL; \
- ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
- if (ptr) { \
- ptr->attr.name = #att; \
- ptr->attr.mode = S_IRUGO; \
- ptr->show = show_##att; \
- ptr->store = NULL; \
- attrib = &ptr->attr; \
- } \
- attrib; })
+static struct kobj_attribute def_timer_ms_attr =
+ __ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+ store_def_timer_ms);
-#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
- struct attribute *attrib = NULL; \
- struct kobj_attribute *ptr = NULL; \
- ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
- if (ptr) { \
- ptr->attr.name = #att; \
- ptr->attr.mode = S_IWUSR|S_IRUSR; \
- ptr->show = show_##att; \
- ptr->store = store_##att; \
- attrib = &ptr->attr; \
- } \
- attrib; })
+static struct attribute *rq_attrs[] = {
+ &def_timer_ms_attr.attr,
+ &run_queue_avg_attr.attr,
+ &run_queue_poll_ms_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+ .attrs = rq_attrs,
+};
static int init_rq_attribs(void)
{
- int i;
- int err = 0;
- const int attr_count = 4;
-
- struct attribute **attribs =
- kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
-
- if (!attribs)
- goto rel;
+ int err;
rq_info.rq_avg = 0;
-
- attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
- attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
- attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
- attribs[3] = NULL;
-
- for (i = 0; i < attr_count - 1 ; i++) {
- if (!attribs[i])
- goto rel2;
- }
-
- rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
- GFP_KERNEL);
- if (!rq_info.attr_group)
- goto rel3;
- rq_info.attr_group->attrs = attribs;
+ rq_info.attr_group = &rq_attr_group;
/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
rq_info.kobj = kobject_create_and_add("rq-stats",
- &get_cpu_device(0)->kobj);
+ &get_cpu_device(0)->kobj);
if (!rq_info.kobj)
- goto rel3;
+ return -ENOMEM;
err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
if (err)
@@ -179,19 +151,7 @@
else
kobject_uevent(rq_info.kobj, KOBJ_ADD);
- if (!err)
- return err;
-
-rel3:
- kfree(rq_info.attr_group);
- kfree(rq_info.kobj);
-rel2:
- for (i = 0; i < attr_count - 1; i++)
- kfree(attribs[i]);
-rel:
- kfree(attribs);
-
- return -ENOMEM;
+ return err;
}
static int __init msm_rq_stats_init(void)
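
The rename of show_run_queue_avg() to run_queue_avg_show() is forced by __ATTR_RO(), which derives the .show callback from the attribute name. What the macro produces, approximately (a sketch of the <linux/sysfs.h> expansion, not copied from it):

	/* __ATTR_RO(run_queue_avg) expands to roughly: */
	static struct kobj_attribute run_queue_avg_attr = {
		.attr = { .name = "run_queue_avg", .mode = 0444 },	/* read-only */
		.show = run_queue_avg_show,	/* <name>_show, hence the rename */
	};

Moving to static attributes and a static attribute_group also deletes every allocation-failure unwind path the old kzalloc()-based macros needed.
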
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 5f76a92..3635572 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -19,7 +19,10 @@
#include <mach/msm-krait-l2-accessors.h>
#define MAX_L2_PERIOD ((1ULL << 32) - 1)
-#define MAX_KRAIT_L2_CTRS 5
+#define MAX_KRAIT_L2_CTRS 10
+
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
#define L2_EVT_MASK 0xfffff
@@ -29,7 +32,6 @@
#define L2PMCCNTCR 0x408
#define L2PMCCNTSR 0x40A
#define L2CYCLE_CTR_BIT 31
-#define L2CYCLE_CTR_EVENT_IDX 4
#define L2CYCLE_CTR_RAW_CODE 0xfe
#define L2PMOVSR 0x406
@@ -109,6 +111,9 @@
/* L2 slave port traffic filtering */
static u32 l2_slv_filter_prefix = 0x000f0010;
+static int total_l2_ctrs;
+static int l2_cycle_ctr_idx;
+
static u32 pmu_type;
static struct arm_pmu krait_l2_pmu;
@@ -203,7 +208,7 @@
static void enable_intenset(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMINTENSET, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMINTENSET, 1 << idx);
@@ -211,7 +216,7 @@
static void disable_intenclr(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMINTENCLR, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMINTENCLR, 1 << idx);
@@ -219,7 +224,7 @@
static void enable_counter(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCNTENSET, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMCNTENSET, 1 << idx);
@@ -227,7 +232,7 @@
static void disable_counter(u32 idx)
{
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCNTENCLR, 1 << L2CYCLE_CTR_BIT);
else
set_l2_indirect_reg(L2PMCNTENCLR, 1 << idx);
@@ -238,7 +243,7 @@
u32 val;
u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
val = get_l2_indirect_reg(L2PMCCNTR);
else
val = get_l2_indirect_reg(counter_reg);
@@ -250,7 +255,7 @@
{
u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE;
- if (idx == L2CYCLE_CTR_EVENT_IDX)
+ if (idx == l2_cycle_ctr_idx)
set_l2_indirect_reg(L2PMCCNTR, val);
else
set_l2_indirect_reg(counter_reg, val);
@@ -330,11 +335,11 @@
int ctr = 0;
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(L2CYCLE_CTR_EVENT_IDX, cpuc->used_mask))
- return L2CYCLE_CTR_EVENT_IDX;
+ if (!test_and_set_bit(l2_cycle_ctr_idx, cpuc->used_mask))
+ return l2_cycle_ctr_idx;
}
- for (ctr = 0; ctr < MAX_KRAIT_L2_CTRS - 1; ctr++) {
+ for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
if (!test_and_set_bit(ctr, cpuc->used_mask))
return ctr;
}
@@ -389,7 +394,7 @@
bitp = __ffs(pmovsr);
if (bitp == L2CYCLE_CTR_BIT)
- idx = L2CYCLE_CTR_EVENT_IDX;
+ idx = l2_cycle_ctr_idx;
else
idx = bitp;
@@ -488,6 +493,19 @@
return 1;
}
+static int get_num_events(void)
+{
+ int val;
+
+ val = get_l2_indirect_reg(L2PMCR);
+
+ /*
+ * Read bits 15:11 of the L2PMCR and add 1
+ * for the cycle counter.
+ */
+ return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
static struct arm_pmu krait_l2_pmu = {
.id = ARM_PERF_PMU_ID_KRAIT_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -505,7 +523,6 @@
.map_event = krait_l2_map_event,
.max_period = MAX_L2_PERIOD,
.get_hw_events = krait_l2_get_hw_events,
- .num_events = MAX_KRAIT_L2_CTRS,
.test_set_event_constraints = msm_l2_test_set_ev_constraint,
.clear_event_constraints = msm_l2_clear_ev_constraint,
.pmu.attr_groups = msm_l2_pmu_attr_grps,
@@ -533,6 +550,21 @@
/* Reset all ctrs */
set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
+ /* Get num of counters in the L2cc PMU. */
+ total_l2_ctrs = get_num_events();
+ krait_l2_pmu.num_events = total_l2_ctrs;
+
+ pr_info("Detected %d counters on the L2CC PMU.\n",
+ total_l2_ctrs);
+
+ /*
+ * The L2 cycle counter index in the used_mask
+ * bit stream is always after the other counters.
+ * Counter indexes begin from 0 to keep it consistent
+ * with the h/w.
+ */
+ l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
/* Avoid spurious interrupt if any */
get_reset_pmovsr();
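
Bits 15:11 of L2PMCR (its N field) report how many event counters the L2CC PMU implements, and the +1 accounts for the dedicated cycle counter; that is also why l2_cycle_ctr_idx ends up at total_l2_ctrs - 1, just past the event counters. A worked example (the register value is illustrative):

	#define PMCR_NUM_EV_SHIFT	11
	#define PMCR_NUM_EV_MASK	0x1f

	/* pmcr = 0x00004800: (0x4800 >> 11) & 0x1f = 9 event counters, +1 cycle counter = 10 */
	static int num_l2_counters(unsigned int pmcr)
	{
		return ((pmcr >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
	}
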
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index 5a5bf57..aae2552 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -16,9 +16,9 @@
#include <linux/spinlock.h>
-#define MAX_SCORPION_L2_CTRS 5
+#define MAX_SCORPION_L2_CTRS 10
+
#define SCORPION_L2CYCLE_CTR_BIT 31
-#define SCORPION_L2CYCLE_CTR_EVENT_IDX 4
#define SCORPION_L2CYCLE_CTR_RAW_CODE 0xfe
#define SCORPIONL2_PMNC_E (1 << 0) /* Enable all counters */
#define SCORPION_L2_EVT_PREFIX 3
@@ -29,6 +29,8 @@
#define L2_EVT_PREFIX_SHIFT 16
#define L2_SLAVE_EVT_PREFIX 4
+#define PMCR_NUM_EV_SHIFT 11
+#define PMCR_NUM_EV_MASK 0x1f
/*
* The L2 PMU is shared between all CPUs, so protect
@@ -70,6 +72,9 @@
NULL,
};
+static u32 total_l2_ctrs;
+static u32 l2_cycle_ctr_idx;
+
static u32 pmu_type;
static struct arm_pmu scorpion_l2_pmu;
@@ -508,7 +513,7 @@
static void scorpion_l2_enable_intenset(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -518,7 +523,7 @@
static void scorpion_l2_disable_intenclr(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -528,7 +533,7 @@
static void scorpion_l2_enable_counter(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -538,7 +543,7 @@
static void scorpion_l2_disable_counter(u32 idx)
{
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r"
(1 << SCORPION_L2CYCLE_CTR_BIT));
} else {
@@ -551,7 +556,7 @@
u32 val;
unsigned long iflags;
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mrc p15, 3, %0, c15, c4, 5" : "=r" (val));
} else {
raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -571,7 +576,7 @@
{
unsigned long iflags;
- if (idx == SCORPION_L2CYCLE_CTR_EVENT_IDX) {
+ if (idx == l2_cycle_ctr_idx) {
asm volatile ("mcr p15, 3, %0, c15, c4, 5" : : "r" (val));
} else {
raw_spin_lock_irqsave(&scorpion_l2_pmu_hw_events.pmu_lock,
@@ -662,12 +667,12 @@
int ctr = 0;
if (hwc->config_base == SCORPION_L2CYCLE_CTR_RAW_CODE) {
- if (!test_and_set_bit(SCORPION_L2CYCLE_CTR_EVENT_IDX,
+ if (!test_and_set_bit(l2_cycle_ctr_idx,
cpuc->used_mask))
- return SCORPION_L2CYCLE_CTR_EVENT_IDX;
+ return l2_cycle_ctr_idx;
}
- for (ctr = 0; ctr < MAX_SCORPION_L2_CTRS - 1; ctr++) {
+ for (ctr = 0; ctr < total_l2_ctrs - 1; ctr++) {
if (!test_and_set_bit(ctr, cpuc->used_mask))
return ctr;
}
@@ -726,7 +731,7 @@
bitp = __ffs(pmovsr);
if (bitp == SCORPION_L2CYCLE_CTR_BIT)
- idx = SCORPION_L2CYCLE_CTR_EVENT_IDX;
+ idx = l2_cycle_ctr_idx;
else
idx = bitp;
@@ -834,6 +839,18 @@
return 1;
}
+static int get_num_events(void)
+{
+ int val;
+
+ val = scorpion_l2_pmnc_read();
+ /*
+ * Read bits 15:11 of the L2PMCR and add 1
+ * for the cycle counter.
+ */
+ return ((val >> PMCR_NUM_EV_SHIFT) & PMCR_NUM_EV_MASK) + 1;
+}
+
static struct arm_pmu scorpion_l2_pmu = {
.id = ARM_PERF_PMU_ID_SCORPIONMP_L2,
.type = ARM_PMU_DEVICE_L2CC,
@@ -851,7 +868,6 @@
.map_event = scorpion_l2_map_event,
.max_period = (1LLU << 32) - 1,
.get_hw_events = scorpion_l2_get_hw_events,
- .num_events = MAX_SCORPION_L2_CTRS,
.test_set_event_constraints = msm_l2_test_set_ev_constraint,
.clear_event_constraints = msm_l2_clear_ev_constraint,
.pmu.attr_groups = msm_l2_pmu_attr_grps,
@@ -879,6 +895,20 @@
/* Avoid spurious interrupt if any */
scorpion_l2_get_reset_pmovsr();
+ total_l2_ctrs = get_num_events();
+ scorpion_l2_pmu.num_events = total_l2_ctrs;
+
+ pr_info("Detected %d counters on the L2CC PMU.\n",
+ total_l2_ctrs);
+
+ /*
+ * The L2 cycle counter index in the used_mask
+ * bit stream is always after the other counters.
+ * Counter indexes begin from 0 to keep it consistent
+ * with the h/w.
+ */
+ l2_cycle_ctr_idx = total_l2_ctrs - 1;
+
return platform_driver_register(&scorpion_l2_pmu_driver);
}
device_initcall(register_scorpion_l2_pmu_driver);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index bfbf4bc..3f6eb95 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -114,11 +114,15 @@
static int pil_proxy_vote(struct pil_device *pil)
{
+ int ret = 0;
+
if (pil->desc->ops->proxy_vote) {
wake_lock(&pil->wlock);
- return pil->desc->ops->proxy_vote(pil->desc);
+ ret = pil->desc->ops->proxy_vote(pil->desc);
+ if (ret)
+ wake_unlock(&pil->wlock);
}
- return 0;
+ return ret;
}
static void pil_proxy_unvote(struct pil_device *pil, unsigned long timeout)
@@ -226,7 +230,7 @@
static int segment_is_loadable(const struct elf32_phdr *p)
{
- return (p->p_type & PT_LOAD) && !segment_is_hash(p->p_flags);
+ return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags);
}
/* Synchronize request_firmware() with suspend */
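
The segment_is_loadable() change matters because ELF p_type values form an enumeration, not a bitmask: PT_LOAD is 1, so p_type & PT_LOAD is also true for any odd type such as PT_INTERP (3), which would wrongly be treated as loadable. The difference as a sketch (illustrative helpers only):

	#include <linux/elf.h>	/* PT_LOAD = 1, PT_INTERP = 3, PT_NOTE = 4, ... */

	static int loadable_buggy(const struct elf32_phdr *p)
	{
		return p->p_type & PT_LOAD;	/* also true for PT_INTERP: 3 & 1 == 1 */
	}

	static int loadable_fixed(const struct elf32_phdr *p)
	{
		return p->p_type == PT_LOAD;	/* true only for real PT_LOAD segments */
	}

The pil_proxy_vote() change above follows the same defensive spirit: when ops->proxy_vote() fails, the wake lock taken just before it is released, so lock and unlock stay balanced on every path.
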
diff --git a/arch/arm/mach-msm/pil-gss.c b/arch/arm/mach-msm/pil-gss.c
index dc7baa1..73248db 100644
--- a/arch/arm/mach-msm/pil-gss.c
+++ b/arch/arm/mach-msm/pil-gss.c
@@ -356,6 +356,8 @@
desc->ops = &pil_gss_ops;
dev_info(&pdev->dev, "using non-secure boot\n");
}
+ /* Force into low power mode because hardware doesn't do this */
+ desc->ops->shutdown(desc);
drv->pil = msm_pil_register(desc);
if (IS_ERR(drv->pil)) {
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index b74ebfb..595484e 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -1040,8 +1040,6 @@
msm_pm_mode_sysfs_add();
msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
- msm_spm_allow_x_cpu_set_vdd(false);
-
suspend_set_ops(&msm_pm_ops);
msm_pm_qtimer_available();
msm_cpuidle_init();
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index 22779b4..1092c77 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -646,7 +646,7 @@
goto err;
}
- acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
+ acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
atomic_read(&acdb_data.map_handle));
if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
pr_err("%s: Could not import map handle!!!\n", __func__);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
index 0591a71..2502d61 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_lpa.c
@@ -481,7 +481,7 @@
goto client_error;
}
- handle = ion_import_fd(client, info->fd);
+ handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: could not get handle of the given fd\n", __func__);
goto import_error;
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
index fdc596d..1c29617 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c
@@ -699,7 +699,7 @@
goto client_error;
}
- handle = ion_import_fd(client, info->fd);
+ handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(handle)) {
pr_err("%s: could not get handle of the given fd\n", __func__);
goto import_error;
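
ion_import_fd() became ion_import_dma_buf() when ION's buffer-sharing path was rebased onto the kernel dma-buf framework; the arguments and return value are unchanged, so these conversions are mechanical. A hedged usage sketch against the msm ION API (import_shared_buffer is a made-up wrapper):

	#include <linux/err.h>

	/* Sketch: import a buffer shared as a dma-buf fd into an ION client. */
	static struct ion_handle *import_shared_buffer(struct ion_client *client, int fd)
	{
		struct ion_handle *handle = ion_import_dma_buf(client, fd);

		if (IS_ERR_OR_NULL(handle)) {
			pr_err("%s: could not import fd %d\n", __func__, fd);
			return NULL;
		}
		return handle;
	}
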
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 68e0f41..44ef822 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -144,14 +144,8 @@
avail = tty_prepare_flip_string(tty, &ptr, avail);
if (avail <= 0) {
- if (!timer_pending(&info->buf_req_timer)) {
- init_timer(&info->buf_req_timer);
- info->buf_req_timer.expires = jiffies +
- ((30 * HZ)/1000);
- info->buf_req_timer.function = buf_req_retry;
- info->buf_req_timer.data = param;
- add_timer(&info->buf_req_timer);
- }
+ mod_timer(&info->buf_req_timer,
+ jiffies + msecs_to_jiffies(30));
return;
}
@@ -572,6 +566,8 @@
smd_tty[idx].driver.driver.owner = THIS_MODULE;
spin_lock_init(&smd_tty[idx].reset_lock);
smd_tty[idx].is_open = 0;
+ setup_timer(&smd_tty[idx].buf_req_timer, buf_req_retry,
+ (unsigned long)&smd_tty[idx]);
init_waitqueue_head(&smd_tty[idx].ch_opened_wait_queue);
ret = platform_driver_register(&smd_tty[idx].driver);
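
Replacing the open-coded init_timer() block with a single setup_timer() at init time plus mod_timer() at the retry site is shorter and closes a race: mod_timer() atomically re-arms a pending timer, which is exactly what the old timer_pending() test tried (non-atomically) to guard against. The pattern in isolation (retry_fn, init_once and kick_retry are illustrative names):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list retry_timer;

	static void retry_fn(unsigned long data)
	{
		/* retry the flip-buffer request here */
	}

	static void init_once(void)
	{
		setup_timer(&retry_timer, retry_fn, 0UL);	/* once, at setup */
	}

	static void kick_retry(void)
	{
		/* arms the timer, or pushes out its expiry if already pending */
		mod_timer(&retry_timer, jiffies + msecs_to_jiffies(30));
	}
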
diff --git a/arch/arm/mach-msm/spm.c b/arch/arm/mach-msm/spm.c
index 4654fba..3d90678 100644
--- a/arch/arm/mach-msm/spm.c
+++ b/arch/arm/mach-msm/spm.c
@@ -72,8 +72,6 @@
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
/******************************************************************************
* Internal helper functions
*****************************************************************************/
@@ -189,20 +187,9 @@
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
- unsigned long flags;
struct msm_spm_device *dev;
uint32_t timeout_us;
- local_irq_save(flags);
-
- if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
- unlikely(smp_processor_id() != cpu)) {
- if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
- pr_info("%s: attempting to set vdd of cpu %u from "
- "cpu %u\n", __func__, cpu, smp_processor_id());
- goto set_vdd_x_cpu_bail;
- }
-
dev = &per_cpu(msm_spm_devices, cpu);
if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
@@ -239,15 +226,12 @@
pr_info("%s: cpu %u done, remaining timeout %uus\n",
__func__, cpu, timeout_us);
- local_irq_restore(flags);
return 0;
set_vdd_bail:
pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n",
__func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev));
-set_vdd_x_cpu_bail:
- local_irq_restore(flags);
return -EIO;
}
@@ -263,11 +247,6 @@
mb();
}
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
unsigned int cpu;
diff --git a/arch/arm/mach-msm/spm.h b/arch/arm/mach-msm/spm.h
index 154303b..e81e335 100644
--- a/arch/arm/mach-msm/spm.h
+++ b/arch/arm/mach-msm/spm.h
@@ -146,16 +146,9 @@
*/
int msm_spm_turn_on_cpu_rail(unsigned int cpu);
-
/* Internal low power management specific functions */
/**
- * msm_spm_allow_x_cpu_set_vdd(): Turn on/off cross calling to set voltage
- * @allowed: boolean to indicate on/off.
- */
-void msm_spm_allow_x_cpu_set_vdd(bool allowed);
-
-/**
* msm_spm_reinit(): Reinitialize SPM registers
*/
void msm_spm_reinit(void);
@@ -251,11 +244,6 @@
/* empty */
}
-static inline void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- /* empty */
-}
-
static inline int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
return -ENOSYS;
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 6e81be6..079a3ac 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -40,31 +40,15 @@
static struct msm_spm_device msm_spm_l2_device;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
-static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
-
-void msm_spm_allow_x_cpu_set_vdd(bool allowed)
-{
- atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
-}
-EXPORT_SYMBOL(msm_spm_allow_x_cpu_set_vdd);
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
- unsigned long flags;
struct msm_spm_device *dev;
int ret = -EIO;
- local_irq_save(flags);
- if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
- unlikely(smp_processor_id() != cpu)) {
- goto set_vdd_x_cpu_bail;
- }
-
dev = &per_cpu(msm_cpu_spm_device, cpu);
ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);
-set_vdd_x_cpu_bail:
- local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(msm_spm_set_vdd);
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index c98a672..e630e31 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -92,6 +92,9 @@
/* MSM 8960 restart ordering info */
static const char * const order_8960[] = {"modem", "lpass"};
+/* SGLTE restart ordering info */
+static const char * const order_8960_sglte[] = {"external_modem",
+ "modem"};
static struct subsys_soc_restart_order restart_orders_8960_one = {
.subsystem_list = order_8960,
@@ -99,9 +102,19 @@
.subsys_ptrs = {[ARRAY_SIZE(order_8960)] = NULL}
};
+static struct subsys_soc_restart_order restart_orders_8960_fusion_sglte = {
+ .subsystem_list = order_8960_sglte,
+ .count = ARRAY_SIZE(order_8960_sglte),
+ .subsys_ptrs = {[ARRAY_SIZE(order_8960_sglte)] = NULL}
+ };
+
static struct subsys_soc_restart_order *restart_orders_8960[] = {
&restart_orders_8960_one,
-};
+ };
+
+static struct subsys_soc_restart_order *restart_orders_8960_sglte[] = {
+ &restart_orders_8960_fusion_sglte,
+ };
/* These will be assigned to one of the sets above after
* runtime SoC identification.
@@ -557,8 +570,18 @@
if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615() ||
cpu_is_apq8064()) {
- restart_orders = restart_orders_8960;
- n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+ restart_orders = restart_orders_8960_sglte;
+ n_restart_orders =
+ ARRAY_SIZE(restart_orders_8960_sglte);
+ } else {
+ restart_orders = restart_orders_8960;
+ n_restart_orders = ARRAY_SIZE(restart_orders_8960);
+ }
+ for (i = 0; i < n_restart_orders; i++) {
+ mutex_init(&restart_orders[i]->powerup_lock);
+ mutex_init(&restart_orders[i]->shutdown_lock);
+ }
}
if (restart_orders == NULL || n_restart_orders < 1) {
diff --git a/arch/arm/mach-msm/tz_log.c b/arch/arm/mach-msm/tz_log.c
index 7426bb2..db797cd 100644
--- a/arch/arm/mach-msm/tz_log.c
+++ b/arch/arm/mach-msm/tz_log.c
@@ -536,12 +536,19 @@
return 0;
}
+static struct of_device_id tzlog_match[] = {
+ { .compatible = "qcom,tz-log",
+ },
+ {}
+};
+
static struct platform_driver tz_log_driver = {
.probe = tz_log_probe,
.remove = __devexit_p(tz_log_remove),
.driver = {
.name = "tz_log",
.owner = THIS_MODULE,
+ .of_match_table = tzlog_match,
},
};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 702408c..1b2a723 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,7 +17,9 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/memory.h>
@@ -26,6 +28,9 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
#include "mm.h"
@@ -56,6 +61,19 @@
return mask;
}
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+ void *ptr;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+}
+
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
@@ -64,23 +82,6 @@
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
- void *ptr;
- u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
- u64 limit = (mask + 1) & ~mask;
- if (limit && size >= limit) {
- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
- size, mask);
- return NULL;
- }
-#endif
-
- if (!mask)
- return NULL;
-
- if (mask < 0xffffffffULL)
- gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
@@ -93,14 +94,7 @@
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- /*
- * Ensure that the allocated pages are zeroed, and that any data
- * lurking in the kernel direct-mapped region is invalidated.
- */
- ptr = page_address(page);
- memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ __dma_clear_buffer(page, size);
return page;
}
@@ -170,6 +164,9 @@
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
+
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
@@ -210,9 +207,101 @@
return ret;
}
-
core_initcall(consistent_init);
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+ .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+ coherent_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ size_t size = coherent_pool_size;
+ struct page *page;
+ void *ptr;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return 0;
+
+ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (ptr) {
+ coherent_head.vm_start = (unsigned long) ptr;
+ coherent_head.vm_end = (unsigned long) ptr + size;
+ printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)size / 1024);
+ return 0;
+ }
+ printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)size / 1024);
+ return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t start = dma_mmu_remap[i].base;
+ phys_addr_t end = start + dma_mmu_remap[i].size;
+ struct map_desc map;
+ unsigned long addr;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ continue;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_DMA_READY;
+
+ /*
+ * Clear previous low-memory mapping
+ */
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ iotable_init(&map, 1);
+ }
+}
+
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
@@ -319,20 +408,173 @@
arm_vmregion_free(&consistent_head, c);
}
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte_ext(pte, mk_pte(page, prot), 0);
+ return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned end = start + size;
+
+ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ dsb();
+ flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
+{
+ struct page *page;
+ void *ptr;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (!ptr) {
+ __dma_free_buffer(page, size);
+ return NULL;
+ }
+
+ *ret_page = page;
+ return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, const void *caller)
+{
+ struct arm_vmregion *c;
+ size_t align;
+
+ if (!coherent_head.vm_start) {
+ printk(KERN_ERR "%s: coherent pool not initialised!\n",
+ __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the region allocation - allocations from pool are rather
+ * small, so align them to their order in pages, minimum is a page
+ * size. This helps reduce fragmentation of the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+ if (c) {
+ void *ptr = (void *)c->vm_start;
+ struct page *page = virt_to_page(ptr);
+ *ret_page = page;
+ return ptr;
+ }
+ return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+ unsigned long start = (unsigned long)cpu_addr;
+ unsigned long end = start + size;
+ struct arm_vmregion *c;
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ return 0;
+
+ c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+ if ((c->vm_end - c->vm_start) != size) {
+ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ arm_vmregion_free(&coherent_head, c);
+ return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page)
+{
+ unsigned long order = get_order(size);
+ size_t count = size >> PAGE_SHIFT;
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ return NULL;
+
+ __dma_clear_buffer(page, size);
+ __dma_remap(page, size, prot);
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __dma_remap(page, size, pgprot_kernel);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+#define nommu() 0
+
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
-#define __dma_free_remap(addr, size) do { } while (0)
+#define nommu() 1
+
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+ struct page **ret_page)
{
struct page *page;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, pgprot_t prot, const void *caller)
+{
+ u64 mask = get_coherent_dma_mask(dev);
+ struct page *page;
void *addr;
+#ifdef CONFIG_DMA_API_DEBUG
+ u64 limit = (mask + 1) & ~mask;
+ if (limit && size >= limit) {
+ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+ size, mask);
+ return NULL;
+ }
+#endif
+
+ if (!mask)
+ return NULL;
+
+ if (mask < 0xffffffffULL)
+ gfp |= GFP_DMA;
+
/*
* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
@@ -345,19 +587,17 @@
*handle = ~0;
size = PAGE_ALIGN(size);
- page = __dma_alloc_buffer(dev, size, gfp);
- if (!page)
- return NULL;
-
- if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
else
- addr = page_address(page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
- else
- __dma_free_buffer(page, size);
return addr;
}
@@ -366,8 +606,8 @@
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
{
void *memory;
@@ -398,25 +638,11 @@
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
- unsigned long user_size, kern_size;
- struct arm_vmregion *c;
-
- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- unsigned long off = vma->vm_pgoff;
-
- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
- if (off < kern_size &&
- user_size <= (kern_size - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(c->vm_pages) + off,
- user_size << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- }
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
#endif /* CONFIG_MMU */
return ret;
@@ -438,23 +664,33 @@
}
EXPORT_SYMBOL(dma_mmap_writecombine);
+
/*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
- WARN_ON(irqs_disabled());
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
- if (!arch_is_coherent())
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
__dma_free_remap(cpu_addr, size);
-
- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+ __dma_free_buffer(page, size);
+ } else {
+ if (__free_from_pool(cpu_addr, size))
+ return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+ WARN_ON(irqs_disabled());
+ __free_from_contiguous(dev, page, size);
+ }
}
EXPORT_SYMBOL(dma_free_coherent);
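
Taken together, __dma_alloc() now selects one of four strategies. Condensed into a sketch (the helpers are those defined above; alloc_dispatch itself is illustrative):

	static void *alloc_dispatch(struct device *dev, size_t size, gfp_t gfp,
				    pgprot_t prot, struct page **page, const void *caller)
	{
		if (arch_is_coherent() || nommu())		/* no remapping needed */
			return __alloc_simple_buffer(dev, size, gfp, page);
		if (cpu_architecture() < CPU_ARCH_ARMv6)	/* legacy vmalloc-area remap */
			return __alloc_remap_buffer(dev, size, gfp, prot, page, caller);
		if (gfp & GFP_ATOMIC)				/* cannot sleep: static pool */
			return __alloc_from_pool(dev, size, page, caller);
		return __alloc_from_contiguous(dev, size, prot, page);	/* CMA; may sleep */
	}

Because the atomic pool is carved out once at boot, its size is a real tuning knob: the default is DEFAULT_CONSISTENT_DMA_SIZE / 8, and memparse() accepts the usual suffixes on the kernel command line, e.g. coherent_pool=4M.
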
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 59e252b..32f61f5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
+#include <linux/dma-contiguous.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
@@ -235,6 +236,17 @@
}
#endif
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ } else
+ arm_dma_limit = 0xffffffff;
+#endif
+}
+
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
static void __init arm_bootmem_free_apnm(unsigned long max_low,
unsigned long max_high)
@@ -305,12 +317,9 @@
* Adjust the sizes according to any special requirements for
* this machine type.
*/
- if (arm_dma_zone_size) {
+ if (arm_dma_zone_size)
arm_adjust_dma_zone(zone_size, zhole_size,
arm_dma_zone_size >> PAGE_SHIFT);
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
- } else
- arm_dma_limit = 0xffffffff;
#endif
free_area_init_node(0, zone_size, min, zhole_size);
@@ -425,6 +434,12 @@
if (mdesc->reserve)
mdesc->reserve();
+ /*
+ * reserve memory for DMA contiguous allocations,
+ * must come from DMA area inside low memory
+ */
+ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 411fbd9..bd41abc 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -68,6 +68,8 @@
#endif
struct map_desc;
+extern phys_addr_t arm_lowmem_limit;
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e6b733b..a20b6cb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -304,6 +304,11 @@
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_DMA_READY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
@@ -445,6 +450,7 @@
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -478,6 +484,7 @@
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
@@ -533,6 +540,7 @@
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
@@ -643,7 +651,7 @@
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -862,7 +870,7 @@
}
early_param("vmalloc", early_vmalloc);
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
@@ -953,8 +961,8 @@
bank->size = newsize;
}
#endif
- if (!bank->highmem && bank->start + bank->size > lowmem_limit)
- lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+ arm_lowmem_limit = bank->start + bank->size;
j++;
}
@@ -979,8 +987,8 @@
}
#endif
meminfo.nr_banks = j;
- high_memory = __va(lowmem_limit - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+ memblock_set_current_limit(arm_lowmem_limit);
}
static inline void prepare_page_table(void)
@@ -1005,8 +1013,8 @@
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
- if (end >= lowmem_limit)
- end = lowmem_limit;
+ if (end >= arm_lowmem_limit)
+ end = arm_lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
@@ -1251,8 +1259,8 @@
start = reg->base;
end = start + reg->size;
- if (end > lowmem_limit)
- end = lowmem_limit;
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
if (start >= end)
break;
@@ -1323,11 +1331,12 @@
{
void *zero_page;
- memblock_set_current_limit(lowmem_limit);
+ memblock_set_current_limit(arm_lowmem_limit);
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c9866b0..7cbdfda 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,7 @@
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if !SWIOTLB
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..c092416
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4b4331d..7b9227b 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250..62c9457 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@
struct dma_attrs *attrs)
{
unsigned long dma_mask;
- struct page *page;
+ struct page *page = NULL;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t addr;
dma_mask = dma_alloc_coherent_mask(dev, flag);
flag |= __GFP_ZERO;
again:
- page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+ if (!(flag & GFP_ATOMIC))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
return NULL;
@@ -127,6 +131,16 @@
return page_address(page);
}
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = virt_to_page(vaddr);
+
+ if (!dma_release_from_contiguous(dev, page, count))
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
* parameter documentation.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f960506..871be4a 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@
return nents;
}
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
static void nommu_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -97,7 +91,7 @@
struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
- .free = nommu_free_coherent,
+ .free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1a29015..d6c956e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -934,6 +935,7 @@
}
#endif
memblock.current_limit = get_max_mapped();
+ dma_contiguous_reserve(0);
/*
* NOTE: On x86-32, only from this point on, fixmaps are ready for use.
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 4201aba..0b92897 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -232,4 +232,94 @@
Provides a user space API to the sw sync object.
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+
+config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+ hardware components that do not support I/O mapping or scatter-gather.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 16
+ help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 10
+ help
+ Defines the size of the default memory area for the Contiguous
+ Memory Allocator as a percentage of the total memory in the system.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 9
+ default 8
+ help
+ The DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is simply a waste of memory. With this parameter
+ you can specify the maximum PAGE_SIZE order for contiguous buffers.
+ Larger buffers will be aligned only to this specified order. The order
+ is expressed as a power of two multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+config CMA_AREAS
+ int "Maximum count of the CMA device-private areas"
+ default 7
+ help
+ CMA allows creating CMA areas for particular devices. This parameter
+ sets the maximum number of such device-private CMA areas in the
+ system.
+
+ If unsure, leave the default value "7".
+
+endif
+
endmenu
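
A plausible .config fragment selecting the defaults described above; the option names are exactly those introduced by this Kconfig hunk, the values are only an example:

	CONFIG_CMA=y
	CONFIG_CMA_SIZE_MBYTES=16
	CONFIG_CMA_SIZE_SEL_MBYTES=y
	CONFIG_CMA_ALIGNMENT=8
	CONFIG_CMA_AREAS=7
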
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index f81ab90..cb4528f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@
attribute_container.o transport_class.o \
topology.o sys.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 0000000..78efb03
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * The default global CMA area size can be defined in the kernel's .config.
+ * This is useful mainly for distro maintainers who want to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their
+ * system should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
+ return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+ struct memblock_region *reg;
+ unsigned long total_pages = 0;
+
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ total_pages += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or
+ * bootmem) has been activated and all other subsystems have already
+ * allocated/reserved memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+ unsigned long selected_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ selected_size / SZ_1M);
+
+ dma_declare_contiguous(NULL, selected_size, 0, limit);
+ }
+};
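
Note the precedence implemented above: a cma= value on the kernel command line always wins over the Kconfig-selected default, and cma=0 suppresses the global area entirely. For example:

	cma=64M    (reserve a 64 MiB global CMA area, ignoring the .config default)
	cma=0      (skip the global area; device-private areas are unaffected)
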
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct cma *cma;
+ int ret = -ENOMEM;
+
+ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+ cma = kmalloc(sizeof *cma, GFP_KERNEL);
+ if (!cma)
+ return ERR_PTR(-ENOMEM);
+
+ cma->base_pfn = base_pfn;
+ cma->count = count;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ goto no_mem;
+
+ ret = cma_activate_area(base_pfn, count);
+ if (ret)
+ goto error;
+
+ pr_debug("%s: returned %p\n", __func__, (void *)cma);
+ return cma;
+
+error:
+ kfree(cma->bitmap);
+no_mem:
+ kfree(cma);
+ return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+ phys_addr_t start;
+ unsigned long size;
+ struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma_reserved *r = cma_reserved;
+ unsigned i = cma_reserved_count;
+
+ pr_debug("%s()\n", __func__);
+
+ for (; i; --i, ++r) {
+ struct cma *cma;
+ cma = cma_create_area(PFN_DOWN(r->start),
+ r->size >> PAGE_SHIFT);
+ if (!IS_ERR(cma))
+ dev_set_cma_area(r->dev, cma);
+ }
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ unsigned long alignment;
+
+ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+ (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit);
+
+ /* Sanity checks */
+ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size)
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+
+ /* Reserve memory */
+ if (base) {
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0) {
+ base = -EBUSY;
+ goto err;
+ }
+ } else {
+ /*
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ if (!addr) {
+ base = -ENOMEM;
+ goto err;
+ } else if (addr + size > ~(unsigned long)0) {
+ memblock_free(addr, size);
+ base = -EINVAL;
+ goto err;
+ } else {
+ base = addr;
+ }
+ }
+
+ /*
+ * Each reserved area must be initialised later, once more kernel
+ * subsystems (like the slab allocator) are available.
+ */
+ r->start = base;
+ r->size = size;
+ r->dev = dev;
+ cma_reserved_count++;
+ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ (unsigned long)base);
+
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(base, size);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ return base;
+}
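
A hedged board-code sketch of a device-private reservation; my_camera_pdev is a hypothetical platform device, and the call must be made while memblock is still active, e.g. from the machine's reserve() callback (compare the arm_memblock_init() hunk above):

	/* sketch: 8 MiB for one device, any base address, below the first 256 MiB */
	dma_declare_contiguous(&my_camera_pdev.dev, 8 * SZ_1M, 0, SZ_256M);
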
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. It requires the architecture-specific dev_get_cma_area()
+ * helper function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+{
+ unsigned long mask, pfn, pageno, start = 0;
+ struct cma *cma = dev_get_cma_area(dev);
+ int ret;
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ count, align);
+
+ if (!count)
+ return NULL;
+
+ mask = (1 << align) - 1;
+
+ mutex_lock(&cma_mutex);
+
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+ start, count, mask);
+ if (pageno >= cma->count) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pfn = cma->base_pfn + pageno;
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, count);
+ break;
+ } else if (ret != -EBUSY) {
+ goto error;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a slightly different memory target */
+ start = pageno + mask + 1;
+ }
+
+ mutex_unlock(&cma_mutex);
+
+ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+ return pfn_to_page(pfn);
+error:
+ mutex_unlock(&cma_mutex);
+ return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the
+ * contiguous area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ struct cma *cma = dev_get_cma_area(dev);
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ mutex_lock(&cma_mutex);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ free_contig_range(pfn, count);
+ mutex_unlock(&cma_mutex);
+
+ return true;
+}
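
Taken together, a minimal allocation/release sketch against these two entry points. count is in pages and align is a PAGE_SIZE order, so with 4 KiB pages the call below requests 1 MiB aligned to 1 MiB; dev is a placeholder:

	struct page *pages;

	pages = dma_alloc_from_contiguous(dev, 256, 8);
	if (!pages)
		return -ENOMEM;
	/* ... use page_address(pages), or map it for DMA ... */
	if (!dma_release_from_contiguous(dev, pages, 256))
		pr_warn("pages were not part of the CMA area\n");
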
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index 6cd1806..8650e83 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -177,13 +177,13 @@
if (!msm_rotator_dev->client)
return -EINVAL;
- *pihdl = ion_import_fd(msm_rotator_dev->client, mem_id);
+ *pihdl = ion_import_dma_buf(msm_rotator_dev->client, mem_id);
if (IS_ERR_OR_NULL(*pihdl)) {
- pr_err("ion_import_fd() failed\n");
+ pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*pihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *pihdl,
- ion_share(msm_rotator_dev->client, *pihdl));
+ pr_debug("%s(): ion_hdl %p, ion_fd %d\n", __func__, *pihdl,
+ ion_share_dma_buf(msm_rotator_dev->client, *pihdl));
if (ion_map_iommu(msm_rotator_dev->client,
*pihdl, ROTATOR_DOMAIN, GEN_POOL,
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 5c7ab3a..4e34e89 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -22,6 +22,7 @@
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
+#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
@@ -31,6 +32,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
@@ -51,14 +53,12 @@
struct rb_root heaps;
long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
unsigned long arg);
- struct rb_root user_clients;
- struct rb_root kernel_clients;
+ struct rb_root clients;
struct dentry *debug_root;
};
/**
* struct ion_client - a process/hw block local address space
- * @ref: for reference counting the client
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
@@ -72,7 +72,6 @@
* as well as the handles themselves, and should be held while modifying either.
*/
struct ion_client {
- struct kref ref;
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
@@ -92,7 +91,6 @@
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
* @dmap_cnt: count of times this client has mapped for dma
- * @usermap_cnt: count of times this client has mapped for userspace
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
@@ -103,8 +101,6 @@
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
- unsigned int dmap_cnt;
- unsigned int usermap_cnt;
unsigned int iommu_map_cnt;
};
@@ -217,6 +213,7 @@
unsigned long flags)
{
struct ion_buffer *buffer;
+ struct sg_table *table;
int ret;
buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
@@ -231,8 +228,18 @@
kfree(buffer);
return ERR_PTR(ret);
}
+
buffer->dev = dev;
buffer->size = len;
+
+ table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(PTR_ERR(table));
+ }
+ buffer->sg_table = table;
+
mutex_init(&buffer->lock);
ion_buffer_add(dev, buffer);
return buffer;
@@ -275,6 +282,11 @@
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_device *dev = buffer->dev;
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
ion_iommu_delayed_unmap(buffer);
buffer->heap->ops->free(buffer);
mutex_lock(&dev->lock);
@@ -310,17 +322,26 @@
return handle;
}
-/* Client lock must be locked when calling */
+static void ion_handle_kmap_put(struct ion_handle *);
+
static void ion_handle_destroy(struct kref *kref)
{
struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- /* XXX Can a handle be destroyed while it's map count is non-zero?:
- if (handle->map_cnt) unmap
- */
- WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
- ion_buffer_put(handle->buffer);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&client->lock);
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &handle->client->handles);
+ rb_erase(&handle->node, &client->handles);
+ mutex_unlock(&client->lock);
+
+ ion_buffer_put(buffer);
kfree(handle);
}
@@ -412,6 +433,11 @@
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
+ if (WARN_ON(!len))
+ return ERR_PTR(-EINVAL);
+
+ len = PAGE_ALIGN(len);
+
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
@@ -445,7 +471,10 @@
}
mutex_unlock(&dev->lock);
- if (IS_ERR_OR_NULL(buffer)) {
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer)) {
pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
"0x%x) from heap(s) %sfor client %s with heap "
"mask 0x%x\n",
@@ -455,22 +484,19 @@
handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
-
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
- mutex_lock(&client->lock);
- ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- return handle;
+ if (!IS_ERR(handle)) {
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ }
-end:
- ion_buffer_put(buffer);
+
return handle;
}
EXPORT_SYMBOL(ion_alloc);
@@ -488,43 +514,11 @@
WARN(1, "%s: invalid handle passed to free.\n", __func__);
return;
}
- ion_handle_put(handle);
mutex_unlock(&client->lock);
+ ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
-static void ion_client_get(struct ion_client *client);
-static int ion_client_put(struct ion_client *client);
-
-static bool _ion_map(int *buffer_cnt, int *handle_cnt)
-{
- bool map;
-
- BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
-
- if (*buffer_cnt)
- map = false;
- else
- map = true;
- if (*handle_cnt == 0)
- (*buffer_cnt)++;
- (*handle_cnt)++;
- return map;
-}
-
-static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
-{
- BUG_ON(*handle_cnt == 0);
- (*handle_cnt)--;
- if (*handle_cnt != 0)
- return false;
- BUG_ON(*buffer_cnt == 0);
- (*buffer_cnt)--;
- if (*buffer_cnt == 0)
- return true;
- return false;
-}
-
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len)
{
@@ -551,52 +545,55 @@
}
EXPORT_SYMBOL(ion_phys);
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
- unsigned long flags)
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer;
void *vaddr;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
}
-
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- if (ion_validate_buffer_flags(buffer, flags)) {
- vaddr = ERR_PTR(-EEXIST);
- goto out;
- }
-
- if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
- flags);
- if (IS_ERR_OR_NULL(vaddr))
- _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
- buffer->vaddr = vaddr;
- } else {
- vaddr = buffer->vaddr;
- }
-
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
return vaddr;
}
-EXPORT_SYMBOL(ion_map_kernel);
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR_OR_NULL(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
int domain_num, int partition_num, unsigned long align,
@@ -699,14 +696,10 @@
}
iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
- _ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
if (!iommu_map) {
iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
align, iova_length, flags, iova);
- if (IS_ERR_OR_NULL(iommu_map)) {
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
- } else {
+ if (!IS_ERR_OR_NULL(iommu_map)) {
iommu_map->flags = iommu_flags;
if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
@@ -717,22 +710,20 @@
pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
__func__, handle,
iommu_map->flags, iommu_flags);
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
ret = -EINVAL;
} else if (iommu_map->mapped_size != iova_length) {
pr_err("%s: handle %p is already mapped with length"
" %x, trying to map with length %lx\n",
__func__, handle, iommu_map->mapped_size,
iova_length);
- _ion_unmap(&buffer->iommu_map_cnt,
- &handle->iommu_map_cnt);
ret = -EINVAL;
} else {
kref_get(&iommu_map->ref);
*iova = iommu_map->iova_addr;
}
}
+ if (!ret)
+ buffer->iommu_map_cnt++;
*buffer_size = buffer->size;
out:
mutex_unlock(&buffer->lock);
@@ -771,9 +762,9 @@
goto out;
}
- _ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
kref_put(&iommu_map->ref, ion_iommu_release);
+ buffer->iommu_map_cnt--;
out:
mutex_unlock(&buffer->lock);
@@ -782,51 +773,40 @@
}
EXPORT_SYMBOL(ion_unmap_iommu);
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle,
- unsigned long flags)
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
+ unsigned long flags)
{
struct ion_buffer *buffer;
- struct scatterlist *sglist;
+ void *vaddr;
mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_dma.\n",
+ pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- if (!handle->buffer->heap->ops->map_dma) {
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
- mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
if (ion_validate_buffer_flags(buffer, flags)) {
- sglist = ERR_PTR(-EEXIST);
- goto out;
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EEXIST);
}
- if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(sglist))
- _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
- buffer->sglist = sglist;
- } else {
- sglist = buffer->sglist;
- }
-
-out:
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
- return sglist;
+ return vaddr;
}
-EXPORT_SYMBOL(ion_map_dma);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -835,74 +815,10 @@
mutex_lock(&client->lock);
buffer = handle->buffer;
mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
- buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
- buffer->vaddr = NULL;
- }
+ ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
- buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- buffer->sglist = NULL;
- }
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_dma);
-
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle)
-{
- bool valid_handle;
-
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- mutex_unlock(&client->lock);
- if (!valid_handle) {
- WARN("%s: invalid handle passed to share.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- /* do not take an extra reference here, the burden is on the caller
- * to make sure the buffer doesn't go away while it's passing it
- * to another client -- ion_free should not be called on this handle
- * until the buffer has been imported into the other client
- */
- return handle->buffer;
-}
-EXPORT_SYMBOL(ion_share);
-
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle = NULL;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR_OR_NULL(handle)) {
- ion_handle_get(handle);
- goto end;
- }
- handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
- ion_handle_add(client, handle);
-end:
- mutex_unlock(&client->lock);
- return handle;
-}
-EXPORT_SYMBOL(ion_import);
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
@@ -969,30 +885,6 @@
}
-static const struct file_operations ion_share_fops;
-
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
-{
- struct file *file = fget(fd);
- struct ion_handle *handle;
-
- if (!file) {
- pr_err("%s: imported fd not found in file table.\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- if (file->f_op != &ion_share_fops) {
- pr_err("%s: imported file %s is not a shared ion"
- " file.", __func__, file->f_dentry->d_name.name);
- handle = ERR_PTR(-EINVAL);
- goto end;
- }
- handle = ion_import(client, file->private_data);
-end:
- fput(file);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_fd);
-
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
@@ -1033,9 +925,6 @@
}
seq_printf(s, "\n");
}
-
- seq_printf(s, "%16.16s %d\n", "client refcount:",
- atomic_read(&client->ref.refcount));
mutex_unlock(&client->lock);
return 0;
@@ -1053,29 +942,6 @@
.release = single_release,
};
-static struct ion_client *ion_client_lookup(struct ion_device *dev,
- struct task_struct *task)
-{
- struct rb_node *n = dev->user_clients.rb_node;
- struct ion_client *client;
-
- mutex_lock(&dev->lock);
- while (n) {
- client = rb_entry(n, struct ion_client, node);
- if (task == client->task) {
- ion_client_get(client);
- mutex_unlock(&dev->lock);
- return client;
- } else if (task < client->task) {
- n = n->rb_left;
- } else if (task > client->task) {
- n = n->rb_right;
- }
- }
- mutex_unlock(&dev->lock);
- return NULL;
-}
-
struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask,
const char *name)
@@ -1107,19 +973,10 @@
}
task_unlock(current->group_leader);
- /* if this isn't a kernel thread, see if a client already
- exists */
- if (task) {
- client = ion_client_lookup(dev, task);
- if (!IS_ERR_OR_NULL(client)) {
- put_task_struct(current->group_leader);
- return client;
- }
- }
-
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client) {
- put_task_struct(current->group_leader);
+ if (task)
+ put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
@@ -1139,36 +996,20 @@
client->heap_mask = heap_mask;
client->task = task;
client->pid = pid;
- kref_init(&client->ref);
mutex_lock(&dev->lock);
- if (task) {
- p = &dev->user_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
- if (task < entry->task)
- p = &(*p)->rb_left;
- else if (task > entry->task)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->user_clients);
- } else {
- p = &dev->kernel_clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->kernel_clients);
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
}
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
client->debug_root = debugfs_create_file(name, 0664,
@@ -1179,9 +1020,8 @@
return client;
}
-static void _ion_client_destroy(struct kref *kref)
+void ion_client_destroy(struct ion_client *client)
{
- struct ion_client *client = container_of(kref, struct ion_client, ref);
struct ion_device *dev = client->dev;
struct rb_node *n;
@@ -1192,12 +1032,9 @@
ion_handle_destroy(&handle->ref);
}
mutex_lock(&dev->lock);
- if (client->task) {
- rb_erase(&client->node, &dev->user_clients);
+ if (client->task)
put_task_struct(client->task);
- } else {
- rb_erase(&client->node, &dev->kernel_clients);
- }
+ rb_erase(&client->node, &dev->clients);
debugfs_remove_recursive(client->debug_root);
mutex_unlock(&dev->lock);
@@ -1205,23 +1042,6 @@
kfree(client);
}
-static void ion_client_get(struct ion_client *client)
-{
- kref_get(&client->ref);
-}
-
-static int ion_client_put(struct ion_client *client)
-{
- return kref_put(&client->ref, _ion_client_destroy);
-}
-
-void ion_client_destroy(struct ion_client *client)
-{
- if (client)
- ion_client_put(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
unsigned long *flags)
{
@@ -1266,77 +1086,63 @@
}
EXPORT_SYMBOL(ion_handle_get_size);
-static int ion_share_release(struct inode *inode, struct file* file)
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
{
- struct ion_buffer *buffer = file->private_data;
+ struct ion_buffer *buffer;
+ struct sg_table *table;
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* drop the reference to the buffer -- this prevents the
- buffer from going away because the client holding it exited
- while it was being passed */
- ion_buffer_put(buffer);
- return 0;
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
}
static void ion_vma_open(struct vm_area_struct *vma)
{
-
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_client *client;
+ struct ion_buffer *buffer = vma->vm_private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- /* check that the client still exists and take a reference so
- it can't go away until this vma is closed */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- vma->vm_private_data = NULL;
- return;
- }
- ion_handle_get(handle);
+
mutex_lock(&buffer->lock);
buffer->umap_cnt++;
mutex_unlock(&buffer->lock);
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
}
static void ion_vma_close(struct vm_area_struct *vma)
{
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_client *client;
+ struct ion_buffer *buffer = vma->vm_private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- /* this indicates the client is gone, nothing to do here */
- if (!handle)
- return;
- client = handle->client;
+
mutex_lock(&buffer->lock);
buffer->umap_cnt--;
mutex_unlock(&buffer->lock);
if (buffer->heap->ops->unmap_user)
buffer->heap->ops->unmap_user(buffer->heap, buffer);
-
-
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
- mutex_lock(&client->lock);
- ion_handle_put(handle);
- mutex_unlock(&client->lock);
- ion_client_put(client);
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
}
static struct vm_operations_struct ion_vm_ops = {
@@ -1344,127 +1150,198 @@
.close = ion_vma_close,
};
-static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
- struct ion_buffer *buffer = file->private_data;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct ion_client *client;
- struct ion_handle *handle;
+ struct ion_buffer *buffer = dmabuf->priv;
int ret;
- unsigned long flags = file->f_flags & O_DSYNC ?
- ION_SET_CACHE(UNCACHED) :
- ION_SET_CACHE(CACHED);
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* make sure the client still exists, it's possible for the client to
- have gone away but the map/share fd still to be around, take
- a reference to it so it can't go away while this mapping exists */
- client = ion_client_lookup(buffer->dev, current->group_leader);
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: trying to mmap an ion handle in a process with no "
- "ion client\n", __func__);
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping "
+ "to userspace\n", __func__);
return -EINVAL;
}
- if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
- buffer->size)) {
- pr_err("%s: trying to map larger area than handle has available"
- "\n", __func__);
- ret = -EINVAL;
- goto err;
- }
-
- /* find the handle and take a reference to it */
- handle = ion_import(client, buffer);
- if (IS_ERR_OR_NULL(handle)) {
- ret = -EINVAL;
- goto err;
- }
-
- if (!handle->buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping "
- "to userspace\n", __func__);
- ret = -EINVAL;
- goto err1;
- }
-
mutex_lock(&buffer->lock);
-
- if (ion_validate_buffer_flags(buffer, flags)) {
- ret = -EEXIST;
- mutex_unlock(&buffer->lock);
- goto err1;
- }
-
/* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
- flags);
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- buffer->umap_cnt++;
if (ret) {
+ mutex_unlock(&buffer->lock);
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
- goto err2;
+ } else {
+ buffer->umap_cnt++;
+ mutex_unlock(&buffer->lock);
+
+ vma->vm_ops = &ion_vm_ops;
+ /*
+ * move the buffer into the vm_private_data so we can access it
+ * from vma_open/close
+ */
+ vma->vm_private_data = buffer;
}
- mutex_unlock(&buffer->lock);
-
- vma->vm_ops = &ion_vm_ops;
- /* move the handle into the vm_private_data so we can access it from
- vma_open/close */
- vma->vm_private_data = handle;
- pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
- __func__, __LINE__,
- atomic_read(&client->ref.refcount),
- atomic_read(&handle->ref.refcount),
- atomic_read(&buffer->ref.refcount));
- return 0;
-
-err2:
- buffer->umap_cnt--;
- mutex_unlock(&buffer->lock);
- /* drop the reference to the handle */
-err1:
- mutex_lock(&client->lock);
- ion_handle_put(handle);
- mutex_unlock(&client->lock);
-err:
- /* drop the reference to the client */
- ion_client_put(client);
return ret;
}
-static const struct file_operations ion_share_fops = {
- .owner = THIS_MODULE,
- .release = ion_share_release,
- .mmap = ion_share_mmap,
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ if (!vaddr)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
};
-static int ion_ioctl_share(struct file *parent, struct ion_client *client,
- struct ion_handle *handle)
+static int ion_share_set_flags(struct ion_client *client,
+ struct ion_handle *handle,
+ unsigned long flags)
{
- int fd = get_unused_fd();
- struct file *file;
+ struct ion_buffer *buffer;
+ bool valid_handle;
+ unsigned long ion_flags = ION_SET_CACHE(CACHED);
+ if (flags & O_DSYNC)
+ ion_flags = ION_SET_CACHE(UNCACHED);
- if (fd < 0)
- return -ENFILE;
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
+ return -EINVAL;
+ }
- file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
- handle->buffer, O_RDWR);
- if (IS_ERR_OR_NULL(file))
- goto err;
+ buffer = handle->buffer;
- if (parent->f_flags & O_DSYNC)
- file->f_flags |= O_DSYNC;
+ mutex_lock(&buffer->lock);
+ if (ion_validate_buffer_flags(buffer, ion_flags)) {
+ mutex_unlock(&buffer->lock);
+ return -EEXIST;
+ }
+ mutex_unlock(&buffer->lock);
+ return 0;
+}
- ion_buffer_get(handle->buffer);
- fd_install(fd, file);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+ int fd;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ mutex_unlock(&client->lock);
+ if (!valid_handle) {
+ WARN("%s: invalid handle passed to share.\n", __func__);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return PTR_ERR(dmabuf);
+ }
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0) {
+ dma_buf_put(dmabuf);
+ ion_buffer_put(buffer);
+ }
return fd;
+}
-err:
- put_unused_fd(fd);
- return -ENFILE;
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+ return ERR_PTR(PTR_ERR(dmabuf));
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR_OR_NULL(handle)) {
+ ion_handle_get(handle);
+ goto end;
+ }
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR_OR_NULL(handle))
+ goto end;
+ ion_handle_add(client, handle);
+end:
+ mutex_unlock(&client->lock);
+ dma_buf_put(dmabuf);
+ return handle;
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1481,11 +1358,13 @@
data.handle = ion_alloc(client, data.len, data.align,
data.flags);
- if (IS_ERR_OR_NULL(data.handle))
- return -ENOMEM;
+ if (IS_ERR(data.handle))
+ return PTR_ERR(data.handle);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ ion_free(client, data.handle);
return -EFAULT;
+ }
break;
}
case ION_IOC_FREE:
@@ -1508,18 +1387,15 @@
case ION_IOC_SHARE:
{
struct ion_fd_data data;
-
+ int ret;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, data.handle)) {
- pr_err("%s: invalid handle passed to share ioctl.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
- data.fd = ion_ioctl_share(filp, client, data.handle);
- mutex_unlock(&client->lock);
+
+ ret = ion_share_set_flags(client, data.handle, filp->f_flags);
+ if (ret)
+ return ret;
+
+ data.fd = ion_share_dma_buf(client, data.handle);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
if (data.fd < 0)
@@ -1533,12 +1409,9 @@
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
-
- data.handle = ion_import_fd(client, data.fd);
- if (IS_ERR(data.handle)) {
- ret = PTR_ERR(data.handle);
+ data.handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(data.handle))
data.handle = NULL;
- }
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
@@ -1581,8 +1454,8 @@
}
if (!data.handle) {
- handle = ion_import_fd(client, data.fd);
- if (IS_ERR_OR_NULL(handle)) {
+ handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(handle)) {
pr_info("%s: Could not import handle: %d\n",
__func__, (int)handle);
return -EINVAL;
@@ -1629,7 +1502,7 @@
struct ion_client *client = file->private_data;
pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_put(client);
+ ion_client_destroy(client);
return 0;
}
@@ -1739,14 +1612,7 @@
struct rb_node *j;
const char *client_name = NULL;
- for (j = rb_first(&dev->user_clients); j && !client_name;
- j = rb_next(j)) {
- struct ion_client *client = rb_entry(j, struct ion_client,
- node);
- if (ion_debug_find_buffer_owner(client, buffer))
- client_name = client->name;
- }
- for (j = rb_first(&dev->kernel_clients); j && !client_name;
+ for (j = rb_first(&dev->clients); j && !client_name;
j = rb_next(j)) {
struct ion_client *client = rb_entry(j, struct ion_client,
node);
@@ -1828,27 +1694,23 @@
mutex_lock(&dev->lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
- for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- char task_comm[TASK_COMM_LEN];
- size_t size = ion_debug_heap_total(client, heap->id);
- if (!size)
- continue;
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid,
- size);
- }
-
- for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
size_t size = ion_debug_heap_total(client, heap->id);
if (!size)
continue;
- seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
- size);
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+ seq_printf(s, "%16.s %16u %16u\n", task_comm,
+ client->pid, size);
+ } else {
+ seq_printf(s, "%16.s %16u %16u\n", client->name,
+ client->pid, size);
+ }
}
ion_heap_print_debug(s, heap);
mutex_unlock(&dev->lock);
@@ -1873,6 +1735,11 @@
struct rb_node *parent = NULL;
struct ion_heap *entry;
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
heap->dev = dev;
mutex_lock(&dev->lock);
while (*p) {
@@ -1970,7 +1837,7 @@
}
/* now see which buffers we can access */
- for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1986,21 +1853,6 @@
}
- for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
-
- mutex_lock(&client->lock);
- for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
- struct ion_handle *handle = rb_entry(n2,
- struct ion_handle, node);
-
- handle->buffer->marked = 0;
-
- }
- mutex_unlock(&client->lock);
-
- }
/* And anyone still marked as a 1 means a leaked handle somewhere */
for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
@@ -2059,8 +1911,7 @@
idev->buffers = RB_ROOT;
mutex_init(&idev->lock);
idev->heaps = RB_ROOT;
- idev->user_clients = RB_ROOT;
- idev->kernel_clients = RB_ROOT;
+ idev->clients = RB_ROOT;
debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
&debug_leak_fops);
return idev;
@@ -2072,3 +1923,19 @@
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+ ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %x@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+}
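
With ion_share()/ion_import_fd() replaced by dma-buf based export and import, cross-client sharing now travels as a file descriptor. A hedged sketch, where client_a, client_b and handle_a are placeholders for objects obtained via ion_client_create() and ion_alloc():

	int fd;
	struct ion_handle *handle_b;

	fd = ion_share_dma_buf(client_a, handle_a);	/* export as a dma-buf fd */
	if (fd < 0)
		return fd;
	handle_b = ion_import_dma_buf(client_b, fd);	/* import in another client */
	if (IS_ERR(handle_b))
		return PTR_ERR(handle_b);
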
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 1fdc1f9..a591eb4 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -108,28 +108,38 @@
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(struct scatterlist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err0;
- return sglist;
+ table->sgl->length = buffer->size;
+ table->sgl->offset = 0;
+ table->sgl->dma_address = buffer->priv_phys;
+
+ return table;
+
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
}
void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- if (buffer->sglist)
- vfree(buffer->sglist);
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
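The carveout, content-protect, and contiguous system heaps all repeat the same conversion: one kzalloc'd sg_table whose single entry spans the whole physically contiguous buffer. A generic sketch of that pattern (contig_sg_table() is a hypothetical helper, not part of this patch):

	/* Hypothetical helper showing the single-entry pattern used above. */
	static struct sg_table *contig_sg_table(ion_phys_addr_t base, size_t size)
	{
		struct sg_table *table;
		int ret;

		table = kzalloc(sizeof(*table), GFP_KERNEL);
		if (!table)
			return ERR_PTR(-ENOMEM);

		ret = sg_alloc_table(table, 1, GFP_KERNEL);
		if (ret) {
			kfree(table);
			return ERR_PTR(ret);
		}

		/* one entry describes the whole physically contiguous buffer */
		table->sgl->length = size;
		table->sgl->offset = 0;
		table->sgl->dma_address = base;

		return table;
	}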
static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
@@ -163,8 +173,7 @@
}
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -173,7 +182,7 @@
if (ion_carveout_request_region(carveout_heap))
return NULL;
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
else
ret_value = ioremap(buffer->priv_phys, buffer->size);
@@ -197,7 +206,7 @@
}
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -206,7 +215,7 @@
if (ion_carveout_request_region(carveout_heap))
return -EINVAL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
ret_value = remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index c5e9caf..23ccab3 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -366,33 +366,42 @@
buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
}
-struct scatterlist *ion_cp_heap_create_sglist(struct ion_buffer *buffer)
+struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err0;
- return sglist;
+ table->sgl->length = buffer->size;
+ table->sgl->offset = 0;
+ table->sgl->dma_address = buffer->priv_phys;
+
+ return table;
+err0:
+ kfree(table);
+ return ERR_PTR(ret);
}
-struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- return ion_cp_heap_create_sglist(buffer);
+ return ion_cp_heap_create_sg_table(buffer);
}
void ion_cp_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- if (buffer->sglist)
- vfree(buffer->sglist);
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
/**
@@ -441,9 +450,7 @@
return NULL;
}
-void *ion_cp_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -452,7 +459,7 @@
mutex_lock(&cp_heap->lock);
if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
((cp_heap->heap_protected == HEAP_PROTECTED) &&
- !ION_IS_CACHED(flags))) {
+ !ION_IS_CACHED(buffer->flags))) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
@@ -461,10 +468,10 @@
if (cp_heap->reusable) {
ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
- cp_heap->reserved_vrange, flags);
+ cp_heap->reserved_vrange, buffer->flags);
} else {
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buffer->priv_phys,
buffer->size);
else
@@ -510,7 +517,7 @@
}
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
int ret_value = -EAGAIN;
struct ion_cp_heap *cp_heap =
@@ -523,7 +530,7 @@
return -EINVAL;
}
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(
vma->vm_page_prot);
@@ -764,7 +771,6 @@
struct iommu_domain *domain;
int ret = 0;
unsigned long extra;
- struct scatterlist *sglist = 0;
struct ion_cp_heap *cp_heap =
container_of(buffer->heap, struct ion_cp_heap, heap);
int prot = IOMMU_WRITE | IOMMU_READ;
@@ -819,12 +825,7 @@
goto out1;
}
- sglist = ion_cp_heap_create_sglist(buffer);
- if (IS_ERR_OR_NULL(sglist)) {
- ret = -ENOMEM;
- goto out1;
- }
- ret = iommu_map_range(domain, data->iova_addr, sglist,
+ ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
@@ -839,14 +840,11 @@
if (ret)
goto out2;
}
- vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- if (!IS_ERR_OR_NULL(sglist))
- vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 9ea6f2b..d0f101c 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -35,7 +35,6 @@
struct page **pages;
int nrpages;
unsigned long size;
- struct scatterlist *iommu_sglist;
};
static int ion_iommu_heap_allocate(struct ion_heap *heap,
@@ -47,6 +46,10 @@
struct ion_iommu_priv_data *data = NULL;
if (msm_use_iommu()) {
+ struct scatterlist *sg;
+ struct sg_table *table;
+ unsigned int i;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -59,25 +62,26 @@
ret = -ENOMEM;
goto err1;
}
- data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
- data->nrpages);
- if (!data->iommu_sglist) {
+
+ table = buffer->sg_table =
+ kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+ if (!table) {
ret = -ENOMEM;
goto err1;
}
+ ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+ if (ret)
+ goto err2;
- sg_init_table(data->iommu_sglist, data->nrpages);
-
- for (i = 0; i < data->nrpages; i++) {
+ for_each_sg(table->sgl, sg, table->nents, i) {
data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!data->pages[i])
- goto err2;
+ goto err3;
- sg_set_page(&data->iommu_sglist[i], data->pages[i],
- PAGE_SIZE, 0);
+ sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
}
-
buffer->priv_virt = data;
return 0;
@@ -86,9 +90,11 @@
}
+err3:
+ sg_free_table(buffer->sg_table);
err2:
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
for (i = 0; i < data->nrpages; i++) {
if (data->pages[i])
@@ -111,16 +117,12 @@
for (i = 0; i < data->nrpages; i++)
__free_page(data->pages[i]);
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
-
kfree(data->pages);
kfree(data);
}
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
pgprot_t page_prot = PAGE_KERNEL;
@@ -128,7 +130,7 @@
if (!data)
return NULL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
page_prot = pgprot_noncached(page_prot);
buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
@@ -147,7 +149,7 @@
}
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
int i;
@@ -155,7 +157,7 @@
if (!data)
return -EINVAL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
curr_addr = vma->vm_start;
@@ -183,7 +185,6 @@
struct iommu_domain *domain;
int ret = 0;
unsigned long extra;
- struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -207,7 +208,8 @@
}
ret = iommu_map_range(domain, data->iova_addr,
- buffer_data->iommu_sglist, buffer->size, prot);
+ buffer->sg_table->sgl,
+ buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
__func__, data->iova_addr, domain);
@@ -299,16 +301,19 @@
return 0;
}
-static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- return data->iommu_sglist;
+ return buffer->sg_table;
}
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
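Because the table now lives on the buffer for its whole lifetime, the IOMMU map paths can consume buffer->sg_table->sgl directly instead of rebuilding a scatterlist per mapping. A sketch of that consumption, using the msm iommu_map_range() helper seen in the hunks above (map_buffer_iova() is a hypothetical wrapper):

	/* Hypothetical wrapper around the msm iommu_map_range() helper. */
	static int map_buffer_iova(struct iommu_domain *domain, unsigned long iova,
				   struct ion_buffer *buffer, unsigned long flags)
	{
		int prot = IOMMU_WRITE | IOMMU_READ;

		/* cached buffers also get IOMMU_CACHE, mirroring the heaps above */
		prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

		return iommu_map_range(domain, iova, buffer->sg_table->sgl,
				       buffer->size, prot);
	}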
static struct ion_heap_ops iommu_heap_ops = {
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 6940e2f..273e57e 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -26,18 +26,6 @@
#include <linux/iommu.h>
#include <linux/seq_file.h>
-struct ion_mapping;
-
-struct ion_dma_mapping {
- struct kref ref;
- struct scatterlist *sglist;
-};
-
-struct ion_kernel_mapping {
- struct kref ref;
- void *vaddr;
-};
-
enum {
DI_PARTITION_NUM = 0,
DI_DOMAIN_NUM = 1,
@@ -92,7 +80,7 @@
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
- * @sglist: the scatterlist for the buffer is dmap_cnt is not zero
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
*/
struct ion_buffer {
struct kref ref;
@@ -109,7 +97,7 @@
int kmap_cnt;
void *vaddr;
int dmap_cnt;
- struct scatterlist *sglist;
+ struct sg_table *sg_table;
int umap_cnt;
unsigned int iommu_map_cnt;
struct rb_root iommu_maps;
@@ -136,14 +124,13 @@
void (*free) (struct ion_buffer *buffer);
int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len);
- struct scatterlist *(*map_dma) (struct ion_heap *heap,
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
struct ion_buffer *buffer);
void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
- void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long flags);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags);
+ struct vm_area_struct *vma);
void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset,
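Taken together, the ops changes mean map_dma now returns a struct sg_table and the kernel/user mapping hooks read cacheability from buffer->flags rather than a flags argument; ion_device_add_heap() additionally logs an error when allocate/free/map_dma/unmap_dma are missing. A sketch of an ops table against the new prototypes (the my_heap_* symbols are hypothetical stand-ins for a heap's implementations):

	static int my_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags);
	static void my_heap_free(struct ion_buffer *buffer);
	static struct sg_table *my_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer);
	static void my_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer);
	static void *my_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer);
	static void my_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer);
	static int my_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
				    struct vm_area_struct *vma);

	static struct ion_heap_ops my_heap_ops = {
		/* the first four are checked by ion_device_add_heap() now */
		.allocate     = my_heap_allocate,
		.free         = my_heap_free,
		.map_dma      = my_heap_map_dma,	/* returns struct sg_table * */
		.unmap_dma    = my_heap_unmap_dma,
		.map_kernel   = my_heap_map_kernel,	/* flags come from buffer->flags */
		.unmap_kernel = my_heap_unmap_kernel,
		.map_user     = my_heap_map_user,	/* vma only, no flags argument */
	};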
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 08b271b..c79c184 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -38,71 +38,90 @@
unsigned long size, unsigned long align,
unsigned long flags)
{
- buffer->priv_virt = vmalloc_user(size);
- if (!buffer->priv_virt)
- return -ENOMEM;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, j;
+ int npages = PAGE_ALIGN(size) / PAGE_SIZE;
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ i = sg_alloc_table(table, npages, GFP_KERNEL);
+ if (i)
+ goto err0;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page;
+ page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!page)
+ goto err1;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+ buffer->priv_virt = table;
atomic_add(size, &system_heap_allocated);
return 0;
+err1:
+ for_each_sg(table->sgl, sg, i, j)
+ __free_page(sg_page(sg));
+ sg_free_table(table);
+err0:
+ kfree(table);
+ return -ENOMEM;
}
void ion_system_heap_free(struct ion_buffer *buffer)
{
- vfree(buffer->priv_virt);
+ int i;
+ struct scatterlist *sg;
+ struct sg_table *table = buffer->priv_virt;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ __free_page(sg_page(sg));
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
atomic_sub(buffer->size, &system_heap_allocated);
}
-struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
- struct page *page;
- int i;
- int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- void *vaddr = buffer->priv_virt;
-
- sglist = vmalloc(npages * sizeof(struct scatterlist));
- if (!sglist)
- return ERR_PTR(-ENOMEM);
- memset(sglist, 0, npages * sizeof(struct scatterlist));
- sg_init_table(sglist, npages);
- for (i = 0; i < npages; i++) {
- page = vmalloc_to_page(vaddr);
- if (!page)
- goto end;
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- vaddr += PAGE_SIZE;
- }
- /* XXX do cache maintenance for dma? */
- return sglist;
-end:
- vfree(sglist);
- return NULL;
+ return buffer->priv_virt;
}
void ion_system_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- /* XXX undo cache maintenance for dma? */
- if (buffer->sglist)
- vfree(buffer->sglist);
+ return;
}
void *ion_system_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
- if (ION_IS_CACHED(flags))
- return buffer->priv_virt;
- else {
+ if (!ION_IS_CACHED(buffer->flags)) {
pr_err("%s: cannot map system heap uncached\n", __func__);
return ERR_PTR(-EINVAL);
+ } else {
+ struct scatterlist *sg;
+ int i;
+ void *vaddr;
+ struct sg_table *table = buffer->priv_virt;
+ struct page **pages = kmalloc(
+ sizeof(struct page *) * table->nents,
+ GFP_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ pages[i] = sg_page(sg);
+ vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+
+ return vaddr;
}
}
void ion_system_heap_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ vunmap(buffer->vaddr);
}
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
@@ -132,14 +151,27 @@
}
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
- if (ION_IS_CACHED(flags))
- return remap_vmalloc_range(vma, buffer->priv_virt,
- vma->vm_pgoff);
- else {
+ if (!ION_IS_CACHED(buffer->flags)) {
pr_err("%s: cannot map system heap uncached\n", __func__);
return -EINVAL;
+ } else {
+ struct sg_table *table = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ if (offset) {
+ offset--;
+ continue;
+ }
+ vm_insert_page(vma, addr, sg_page(sg));
+ addr += PAGE_SIZE;
+ }
+ return 0;
}
}
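The system-heap mmap path now walks the table and inserts one page per sg entry, honoring vm_pgoff by skipping leading entries. The loop above drops the vm_insert_page() return value; a variant with error handling might look like this (sg_table_mmap() is a hypothetical helper, not part of the patch):

	/* Hypothetical variant of the loop above with error handling. */
	static int sg_table_mmap(struct sg_table *table, struct vm_area_struct *vma)
	{
		unsigned long addr = vma->vm_start;
		unsigned long offset = vma->vm_pgoff;
		struct scatterlist *sg;
		int i, ret;

		for_each_sg(table->sgl, sg, table->nents, i) {
			if (offset) {
				offset--;	/* skip entries before vm_pgoff */
				continue;
			}
			if (addr >= vma->vm_end)
				break;
			/* each system-heap sg entry is exactly one page here */
			ret = vm_insert_page(vma, addr, sg_page(sg));
			if (ret)
				return ret;
			addr += PAGE_SIZE;
		}
		return 0;
	}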
@@ -168,42 +200,20 @@
if (system_heap_has_outer_cache) {
unsigned long pstart;
- void *vend;
- void *vtemp;
- unsigned long ln = 0;
- vend = buffer->priv_virt + buffer->size;
- vtemp = buffer->priv_virt + offset;
-
- if ((vtemp+length) > vend) {
- pr_err("Trying to flush outside of mapped range.\n");
- pr_err("End of mapped range: %p, trying to flush to "
- "address %p\n", vend, vtemp+length);
- WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
- "vaddr 0x%p, offset 0x%x, length: 0x%x\n",
- __func__, heap->name, buffer->size, vaddr,
- offset, length);
- return -EINVAL;
- }
-
- for (; ln < length && vtemp < vend;
- vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(vtemp);
- if (!page) {
- WARN(1, "Could not find page for virt. address %p\n",
- vtemp);
- return -EINVAL;
- }
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
pstart = page_to_phys(page);
/*
* If page -> phys is returning NULL, something
* has really gone wrong...
*/
if (!pstart) {
- WARN(1, "Could not translate %p to physical address\n",
- vtemp);
+ WARN(1, "Could not translate virtual address to physical address\n");
return -EINVAL;
}
-
outer_cache_op(pstart, pstart + PAGE_SIZE);
}
}
@@ -227,14 +237,11 @@
unsigned long iova_length,
unsigned long flags)
{
- int ret = 0, i;
+ int ret = 0;
struct iommu_domain *domain;
unsigned long extra;
unsigned long extra_iova_addr;
- struct page *page;
- int npages = buffer->size >> PAGE_SHIFT;
- void *vaddr = buffer->priv_virt;
- struct scatterlist *sglist = 0;
+ struct sg_table *table = buffer->priv_virt;
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -261,23 +268,7 @@
goto out1;
}
-
- sglist = vmalloc(sizeof(*sglist) * npages);
- if (!sglist) {
- ret = -ENOMEM;
- goto out1;
- }
-
- sg_init_table(sglist, npages);
- for (i = 0; i < npages; i++) {
- page = vmalloc_to_page(vaddr);
- if (!page)
- goto out1;
- sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
- vaddr += PAGE_SIZE;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
+ ret = iommu_map_range(domain, data->iova_addr, table->sgl,
buffer->size, prot);
if (ret) {
@@ -293,13 +284,11 @@
if (ret)
goto out2;
}
- vfree(sglist);
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
- vfree(sglist);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
@@ -366,27 +355,32 @@
return 0;
}
-struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct scatterlist *sglist;
+ struct sg_table *table;
+ int ret;
- sglist = vmalloc(sizeof(struct scatterlist));
- if (!sglist)
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
return ERR_PTR(-ENOMEM);
- sg_init_table(sglist, 1);
- sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0);
- return sglist;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ERR_PTR(ret);
+ }
+ sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
+ 0);
+ return table;
}
int ion_system_contig_heap_map_user(struct ion_heap *heap,
struct ion_buffer *buffer,
- struct vm_area_struct *vma,
- unsigned long flags)
+ struct vm_area_struct *vma)
{
unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
- if (ION_IS_CACHED(flags))
+ if (ION_IS_CACHED(buffer->flags))
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b280183..41fd72f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -111,6 +111,21 @@
.ib_check_level = 0,
};
+/* This set of registers is used for hang detection.
+ * If the values of these registers are unchanged after
+ * KGSL_TIMEOUT_PART time, a GPU hang is reported in the
+ * kernel log.
+ */
+unsigned int hang_detect_regs[] = {
+ A3XX_RBBM_STATUS,
+ REG_CP_RB_RPTR,
+ REG_CP_IB1_BASE,
+ REG_CP_IB1_BUFSZ,
+ REG_CP_IB2_BASE,
+ REG_CP_IB2_BUFSZ,
+};
+
+const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
/*
* This is the master list of all GPU cores that are supported by this
@@ -246,6 +261,7 @@
}
static void adreno_iommu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
uint32_t flags)
{
unsigned int pt_val, reg_pt_val;
@@ -256,17 +272,27 @@
struct kgsl_memdesc **reg_map_desc;
void *reg_map_array = NULL;
int num_iommu_units, i;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
if (!adreno_dev->drawctxt_active)
return kgsl_mmu_device_setstate(&device->mmu, flags);
num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
®_map_array);
+
+ context = idr_find(&device->context_idr, context_id);
+ adreno_ctx = context->devctxt;
+
reg_map_desc = reg_map_array;
if (kgsl_mmu_enable_clk(&device->mmu,
KGSL_IOMMU_CONTEXT_USER))
goto done;
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
device->mmu.setstate_memory.gpuaddr +
@@ -335,10 +361,9 @@
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff;
- if (flags & KGSL_MMUFLAGS_TLBFLUSH)
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/*
@@ -374,15 +399,29 @@
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
sizedwords += (cmds - &link[0]);
- if (sizedwords)
- adreno_ringbuffer_issuecmds(device,
- KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+ if (sizedwords) {
+ /*
+ * add an interrupt at the end of the commands so that the
+ * SMMU clock disable function gets called
+ */
+ *cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
+ *cmds++ = CP_INT_CNTL__RB_INT_MASK;
+ sizedwords += 2;
+ /* This returns the per context timestamp but we need to
+ * use the global timestamp for iommu clock disablement */
+ adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu,
+ adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
+ }
done:
if (num_iommu_units)
kfree(reg_map_array);
}
static void adreno_gpummu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
uint32_t flags)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -390,6 +429,8 @@
unsigned int *cmds = &link[0];
int sizedwords = 0;
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
/*
* Fix target freeze issue by adding TLB flush for each submit
@@ -404,6 +445,9 @@
* easier to filter out the mmu accesses from the dump
*/
if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) {
+ context = idr_find(&device->context_idr, context_id);
+ adreno_ctx = context->devctxt;
+
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/* wait for graphics pipe to be idle */
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
@@ -476,7 +520,8 @@
sizedwords += 2;
}
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
} else {
kgsl_mmu_device_setstate(&device->mmu, flags);
@@ -484,13 +529,14 @@
}
static void adreno_setstate(struct kgsl_device *device,
+ unsigned int context_id,
uint32_t flags)
{
/* call the mmu specific handler */
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
- return adreno_gpummu_setstate(device, flags);
+ return adreno_gpummu_setstate(device, context_id, flags);
else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
- return adreno_iommu_setstate(device, flags);
+ return adreno_iommu_setstate(device, context_id, flags);
}
static unsigned int
@@ -720,6 +766,10 @@
kgsl_mh_start(device);
}
+ /* Assign correct RBBM status register to hang detect regs */
+ hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+
status = kgsl_mmu_start(device);
if (status)
goto error_clk_off;
@@ -886,8 +936,7 @@
return ret;
}
-static int
-adreno_dump_and_recover(struct kgsl_device *device)
+int adreno_dump_and_recover(struct kgsl_device *device)
{
int result = -ETIMEDOUT;
@@ -927,6 +976,7 @@
done:
return result;
}
+EXPORT_SYMBOL(adreno_dump_and_recover);
static int adreno_getproperty(struct kgsl_device *device,
enum kgsl_property_type type,
@@ -1086,7 +1136,10 @@
unsigned long wait_time_part;
unsigned int msecs;
unsigned int msecs_first;
- unsigned int msecs_part;
+ unsigned int msecs_part = KGSL_TIMEOUT_PART;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
kgsl_cffdump_regpoll(device->id,
adreno_dev->gpudev->reg_rbbm_status << 2,
@@ -1098,7 +1151,6 @@
if (rb->flags & KGSL_FLAGS_STARTED) {
msecs = adreno_dev->wait_timeout;
msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
- msecs_part = (msecs - msecs_first + 3) / 4;
wait_time = jiffies + wait_timeout;
wait_time_part = jiffies + msecs_to_jiffies(msecs_first);
adreno_poke(device);
@@ -1107,6 +1159,8 @@
adreno_poke(device);
wait_time_part = jiffies +
msecs_to_jiffies(msecs_part);
+ if ((adreno_hang_detect(device, prev_reg_val)))
+ goto err;
}
GSL_RB_GET_READPTR(rb, &rb->rptr);
if (time_after(jiffies, wait_time)) {
@@ -1119,6 +1173,7 @@
/* now, wait for the GPU to finish its operations */
wait_time = jiffies + wait_timeout;
+ wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
while (time_before(jiffies, wait_time)) {
adreno_regread(device, adreno_dev->gpudev->reg_rbbm_status,
&rbbm_status);
@@ -1129,6 +1184,16 @@
if (!(rbbm_status & 0x80000000))
return 0;
}
+
+ /* Don't wait for the full timeout; detect hangs faster. */
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(msecs_part);
+ if ((adreno_hang_detect(device, prev_reg_val)))
+ goto err;
+ }
+
}
err:
@@ -1315,6 +1380,7 @@
int status;
unsigned int ref_ts, enableflag;
unsigned int context_id;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
mutex_lock(&device->mutex);
context_id = _get_context_id(context);
@@ -1360,8 +1426,15 @@
* get an interrupt */
cmds[0] = cp_type3_packet(CP_NOP, 1);
cmds[1] = 0;
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
- &cmds[0], 2);
+
+ if (adreno_dev->drawctxt_active)
+ adreno_ringbuffer_issuecmds(device,
+ adreno_dev->drawctxt_active,
+ KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+ else
+ /* We would never call this function if there
+ * were no active contexts running */
+ BUG();
}
}
unlock:
@@ -1386,6 +1459,32 @@
__ret; \
})
+
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int curr_reg_val[hang_detect_regs_count];
+ unsigned int hang_detected = 1;
+ unsigned int i;
+
+ if (!adreno_dev->fast_hang_detect)
+ return 0;
+
+ for (i = 0; i < hang_detect_regs_count; i++) {
+ adreno_regread(device, hang_detect_regs[i],
+ &curr_reg_val[i]);
+ if (curr_reg_val[i] != prev_reg_val[i]) {
+ prev_reg_val[i] = curr_reg_val[i];
+ hang_detected = 0;
+ }
+ }
+
+ return hang_detected;
+}
+
+
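adreno_hang_detect() reports a hang only when none of the watched registers moved between two snapshots, so callers must keep prev_reg_val alive across invocations and seed it with zeros, as the callers above do. A sketch of a standalone polling loop built on it (wait_or_detect_hang() is hypothetical):

	/* Hypothetical polling loop; prev_reg_val must persist across calls. */
	static int wait_or_detect_hang(struct kgsl_device *device,
				       unsigned long timeout_jiffies)
	{
		unsigned int prev_reg_val[hang_detect_regs_count];
		unsigned long wait_time = jiffies + timeout_jiffies;

		memset(prev_reg_val, 0, sizeof(prev_reg_val));

		while (time_before(jiffies, wait_time)) {
			/* nonzero only if no watched register changed since last call */
			if (adreno_hang_detect(device, prev_reg_val))
				return -ETIMEDOUT;
			msleep(KGSL_TIMEOUT_PART);
		}
		return 0;
	}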
/* MUST be called with the device mutex held */
static int adreno_waittimestamp(struct kgsl_device *device,
struct kgsl_context *context,
@@ -1397,16 +1496,20 @@
static uint io_cnt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int retries;
+ int retries = 0;
unsigned int msecs_first;
- unsigned int msecs_part;
+ unsigned int msecs_part = KGSL_TIMEOUT_PART;
unsigned int ts_issued;
unsigned int context_id = _get_context_id(context);
+ unsigned int time_elapsed = 0;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
/* Don't wait forever, set a max value for now */
- if (msecs == -1)
+ if (msecs == KGSL_TIMEOUT_DEFAULT)
msecs = adreno_dev->wait_timeout;
if (timestamp_cmp(timestamp, ts_issued) > 0) {
@@ -1422,8 +1525,7 @@
* been updated properly.
*/
msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
- msecs_part = (msecs - msecs_first + 3) / 4;
- for (retries = 0; retries < 5; retries++) {
+ do {
/*
* If the context ID is invalid, we are in a race with
* the context being destroyed by userspace so bail.
@@ -1448,6 +1550,11 @@
if (io_cnt <
pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
io = 0;
+
+ if ((retries > 0) &&
+ (adreno_hang_detect(device, prev_reg_val)))
+ goto hang_dump;
+
mutex_unlock(&device->mutex);
/* We need to make sure that the process is
* placed in wait-q before its condition is called
@@ -1469,7 +1576,14 @@
goto done;
}
/*this wait timed out*/
- }
+
+ time_elapsed = time_elapsed +
+ (retries ? msecs_part : msecs_first);
+ retries++;
+
+ } while (time_elapsed < msecs);
+
+hang_dump:
status = -ETIMEDOUT;
KGSL_DRV_ERR(device,
"Device hang detected while waiting for timestamp: "
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index feaa36f..fcbf1d9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -82,6 +82,7 @@
unsigned int pix_shader_start;
unsigned int instruction_size;
unsigned int ib_check_level;
+ unsigned int fast_hang_detect;
};
struct adreno_gpudev {
@@ -99,7 +100,8 @@
int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
- void (*ctxt_draw_workaround)(struct adreno_device *);
+ void (*ctxt_draw_workaround)(struct adreno_device *,
+ struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
void * (*snapshot)(struct adreno_device *, void *, int *, int);
@@ -123,6 +125,10 @@
extern const unsigned int a3xx_registers[];
extern const unsigned int a3xx_registers_count;
+extern unsigned int hang_detect_regs[];
+extern const unsigned int hang_detect_regs_count;
+
+
int adreno_idle(struct kgsl_device *device, unsigned int timeout);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
@@ -143,6 +149,11 @@
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
int hang);
+int adreno_dump_and_recover(struct kgsl_device *device);
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val);
+
static inline int adreno_is_a200(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A200);
@@ -250,7 +261,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
*cmds++ = new_phys_limit;
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -263,7 +273,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
*cmds++ = (cur_ctx_bank ? 0 : 0x20);
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index d846d3d..3aa3be5 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1450,7 +1450,8 @@
return ret;
}
-static void a2xx_drawctxt_workaround(struct adreno_device *adreno_dev)
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmd[11];
@@ -1497,7 +1498,7 @@
| adreno_dev->pix_shader_start;
}
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
&cmd[0], cmds - cmd);
}
@@ -1516,12 +1517,13 @@
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->reg_save, 3);
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
/* save shader partitioning and instructions. */
- adreno_ringbuffer_issuecmds(device,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->shader_save, 3);
@@ -1529,7 +1531,8 @@
* fixup shader partitioning parameter for
* SET_SHADER_BASES.
*/
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->shader_fixup, 3);
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -1541,19 +1544,21 @@
/* save gmem.
* (note: changes shader. shader must already be saved.)
*/
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_save, 3);
/* Restore TP0_CHICKEN */
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
}
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
} else if (adreno_is_a2xx(adreno_dev))
- a2xx_drawctxt_workaround(adreno_dev);
+ a2xx_drawctxt_draw_workaround(adreno_dev, context);
}
static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
@@ -1564,7 +1569,8 @@
if (context == NULL) {
/* No context - set the default pagetable and that's it */
- kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ adreno_dev->drawctxt_active->id);
return;
}
@@ -1576,8 +1582,9 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->id;
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
- kgsl_mmu_setstate(&device->mmu, context->pagetable);
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
kgsl_cffdump_syncmem(NULL, &context->gpustate,
@@ -1589,12 +1596,14 @@
* (note: changes shader. shader must not already be restored.)
*/
if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_restore, 3);
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* Restore TP0_CHICKEN */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
}
@@ -1604,12 +1613,12 @@
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* restore registers and constants. */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
- context->reg_restore, 3);
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
/* restore shader instructions & partitioning. */
if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
- adreno_ringbuffer_issuecmds(device,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
}
@@ -1618,8 +1627,8 @@
if (adreno_is_a20x(adreno_dev)) {
cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
cmds[1] = context->bin_base_offset;
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
- cmds, 2);
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, cmds, 2);
}
}
@@ -2011,7 +2020,7 @@
.ctxt_create = a2xx_drawctxt_create,
.ctxt_save = a2xx_drawctxt_save,
.ctxt_restore = a2xx_drawctxt_restore,
- .ctxt_draw_workaround = a2xx_drawctxt_workaround,
+ .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
.irq_handler = a2xx_irq_handler,
.irq_control = a2xx_irq_control,
.snapshot = a2xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index a6b4210..6eebeb8 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2226,16 +2226,17 @@
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* Fixup self modifying IBs for save operations */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
- context->save_fixup, 3);
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
/* save registers and constants. */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->regconstant_save, 3);
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
/* Save shader instructions */
- adreno_ringbuffer_issuecmds(device,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -2249,7 +2250,8 @@
* already be saved.)
*/
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_save, 3);
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
@@ -2264,7 +2266,8 @@
if (context == NULL) {
/* No context - set the default pagetable and that's it */
- kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ adreno_dev->drawctxt_active->id);
return;
}
@@ -2276,8 +2279,9 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->id;
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
- kgsl_mmu_setstate(&device->mmu, context->pagetable);
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
/*
* Restore GMEM. (note: changes shader.
@@ -2285,29 +2289,34 @@
*/
if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_restore, 3);
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
}
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
- context->reg_restore, 3);
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
/* Fixup self modifying IBs for restore operations */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->restore_fixup, 3);
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->constant_restore, 3);
if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
/* Restore HLSQ_CONTROL_0 register */
- adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
context->hlsqcontrol_restore, 3);
}
}
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 822cf14..e3c9a18 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -117,6 +117,11 @@
debugfs_create_u32("ib_check", 0644, device->d_debugfs,
&adreno_dev->ib_check_level);
+ /* By Default enable fast hang detection */
+ adreno_dev->fast_hang_detect = 1;
+ debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
+ &adreno_dev->fast_hang_detect);
+
/* Create post mortem control files */
pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 267fd45..098c4f5 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -274,7 +274,7 @@
if (adreno_dev->gpudev->ctxt_draw_workaround &&
adreno_is_a225(adreno_dev))
adreno_dev->gpudev->ctxt_draw_workaround(
- adreno_dev);
+ adreno_dev, drawctxt);
return;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3d46221..afcceee 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -53,6 +53,14 @@
unsigned int freecmds;
unsigned int *cmds;
uint cmds_gpu;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+ unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout);
+ unsigned long wait_time;
+ unsigned long wait_time_part;
+ unsigned int msecs_part = KGSL_TIMEOUT_PART;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
/* if wptr ahead, fill the remaining with NOPs */
if (wptr_ahead) {
@@ -79,13 +87,46 @@
rb->wptr = 0;
}
+ wait_time = jiffies + wait_timeout;
+ wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
/* wait for space in ringbuffer */
- do {
+ while (1) {
GSL_RB_GET_READPTR(rb, &rb->rptr);
freecmds = rb->rptr - rb->wptr;
- } while ((freecmds != 0) && (freecmds <= numcmds));
+ if (freecmds == 0 || freecmds > numcmds)
+ break;
+
+ /* Don't wait for the full timeout; detect hangs faster. */
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(msecs_part);
+ if ((adreno_hang_detect(rb->device,
+ prev_reg_val))) {
+ KGSL_DRV_ERR(rb->device,
+ "Hang detected while waiting for freespace in "
+ "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+ rb->rptr, rb->wptr);
+ goto err;
+ }
+ }
+
+ if (time_after(jiffies, wait_time)) {
+ KGSL_DRV_ERR(rb->device,
+ "Timed out while waiting for freespace in ringbuffer "
+ "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
+ goto err;
+ }
+
+ continue;
+
+err:
+ if (!adreno_dump_and_recover(rb->device))
+ wait_time = jiffies + wait_timeout;
+ else
+ /* GPU is hung and we cannot recover */
+ BUG();
+ }
}
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
@@ -439,15 +480,13 @@
unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
- if (context != NULL) {
- /*
- * if the context was not created with per context timestamp
- * support, we must use the global timestamp since issueibcmds
- * will be returning that one.
- */
- if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- context_id = context->id;
- }
+ /*
+ * if the context was not created with per context timestamp
+ * support, we must use the global timestamp since issueibcmds
+ * will be returning that one.
+ */
+ if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ context_id = context->id;
/* reserve space to temporarily turn off protected mode
* error checking if needed
@@ -460,7 +499,7 @@
total_sizedwords += 7;
total_sizedwords += 2; /* scratchpad ts for recovery */
- if (context) {
+ if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
total_sizedwords += 3; /* global timestamp without cache
@@ -470,6 +509,15 @@
}
ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+ /* GPU may hang during space allocation; if that's the case the
+ * current context may have hung the GPU */
+ if (context->flags & CTXT_FLAGS_GPU_HANG) {
+ KGSL_CTXT_WARN(rb->device,
+ "Context %p caused a gpu hang. Will not accept commands for context %d\n",
+ context, context->id);
+ return rb->timestamp[context_id];
+ }
+
rcmd_gpu = rb->buffer_desc.gpuaddr
+ sizeof(uint)*(rb->wptr-total_sizedwords);
@@ -525,7 +573,7 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
}
- if (context) {
+ if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
/* start-of-pipeline timestamp */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
@@ -591,8 +639,9 @@
return timestamp;
}
-void
+unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
unsigned int flags,
unsigned int *cmds,
int sizedwords)
@@ -601,8 +650,9 @@
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (device->state & KGSL_STATE_HUNG)
- return;
- adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
+ return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
+ KGSL_TIMESTAMP_RETIRED);
+ return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
}
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -869,7 +919,7 @@
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- kgsl_setstate(&device->mmu,
+ kgsl_setstate(&device->mmu, context->id,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index ae2e4c7..6429f46 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -103,7 +103,8 @@
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
-void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
unsigned int flags,
unsigned int *cmdaddr,
int sizedwords);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5883f08..a6b782a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -62,9 +62,9 @@
* @returns - 0 on success or error code on failure
*/
-static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
- struct kgsl_device_private *owner)
+ void *owner)
{
struct kgsl_event *event;
struct list_head *n;
@@ -122,6 +122,7 @@
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
+EXPORT_SYMBOL(kgsl_add_event);
/**
* kgsl_cancel_events_ctxt - Cancel all events for a context
@@ -162,8 +163,8 @@
* @owner - driver instance that owns the events to cancel
*
*/
-static void kgsl_cancel_events(struct kgsl_device *device,
- struct kgsl_device_private *owner)
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner)
{
struct kgsl_event *event, *event_tmp;
unsigned int id, cur;
@@ -189,6 +190,7 @@
kfree(event);
}
}
+EXPORT_SYMBOL(kgsl_cancel_events);
/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
* @ptbase - the pagetable base of the object
@@ -247,13 +249,12 @@
kgsl_driver.stats.mapped -= entry->memdesc.size;
/*
- * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
- * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
+ * Ion takes care of freeing the sglist for us, so clear the sg
+ * before freeing the sharedmem; that way kgsl_sharedmem_free
+ * doesn't try to free it again
*/
if (entry->memtype == KGSL_MEM_ENTRY_ION) {
- ion_unmap_dma(kgsl_ion_client, entry->priv_data);
entry->memdesc.sg = NULL;
}
@@ -1746,12 +1747,12 @@
{
struct ion_handle *handle;
struct scatterlist *s;
- unsigned long flags;
+ struct sg_table *sg_table;
if (IS_ERR_OR_NULL(kgsl_ion_client))
return -ENODEV;
- handle = ion_import_fd(kgsl_ion_client, fd);
+ handle = ion_import_dma_buf(kgsl_ion_client, fd);
if (IS_ERR_OR_NULL(handle))
return PTR_ERR(handle);
@@ -1760,13 +1761,12 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = 0;
- if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
+ sg_table = ion_sg_table(kgsl_ion_client, handle);
+
+ if (IS_ERR_OR_NULL(sg_table))
goto err;
- entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
-
- if (IS_ERR_OR_NULL(entry->memdesc.sg))
- goto err;
+ entry->memdesc.sg = sg_table->sgl;
/* Calculate the size of the memdesc from the sglist */
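With the dma-buf API, KGSL imports the buffer by fd and borrows ion's own sg_table instead of asking ion to build a private mapping; that is also why the release paths above stop calling ion_unmap_dma(). A condensed sketch of the flow (import_ion_buffer() is hypothetical and the error unwinding is simplified):

	/* Hypothetical condensation of the import flow above. */
	static struct sg_table *import_ion_buffer(struct ion_client *client, int fd,
						  struct ion_handle **handlep)
	{
		struct ion_handle *handle;
		struct sg_table *table;

		handle = ion_import_dma_buf(client, fd);	/* replaces ion_import_fd() */
		if (IS_ERR_OR_NULL(handle))
			return ERR_PTR(-EINVAL);

		/* ion still owns this table; the importer must not free it */
		table = ion_sg_table(client, handle);
		if (IS_ERR_OR_NULL(table)) {
			ion_free(client, handle);
			return ERR_PTR(-EINVAL);
		}

		*handlep = handle;
		return table;
	}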
@@ -1890,7 +1890,6 @@
fput(entry->priv_data);
break;
case KGSL_MEM_ENTRY_ION:
- ion_unmap_dma(kgsl_ion_client, entry->priv_data);
ion_free(kgsl_ion_client, entry->priv_data);
break;
default:
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index b67f460..7ffa83b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -179,6 +179,13 @@
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
+ void *owner);
+
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner);
+
extern const struct dev_pm_ops kgsl_pm_ops;
struct early_suspend;
@@ -213,7 +220,8 @@
static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
{
- if (memdesc->hostptr == NULL && memdesc->ops->map_kernel_mem)
+ if (memdesc->hostptr == NULL && memdesc->ops &&
+ memdesc->ops->map_kernel_mem)
memdesc->ops->map_kernel_mem(memdesc);
return memdesc->hostptr;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 932c995..75b688b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,6 +25,7 @@
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
+#define KGSL_TIMEOUT_PART 2000 /* 2 sec */
#define FIRST_TIMEOUT (HZ / 2)
@@ -97,7 +98,8 @@
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
- void (*setstate) (struct kgsl_device *device, uint32_t flags);
+ void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ uint32_t flags);
int (*drawctxt_create) (struct kgsl_device *device,
struct kgsl_pagetable *pagetable, struct kgsl_context *context,
uint32_t flags);
@@ -125,7 +127,7 @@
void (*func)(struct kgsl_device *, void *, u32, u32);
void *priv;
struct list_head list;
- struct kgsl_device_private *owner;
+ void *owner;
};
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 429d035..998eaab 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -485,7 +485,8 @@
}
static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pagetable)
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
{
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
@@ -499,7 +500,7 @@
kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
/* call device specific set page table */
- kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
+ kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
@@ -583,7 +584,7 @@
kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
- kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
+ kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
mmu->flags |= KGSL_FLAGS_STARTED;
return 0;
@@ -591,7 +592,8 @@
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
- struct kgsl_memdesc *memdesc)
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
@@ -729,7 +731,7 @@
.mmu_pagefault = kgsl_gpummu_pagefault,
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
.mmu_enable_clk = NULL,
- .mmu_disable_clk = NULL,
+ .mmu_disable_clk_on_ts = NULL,
.mmu_get_hwpagetable_asid = NULL,
.mmu_get_pt_lsb = NULL,
.mmu_get_reg_map_desc = NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index d20cf7e..25d0463 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -130,6 +130,89 @@
}
/*
+ * kgsl_iommu_disable_clk_event - An event function that is executed when
+ * the required timestamp is reached. It disables the IOMMU clocks if
+ * the timestamp on which the clocks can be disabled has expired.
+ * @device - The kgsl device pointer
+ * @data - The data passed during event creation, it is the MMU pointer
+ * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL
+ * @ts - The current timestamp that has expired for the device
+ *
+ * Disables IOMMU clocks if timestamp has expired
+ * Return - void
+ */
+static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
+ unsigned int id, unsigned int ts)
+{
+ struct kgsl_mmu *mmu = data;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!iommu->clk_event_queued) {
+ if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
+ KGSL_DRV_ERR(device,
+ "IOMMU disable clock event being cancelled, "
+ "iommu_last_cmd_ts: %x, retired ts: %x\n",
+ iommu->iommu_last_cmd_ts, ts);
+ return;
+ }
+
+ if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
+ kgsl_iommu_disable_clk(mmu);
+ iommu->clk_event_queued = false;
+ } else {
+ /* add a new event to fire when ts is reached; this can happen
+ * if we queued an event and someone requested the clocks to
+ * be disabled on a later timestamp */
+ if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
+ kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ }
+}
+
+/*
+ * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks
+ * @mmu - The kgsl MMU pointer
+ * @ts - Timestamp on which the clocks should be disabled
+ * @ts_valid - Indicates whether the ts parameter is valid; if false,
+ * the calling function wants to disable the IOMMU clocks
+ * immediately without waiting for any timestamp
+ *
+ * Creates an event to disable the IOMMU clocks on timestamp and if event
+ * already exists then updates the timestamp of disabling the IOMMU clocks
+ * with the passed in ts if it is greater than the current value at which
+ * the clocks will be disabled
+ * Return - void
+ */
+static void
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
+ bool ts_valid)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->clk_event_queued) {
+ if (ts_valid && (0 <
+ timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
+ iommu->iommu_last_cmd_ts = ts;
+ } else {
+ if (ts_valid) {
+ iommu->iommu_last_cmd_ts = ts;
+ iommu->clk_event_queued = true;
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ } else {
+ kgsl_iommu_disable_clk(mmu);
+ }
+ }
+}
+
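The intended usage is to turn the clocks on around a submission that touches IOMMU registers, then hand the global retired timestamp to the disable-on-ts hook so the clocks drop lazily once the hardware is past those commands, as adreno_iommu_setstate() does above. A sketch of that call pattern through the mmu-level wrappers used in this patch (submit_with_iommu_clk() is hypothetical):

	/* Hypothetical caller showing the intended clock lifetime. */
	static void submit_with_iommu_clk(struct kgsl_mmu *mmu,
					  unsigned int global_ts)
	{
		if (kgsl_mmu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER))
			return;

		/* ... issue commands that touch IOMMU registers ... */

		/* queue lazy disable once global_ts retires (ts_valid = true) */
		kgsl_mmu_disable_clk_on_ts(mmu, global_ts, true);
	}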
+/*
* kgsl_iommu_enable_clk - Enable iommu clocks
* @mmu - Pointer to mmu structure
* @ctx_id - The context bank whose clocks are to be turned on
@@ -534,7 +617,8 @@
}
static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pagetable)
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
{
if (mmu->flags & KGSL_FLAGS_STARTED) {
struct kgsl_iommu *iommu = mmu->priv;
@@ -551,7 +635,8 @@
flags |= KGSL_MMUFLAGS_TLBFLUSH;
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
mmu->device->id);
- kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
+ kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_PTUPDATE | flags);
}
}
}
@@ -751,12 +836,12 @@
KGSL_IOMMU_CONTEXT_USER,
CONTEXTIDR);
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
if (status) {
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
kgsl_detach_pagetable_iommu_domain(mmu);
}
return status;
@@ -764,7 +849,8 @@
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
- struct kgsl_memdesc *memdesc)
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
{
int ret;
unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
@@ -785,6 +871,14 @@
"with err: %d\n", iommu_pt->domain, gpuaddr,
range, ret);
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+ /*
+ * Flushing only required if per process pagetables are used. With
+ * global case, flushing will happen inside iommu_map function
+ */
+ if (!ret)
+ *tlb_flags = UINT_MAX;
+#endif
return 0;
}
@@ -814,19 +908,12 @@
return ret;
}
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
- /*
- * Flushing only required if per process pagetables are used. With
- * global case, flushing will happen inside iommu_map function
- */
- if (!ret)
- *tlb_flags = UINT_MAX;
-#endif
return ret;
}
static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
{
+ struct kgsl_iommu *iommu = mmu->priv;
/*
* stop device mmu
*
@@ -841,6 +928,11 @@
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
+
+ /* switch off MMU clocks and cancel any events it has queued */
+ iommu->clk_event_queued = false;
+ kgsl_cancel_events(mmu->device, mmu);
+ kgsl_iommu_disable_clk(mmu);
}
static int kgsl_iommu_close(struct kgsl_mmu *mmu)
@@ -883,7 +975,7 @@
pt_base = readl_relaxed(iommu->iommu_units[0].reg_map.hostptr +
(KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
KGSL_IOMMU_TTBR0);
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
return pt_base & (KGSL_IOMMU_TTBR0_PA_MASK <<
KGSL_IOMMU_TTBR0_PA_SHIFT);
}
@@ -996,7 +1088,7 @@
}
}
/* Disable smmu clock */
- kgsl_iommu_disable_clk(mmu);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
@@ -1046,7 +1138,7 @@
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
- .mmu_disable_clk = kgsl_iommu_disable_clk,
+ .mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index efc3d9c..354a5cf 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -103,6 +103,8 @@
* instance of the IOMMU driver
* @iommu_last_cmd_ts: The timestamp of last command submitted that
accesses iommu registers
+ * @clk_event_queued: Indicates whether an event to disable clocks
+ * is already queued or not
* @device: Pointer to kgsl device
* @asids: A bit structure indicating which id's are presently used
* @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
@@ -113,6 +115,7 @@
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
unsigned int unit_count;
unsigned int iommu_last_cmd_ts;
+ bool clk_event_queued;
struct kgsl_device *device;
unsigned long *asids;
unsigned int asid;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index dfaadba..c02274d 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -543,13 +543,14 @@
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags)
{
struct kgsl_device *device = mmu->device;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return;
else if (device->ftbl->setstate)
- device->ftbl->setstate(device, flags);
+ device->ftbl->setstate(device, context_id, flags);
else if (mmu->mmu_ops->mmu_device_setstate)
mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
@@ -684,7 +685,8 @@
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
- pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
+ pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
+ &pagetable->tlb_flags);
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
/* Remove the statistics */
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 4c0c015..d06ce45 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -125,14 +125,15 @@
int (*mmu_start) (struct kgsl_mmu *mmu);
void (*mmu_stop) (struct kgsl_mmu *mmu);
void (*mmu_setstate) (struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pagetable);
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id);
void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
unsigned int (*mmu_get_current_ptbase)
(struct kgsl_mmu *mmu);
- void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu);
+ void (*mmu_disable_clk_on_ts)
+ (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
@@ -149,7 +150,8 @@
unsigned int protflags,
unsigned int *tlb_flags);
int (*mmu_unmap) (void *mmu_pt,
- struct kgsl_memdesc *memdesc);
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags);
void *(*mmu_create_pagetable) (void);
void (*mmu_destroy_pagetable) (void *pt);
int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
@@ -193,7 +195,8 @@
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags);
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
@@ -219,10 +222,11 @@
}
static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pagetable)
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
- mmu->mmu_ops->mmu_setstate(mmu, pagetable);
+ mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
}
static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
@@ -291,10 +295,11 @@
return 0;
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, bool ts_valid)
{
- if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu);
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
}
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 409fe40..6df073a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,6 +439,8 @@
if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
trace_kgsl_rail(device, state);
+ if (pwr->gpu_dig)
+ regulator_disable(pwr->gpu_dig);
if (pwr->gpu_reg)
regulator_disable(pwr->gpu_reg);
}
@@ -449,8 +451,18 @@
if (pwr->gpu_reg) {
int status = regulator_enable(pwr->gpu_reg);
if (status)
- KGSL_DRV_ERR(device, "regulator_enable "
- "failed: %d\n", status);
+ KGSL_DRV_ERR(device,
+ "core regulator_enable "
+ "failed: %d\n",
+ status);
+ }
+ if (pwr->gpu_dig) {
+ int status = regulator_enable(pwr->gpu_dig);
+ if (status)
+ KGSL_DRV_ERR(device,
+ "cx regulator_enable "
+ "failed: %d\n",
+ status);
}
}
}
@@ -534,6 +546,13 @@
if (IS_ERR(pwr->gpu_reg))
pwr->gpu_reg = NULL;
+ if (pwr->gpu_reg) {
+ pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(pwr->gpu_dig))
+ pwr->gpu_dig = NULL;
+ } else
+ pwr->gpu_dig = NULL;
+
pwr->power_flags = 0;
pwr->nap_allowed = pdata->nap_allowed;
@@ -596,6 +615,11 @@
pwr->gpu_reg = NULL;
}
+ if (pwr->gpu_dig) {
+ regulator_put(pwr->gpu_dig);
+ pwr->gpu_dig = NULL;
+ }
+
for (i = 1; i < KGSL_MAX_CLKS; i++)
if (pwr->grp_clks[i]) {
clk_put(pwr->grp_clks[i]);
@@ -713,7 +737,6 @@
}
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
@@ -755,7 +778,6 @@
gpu_freq);
_sleep_accounting(device);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
pm_qos_update_request(&device->pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
@@ -888,7 +910,6 @@
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
- kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 1e5c21c..954c818 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,6 +50,7 @@
unsigned int interval_timeout;
bool strtstp_sleepwake;
struct regulator *gpu_reg;
+ struct regulator *gpu_dig;
uint32_t pcl;
unsigned int nap_allowed;
unsigned int idle_needed;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
index 71af893..4102302 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
@@ -89,6 +89,7 @@
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
+ struct kgsl_power_stats stats;
int i, busy, nr_cpu = 1;
if (priv->pulse.busy_start_time != 0) {
@@ -111,6 +112,19 @@
spin_unlock(&priv->cpu_info.lock);
}
priv->pulse.wait_interval /= nr_cpu;
+
+ /* This is called from within a mutex protected function, so
+ no additional locking required */
+ device->ftbl->power_stats(device, &stats);
+
+ /* If total_time is zero, then we don't have
+ any interesting statistics to store */
+ if (stats.total_time == 0) {
+ priv->pulse.busy_start_time = 0;
+ return;
+ }
+
+ priv->pulse.busy_interval = stats.busy_time;
msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
}
priv->pulse.busy_start_time = ktime_to_us(ktime_get());
@@ -120,21 +134,8 @@
struct kgsl_pwrscale *pwrscale)
{
int i, nr_cpu;
- struct kgsl_power_stats stats;
struct idlestats_priv *priv = pwrscale->priv;
- /* This is called from within a mutex protected function, so
- no additional locking required */
- device->ftbl->power_stats(device, &stats);
-
- /* If total_time is zero, then we don't have
- any interesting statistics to store */
- if (stats.total_time == 0) {
- priv->pulse.busy_start_time = 0;
- return;
- }
-
- priv->pulse.busy_interval = stats.busy_time;
nr_cpu = num_possible_cpus();
for (i = 0; i < nr_cpu; i++)
if (cpu_online(i))
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index bc2685c..6efba45 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -444,11 +444,13 @@
(ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
KGSL_CMD_INFO(device, "context switch %d -> %d\n",
context->id, z180_dev->ringbuffer.prevctx);
- kgsl_mmu_setstate(&device->mmu, pagetable);
+ kgsl_mmu_setstate(&device->mmu, pagetable,
+ KGSL_MEMSTORE_GLOBAL);
cnt = PACKETSIZE_STATESTREAM;
ofs = 0;
}
kgsl_setstate(&device->mmu,
+ KGSL_MEMSTORE_GLOBAL,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
@@ -861,7 +863,8 @@
if (z180_dev->ringbuffer.prevctx == context->id) {
z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
device->mmu.hwpagetable = device->mmu.defaultpagetable;
- kgsl_setstate(&device->mmu, KGSL_MMUFLAGS_PTUPDATE);
+ kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+ KGSL_MMUFLAGS_PTUPDATE);
}
}
diff --git a/drivers/hwmon/epm_adc.c b/drivers/hwmon/epm_adc.c
index a8b99b9..fd4524e 100644
--- a/drivers/hwmon/epm_adc.c
+++ b/drivers/hwmon/epm_adc.c
@@ -71,8 +71,6 @@
struct epm_adc_drv {
struct platform_device *pdev;
struct device *hwmon;
- struct sensor_device_attribute *sens_attr;
- char **fnames;
struct spi_device *epm_spi_client;
struct mutex conv_lock;
uint32_t bus_id;
@@ -723,60 +721,57 @@
return snprintf(buf, 16, "Result: %d\n", conv.physical);
}
-static struct sensor_device_attribute epm_adc_in_attr =
- SENSOR_ATTR(NULL, S_IRUGO, epm_adc_show_in, NULL, 0);
+static struct sensor_device_attribute epm_adc_in_attrs[] = {
+ SENSOR_ATTR(ads0_chan0, S_IRUGO, epm_adc_show_in, NULL, 0),
+ SENSOR_ATTR(ads0_chan1, S_IRUGO, epm_adc_show_in, NULL, 1),
+ SENSOR_ATTR(ads0_chan2, S_IRUGO, epm_adc_show_in, NULL, 2),
+ SENSOR_ATTR(ads0_chan3, S_IRUGO, epm_adc_show_in, NULL, 3),
+ SENSOR_ATTR(ads0_chan4, S_IRUGO, epm_adc_show_in, NULL, 4),
+ SENSOR_ATTR(ads0_chan5, S_IRUGO, epm_adc_show_in, NULL, 5),
+ SENSOR_ATTR(ads0_chan6, S_IRUGO, epm_adc_show_in, NULL, 6),
+ SENSOR_ATTR(ads0_chan7, S_IRUGO, epm_adc_show_in, NULL, 7),
+ SENSOR_ATTR(ads0_chan8, S_IRUGO, epm_adc_show_in, NULL, 8),
+ SENSOR_ATTR(ads0_chan9, S_IRUGO, epm_adc_show_in, NULL, 9),
+ SENSOR_ATTR(ads0_chan10, S_IRUGO, epm_adc_show_in, NULL, 10),
+ SENSOR_ATTR(ads0_chan11, S_IRUGO, epm_adc_show_in, NULL, 11),
+ SENSOR_ATTR(ads0_chan12, S_IRUGO, epm_adc_show_in, NULL, 12),
+ SENSOR_ATTR(ads0_chan13, S_IRUGO, epm_adc_show_in, NULL, 13),
+ SENSOR_ATTR(ads0_chan14, S_IRUGO, epm_adc_show_in, NULL, 14),
+ SENSOR_ATTR(ads0_chan15, S_IRUGO, epm_adc_show_in, NULL, 15),
+ SENSOR_ATTR(ads1_chan0, S_IRUGO, epm_adc_show_in, NULL, 16),
+ SENSOR_ATTR(ads1_chan1, S_IRUGO, epm_adc_show_in, NULL, 17),
+ SENSOR_ATTR(ads1_chan2, S_IRUGO, epm_adc_show_in, NULL, 18),
+ SENSOR_ATTR(ads1_chan3, S_IRUGO, epm_adc_show_in, NULL, 19),
+ SENSOR_ATTR(ads1_chan4, S_IRUGO, epm_adc_show_in, NULL, 20),
+ SENSOR_ATTR(ads1_chan5, S_IRUGO, epm_adc_show_in, NULL, 21),
+ SENSOR_ATTR(ads1_chan6, S_IRUGO, epm_adc_show_in, NULL, 22),
+ SENSOR_ATTR(ads1_chan7, S_IRUGO, epm_adc_show_in, NULL, 23),
+ SENSOR_ATTR(ads1_chan8, S_IRUGO, epm_adc_show_in, NULL, 24),
+ SENSOR_ATTR(ads1_chan9, S_IRUGO, epm_adc_show_in, NULL, 25),
+ SENSOR_ATTR(ads1_chan10, S_IRUGO, epm_adc_show_in, NULL, 26),
+ SENSOR_ATTR(ads1_chan11, S_IRUGO, epm_adc_show_in, NULL, 27),
+ SENSOR_ATTR(ads1_chan12, S_IRUGO, epm_adc_show_in, NULL, 28),
+ SENSOR_ATTR(ads1_chan13, S_IRUGO, epm_adc_show_in, NULL, 29),
+ SENSOR_ATTR(ads1_chan14, S_IRUGO, epm_adc_show_in, NULL, 30),
+ SENSOR_ATTR(ads1_chan15, S_IRUGO, epm_adc_show_in, NULL, 31),
+};
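Replacing the runtime name construction with a static SENSOR_ATTR table trades a small amount of data-section space for a much simpler probe path: there are no per-device allocations to fail or free, and the index field of each entry is what epm_adc_show_in() already uses to select the ADS device/channel pair.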
static int __devinit epm_adc_init_hwmon(struct platform_device *pdev,
struct epm_adc_drv *epm_adc)
{
struct epm_adc_platform_data *pdata = pdev->dev.platform_data;
- int num_chans = pdata->num_channels, dev_idx = 0, chan_idx = 0;
- int i = 0, rc = 0;
- const char prefix[] = "ads", postfix[] = "_chan";
- char tmpbuf[3];
+ int i, rc, num_chans = pdata->num_channels;
- epm_adc->fnames = devm_kzalloc(&pdev->dev,
- num_chans * EPM_ADC_MAX_FNAME +
- num_chans * sizeof(char *), GFP_KERNEL);
- if (!epm_adc->fnames) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
- return -ENOMEM;
- }
-
- epm_adc->sens_attr = devm_kzalloc(&pdev->dev, num_chans *
- sizeof(struct sensor_device_attribute), GFP_KERNEL);
- if (!epm_adc->sens_attr) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
- rc = -ENOMEM;
- }
-
- for (i = 0; i < num_chans; i++, chan_idx++) {
- epm_adc->fnames[i] = (char *)epm_adc->fnames +
- (i * EPM_ADC_MAX_FNAME) + (num_chans *
- sizeof(char *));
- if (chan_idx == pdata->chan_per_adc) {
- chan_idx = 0;
- dev_idx++;
- }
- strlcpy(epm_adc->fnames[i], prefix, EPM_ADC_MAX_FNAME);
- snprintf(tmpbuf, sizeof(tmpbuf), "%d", dev_idx);
- strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
- strlcat(epm_adc->fnames[i], postfix, EPM_ADC_MAX_FNAME);
- snprintf(tmpbuf, sizeof(tmpbuf), "%d", chan_idx);
- strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
- epm_adc_in_attr.index = i;
- epm_adc_in_attr.dev_attr.attr.name = epm_adc->fnames[i];
- memcpy(&epm_adc->sens_attr[i], &epm_adc_in_attr,
- sizeof(epm_adc_in_attr));
+ for (i = 0; i < num_chans; i++) {
rc = device_create_file(&pdev->dev,
- &epm_adc->sens_attr[i].dev_attr);
+ &epm_adc_in_attrs[i].dev_attr);
if (rc) {
dev_err(&pdev->dev, "device_create_file failed\n");
return rc;
}
}
- return rc;
+ return 0;
}
static int __devinit epm_adc_spi_probe(struct spi_device *spi)
@@ -866,10 +861,8 @@
int num_chans = pdata->num_channels;
int i = 0;
- if (epm_adc->sens_attr)
- for (i = 0; i < num_chans; i++)
- device_remove_file(&pdev->dev,
- &epm_adc->sens_attr[i].dev_attr);
+ for (i = 0; i < num_chans; i++)
+ device_remove_file(&pdev->dev, &epm_adc_in_attrs[i].dev_attr);
hwmon_device_unregister(epm_adc->hwmon);
misc_deregister(&epm_adc->misc);
epm_adc = NULL;
diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.c b/drivers/media/video/msm/gemini/msm_gemini_platform.c
index d7f6bd8..28d2439 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_platform.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_platform.c
@@ -47,7 +47,7 @@
unsigned long size;
int rc;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- *ionhandle = ion_import_fd(gemini_client, fd);
+ *ionhandle = ion_import_dma_buf(gemini_client, fd);
if (IS_ERR_OR_NULL(*ionhandle))
return 0;
diff --git a/drivers/media/video/msm/mercury/msm_mercury_platform.c b/drivers/media/video/msm/mercury/msm_mercury_platform.c
index 67ce82d..e90c63c 100644
--- a/drivers/media/video/msm/mercury/msm_mercury_platform.c
+++ b/drivers/media/video/msm/mercury/msm_mercury_platform.c
@@ -52,7 +52,7 @@
unsigned long size;
int rc;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- *ionhandle = ion_import_fd(mercury_client, fd);
+ *ionhandle = ion_import_dma_buf(mercury_client, fd);
if (IS_ERR_OR_NULL(*ionhandle))
return 0;
diff --git a/drivers/media/video/msm/msm_camera.c b/drivers/media/video/msm/msm_camera.c
index 9f32bfe..2f1d1ab 100644
--- a/drivers/media/video/msm/msm_camera.c
+++ b/drivers/media/video/msm/msm_camera.c
@@ -314,7 +314,7 @@
if (!region)
goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- region->handle = ion_import_fd(client_for_ion, info->fd);
+ region->handle = ion_import_dma_buf(client_for_ion, info->fd);
if (IS_ERR_OR_NULL(region->handle))
goto out1;
ion_phys(client_for_ion, region->handle,
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index 0da5043..be6c543 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -897,6 +897,7 @@
pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
pcam_inst->my_index = i;
pcam_inst->pcam = pcam;
+ mutex_init(&pcam_inst->inst_lock);
pcam->mctl_node.dev_inst[i] = pcam_inst;
D("%s pcam_inst %p my_index = %d\n", __func__,
@@ -1006,6 +1007,7 @@
pcam->mctl_node.dev_inst[pcam_inst->my_index] = NULL;
v4l2_fh_del(&pcam_inst->eventHandle);
v4l2_fh_exit(&pcam_inst->eventHandle);
+ mutex_destroy(&pcam_inst->inst_lock);
kfree(pcam_inst);
if (NULL != pmctl) {
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 0043f72..5d412db 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -132,7 +132,7 @@
if (!region)
goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- region->handle = ion_import_fd(client, info->fd);
+ region->handle = ion_import_dma_buf(client, info->fd);
if (IS_ERR_OR_NULL(region->handle))
goto out1;
if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index 885cd90..fa985ce 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -3751,19 +3751,11 @@
}
}
- if (vfe31_ctrl->fs_vfe == NULL) {
- vfe31_ctrl->fs_vfe =
- regulator_get(&vfe31_ctrl->pdev->dev, "fs_vfe");
- if (IS_ERR(vfe31_ctrl->fs_vfe)) {
- pr_err("%s: Regulator FS_VFE get failed %ld\n",
- __func__, PTR_ERR(vfe31_ctrl->fs_vfe));
- vfe31_ctrl->fs_vfe = NULL;
- goto vfe_fs_failed;
- } else if (regulator_enable(vfe31_ctrl->fs_vfe)) {
+ if (vfe31_ctrl->fs_vfe) {
+ rc = regulator_enable(vfe31_ctrl->fs_vfe);
+ if (rc) {
pr_err("%s: Regulator FS_VFE enable failed\n",
__func__);
- regulator_put(vfe31_ctrl->fs_vfe);
- vfe31_ctrl->fs_vfe = NULL;
goto vfe_fs_failed;
}
}
@@ -3795,8 +3787,6 @@
vfe_clk_enable_failed:
regulator_disable(vfe31_ctrl->fs_vfe);
- regulator_put(vfe31_ctrl->fs_vfe);
- vfe31_ctrl->fs_vfe = NULL;
vfe_fs_failed:
if (!mctl->sdata->csi_if)
iounmap(vfe31_ctrl->camifbase);
@@ -3822,12 +3812,11 @@
msm_cam_clk_enable(&vfe31_ctrl->pdev->dev, vfe_clk_info,
vfe31_ctrl->vfe_clk, ARRAY_SIZE(vfe_clk_info), 0);
- if (vfe31_ctrl->fs_vfe) {
+
+ if (vfe31_ctrl->fs_vfe)
regulator_disable(vfe31_ctrl->fs_vfe);
- regulator_put(vfe31_ctrl->fs_vfe);
- vfe31_ctrl->fs_vfe = NULL;
- }
- CDBG("%s, 31ee_irq\n", __func__);
+
+ CDBG("%s Releasing resources\n", __func__);
if (!pmctl->sdata->csi_if)
iounmap(vfe31_ctrl->camifbase);
iounmap(vfe31_ctrl->vfebase);
@@ -3919,6 +3908,13 @@
disable_irq(vfe31_ctrl->vfeirq->start);
vfe31_ctrl->pdev = pdev;
+ vfe31_ctrl->fs_vfe = regulator_get(&vfe31_ctrl->pdev->dev, "vdd");
+ if (IS_ERR(vfe31_ctrl->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(vfe31_ctrl->fs_vfe));
+ vfe31_ctrl->fs_vfe = NULL;
+ }
+
sd_info.sdev_type = VFE_DEV;
sd_info.sd_index = 0;
sd_info.irq_num = vfe31_ctrl->vfeirq->start;
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index ffc193a..2f2d0c7 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -4489,19 +4489,10 @@
goto remap_failed;
}
- if (axi_ctrl->fs_vfe == NULL) {
- axi_ctrl->fs_vfe =
- regulator_get(&axi_ctrl->pdev->dev, "fs_vfe");
- if (IS_ERR(axi_ctrl->fs_vfe)) {
- pr_err("%s: Regulator FS_VFE get failed %ld\n",
- __func__, PTR_ERR(axi_ctrl->fs_vfe));
- axi_ctrl->fs_vfe = NULL;
- goto fs_failed;
- } else if (regulator_enable(axi_ctrl->fs_vfe)) {
- pr_err("%s: Regulator FS_VFE enable failed\n",
- __func__);
- regulator_put(axi_ctrl->fs_vfe);
- axi_ctrl->fs_vfe = NULL;
+ if (axi_ctrl->fs_vfe) {
+ rc = regulator_enable(axi_ctrl->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
goto fs_failed;
}
}
@@ -4528,8 +4519,6 @@
return rc;
clk_enable_failed:
regulator_disable(axi_ctrl->fs_vfe);
- regulator_put(axi_ctrl->fs_vfe);
- axi_ctrl->fs_vfe = NULL;
fs_failed:
iounmap(axi_ctrl->share_ctrl->vfebase);
axi_ctrl->share_ctrl->vfebase = NULL;
@@ -4584,11 +4573,9 @@
tasklet_kill(&axi_ctrl->vfe32_tasklet);
msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe32_clk_info,
axi_ctrl->vfe_clk, ARRAY_SIZE(vfe32_clk_info), 0);
- if (axi_ctrl->fs_vfe) {
+ if (axi_ctrl->fs_vfe)
regulator_disable(axi_ctrl->fs_vfe);
- regulator_put(axi_ctrl->fs_vfe);
- axi_ctrl->fs_vfe = NULL;
- }
+
iounmap(axi_ctrl->share_ctrl->vfebase);
axi_ctrl->share_ctrl->vfebase = NULL;
@@ -5139,6 +5126,13 @@
goto vfe32_no_resource;
}
+ axi_ctrl->fs_vfe = regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(axi_ctrl->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(axi_ctrl->fs_vfe));
+ axi_ctrl->fs_vfe = NULL;
+ }
+
/* Request for this device irq from the camera server. If the
* IRQ Router is present on this target, the interrupt will be
* handled by the camera server and the interrupt service
diff --git a/drivers/media/video/msm/msm_vpe.c b/drivers/media/video/msm/msm_vpe.c
index fb22cf9..2b58f44 100644
--- a/drivers/media/video/msm/msm_vpe.c
+++ b/drivers/media/video/msm/msm_vpe.c
@@ -528,16 +528,13 @@
vpe_ctrl->state = VPE_STATE_INIT;
spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
enable_irq(vpe_ctrl->vpeirq->start);
- vpe_ctrl->fs_vpe = regulator_get(NULL, "fs_vpe");
- if (IS_ERR(vpe_ctrl->fs_vpe)) {
- pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
- PTR_ERR(vpe_ctrl->fs_vpe));
- vpe_ctrl->fs_vpe = NULL;
- goto vpe_fs_failed;
- } else if (regulator_enable(vpe_ctrl->fs_vpe)) {
- pr_err("%s: Regulator FS_VPE enable failed\n", __func__);
- regulator_put(vpe_ctrl->fs_vpe);
- goto vpe_fs_failed;
+
+ if (vpe_ctrl->fs_vpe) {
+ rc = regulator_enable(vpe_ctrl->fs_vpe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto vpe_fs_failed;
+ }
}
rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
@@ -549,8 +546,6 @@
vpe_clk_failed:
regulator_disable(vpe_ctrl->fs_vpe);
- regulator_put(vpe_ctrl->fs_vpe);
- vpe_ctrl->fs_vpe = NULL;
vpe_fs_failed:
disable_irq(vpe_ctrl->vpeirq->start);
vpe_ctrl->state = VPE_STATE_IDLE;
@@ -576,8 +571,6 @@
vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
regulator_disable(vpe_ctrl->fs_vpe);
- regulator_put(vpe_ctrl->fs_vpe);
- vpe_ctrl->fs_vpe = NULL;
spin_lock_irqsave(&vpe_ctrl->lock, flags);
vpe_ctrl->state = VPE_STATE_IDLE;
spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
@@ -1026,6 +1019,13 @@
goto vpe_no_resource;
}
+ vpe_ctrl->fs_vpe = regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(vpe_ctrl->fs_vpe)) {
+ pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
+ PTR_ERR(vpe_ctrl->fs_vpe));
+ vpe_ctrl->fs_vpe = NULL;
+ }
+
disable_irq(vpe_ctrl->vpeirq->start);
atomic_set(&vpe_ctrl->active, 0);
@@ -1042,7 +1042,7 @@
vpe_no_resource:
pr_err("%s: VPE Probe failed.\n", __func__);
kfree(vpe_ctrl);
- return 0;
+ return rc;
}
struct platform_driver msm_vpe_driver = {
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
index bdace3c..2913d74 100644
--- a/drivers/media/video/msm_vidc/msm_smem.c
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -26,7 +26,7 @@
unsigned long ionflag;
size_t len;
int rc = 0;
- hndl = ion_import_fd(client->clnt, fd);
+ hndl = ion_import_dma_buf(client->clnt, fd);
if (IS_ERR_OR_NULL(hndl)) {
pr_err("Failed to get handle: %p, %d, %d, %p\n",
client, fd, offset, hndl);
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 583b5a9..85e984d 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -996,10 +996,10 @@
HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
switch (*data) {
case HAL_OUTPUT_ORDER_DECODE:
- pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
+ pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
break;
case HAL_OUTPUT_ORDER_DISPLAY:
- pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
+ pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
break;
default:
HAL_MSG_ERROR("invalid output order: 0x%x",
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index db0dbc2..0a033ae 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -339,7 +339,7 @@
buf = container_of(vb, struct vcap_buffer, vb);
- buf->ion_handle = ion_import_fd(dev->ion_client, b->m.userptr);
+ buf->ion_handle = ion_import_dma_buf(dev->ion_client, b->m.userptr);
if (IS_ERR((void *)buf->ion_handle)) {
pr_err("%s: Could not alloc memory\n", __func__);
buf->ion_handle = NULL;
diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c
index 186195d..740d183 100644
--- a/drivers/media/video/videobuf2-msm-mem.c
+++ b/drivers/media/video/videobuf2-msm-mem.c
@@ -185,7 +185,7 @@
if (mem->phyaddr != 0)
return 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
- mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
+ mem->ion_handle = ion_import_dma_buf(client, (int)mem->vaddr);
if (IS_ERR_OR_NULL(mem->ion_handle)) {
pr_err("%s ION import failed\n", __func__);
return PTR_ERR(mem->ion_handle);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index f2ed62d..d4eb6e0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -261,7 +261,8 @@
ion_phys_addr_t pa;
/* Get the handle of the shared fd */
- svc->ihandle = ion_import_fd(qseecom.ion_clnt, listener->ifd_data_fd);
+ svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ listener->ifd_data_fd);
if (svc->ihandle == NULL) {
pr_err("Ion client could not retrieve the handle\n");
return -ENOMEM;
@@ -503,7 +504,8 @@
return -EFAULT;
/* Get the handle of the shared fd */
- data->client.ihandle = ion_import_fd(qseecom.ion_clnt, req.ifd_data_fd);
+ data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ req.ifd_data_fd);
if (IS_ERR_OR_NULL(data->client.ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
return -ENOMEM;
@@ -665,7 +667,7 @@
pr_warn("App (%s) does not exist, loading apps for first time\n",
(char *)(req.app_name));
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
@@ -1065,7 +1067,7 @@
for (i = 0; i < MAX_ION_FD; i++) {
if (req->ifd_data[i].fd > 0) {
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
req->ifd_data[i].fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client can't retrieve the handle\n");
@@ -1299,7 +1301,7 @@
}
/* Get the handle of the shared fd */
- ihandle = ion_import_fd(qseecom.ion_clnt,
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
if (IS_ERR_OR_NULL(ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 96eae7d..0c3f994 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -465,13 +465,6 @@
This provides support for the SD/MMC cell found in the
MSM and QSD SOCs from Qualcomm.
-config MMC_MSM_CARD_HW_DETECTION
- boolean "Qualcomm MMC Hardware detection support"
- depends on MMC_MSM
- default n
- help
- Select Y if the hardware has support to detect card insertion/removal.
-
config MMC_MSM_SDC1_SUPPORT
boolean "Qualcomm SDC1 support"
depends on MMC_MSM
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 03bcc7c..a4af6c9 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -162,7 +162,7 @@
{
unsigned short ret = NR_SG;
- if (host->is_sps_mode) {
+ if (is_sps_mode(host)) {
ret = SPS_MAX_DESCS;
} else { /* DMA or PIO mode */
if (NR_SG > MAX_NR_SG_DMA_PIO)
@@ -263,41 +263,89 @@
static void msmsdcc_soft_reset(struct msmsdcc_host *host)
{
/*
- * Reset SDCC controller's DPSM (data path state machine
- * and CPSM (command path state machine).
+ * Reset controller state machines without resetting
+ * configuration registers (MCI_POWER, MCI_CLK, MCI_INT_MASKn).
*/
- writel_relaxed(0, host->base + MMCICOMMAND);
- msmsdcc_sync_reg_wr(host);
- writel_relaxed(0, host->base + MMCIDATACTRL);
- msmsdcc_sync_reg_wr(host);
+ if (is_sw_reset_save_config(host)) {
+ ktime_t start;
+
+ writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
+ | MCI_SW_RST_CFG, host->base + MMCIPOWER);
+ msmsdcc_sync_reg_wr(host);
+
+ start = ktime_get();
+ while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST_CFG) {
+ /*
+ * SW reset can take up to 10 HCLK + 15 MCLK cycles.
+ * Calculating based on min clk rates (hclk = 27MHz,
+ * mclk = 400KHz) it comes to ~40us. Let's poll for
+ * max. 1ms for reset completion.
+ */
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+ pr_err("%s: %s failed\n",
+ mmc_hostname(host->mmc), __func__);
+ BUG();
+ }
+ }
+ } else {
+ writel_relaxed(0, host->base + MMCICOMMAND);
+ msmsdcc_sync_reg_wr(host);
+ writel_relaxed(0, host->base + MMCIDATACTRL);
+ msmsdcc_sync_reg_wr(host);
+ }
}
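As a cross-check on the 1 ms poll window chosen above: at the minimum rates quoted in the comment, 10 HCLK cycles at 27 MHz take roughly 0.4 us and 15 MCLK cycles at 400 kHz take 37.5 us, so a reset should complete in about 38 us; the 1 ms bound therefore leaves about 26x headroom before the BUG() triggers.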
static void msmsdcc_hard_reset(struct msmsdcc_host *host)
{
int ret;
- /* Reset the controller */
- ret = clk_reset(host->clk, CLK_RESET_ASSERT);
- if (ret)
- pr_err("%s: Clock assert failed at %u Hz"
- " with err %d\n", mmc_hostname(host->mmc),
+ /*
+ * Reset SDCC controller to power on default state.
+ * Don't issue a reset request to clock control block if
+ * SDCC controller itself can support hard reset.
+ */
+ if (is_sw_hard_reset(host)) {
+ ktime_t start;
+
+ writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
+ | MCI_SW_RST, host->base + MMCIPOWER);
+ msmsdcc_sync_reg_wr(host);
+
+ start = ktime_get();
+ while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST) {
+ /*
+ * See comment in msmsdcc_soft_reset() on choosing 1ms
+ * poll timeout.
+ */
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+ pr_err("%s: %s failed\n",
+ mmc_hostname(host->mmc), __func__);
+ BUG();
+ }
+ }
+ } else {
+ ret = clk_reset(host->clk, CLK_RESET_ASSERT);
+ if (ret)
+ pr_err("%s: Clock assert failed at %u Hz" \
+ " with err %d\n", mmc_hostname(host->mmc),
host->clk_rate, ret);
- ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
- if (ret)
- pr_err("%s: Clock deassert failed at %u Hz"
- " with err %d\n", mmc_hostname(host->mmc),
- host->clk_rate, ret);
+ ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
+ if (ret)
+ pr_err("%s: Clock deassert failed at %u Hz" \
+ " with err %d\n", mmc_hostname(host->mmc),
+ host->clk_rate, ret);
- mb();
- /* Give some delay for clock reset to propogate to controller */
- msmsdcc_delay(host);
+ mb();
+ /* Give some delay for clock reset to propagate to controller */
+ msmsdcc_delay(host);
+ }
}
static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
{
- if (host->sdcc_version) {
- if (host->is_sps_mode) {
+ if (is_soft_reset(host)) {
+ if (is_sps_mode(host)) {
/* Reset DML first */
msmsdcc_dml_reset(host);
/*
@@ -313,7 +361,7 @@
pr_debug("%s: Applied soft reset to Controller\n",
mmc_hostname(host->mmc));
- if (host->is_sps_mode)
+ if (is_sps_mode(host))
msmsdcc_dml_init(host);
} else {
/* Give Clock reset (hard reset) to controller */
@@ -393,7 +441,7 @@
static inline void msmsdcc_sync_reg_wr(struct msmsdcc_host *host)
{
mb();
- if (!host->sdcc_version)
+ if (!is_wait_for_reg_write(host))
udelay(host->reg_write_delay);
else if (readl_relaxed(host->base + MCI_STATUS2) &
MCI_MCLK_REG_WR_ACTIVE) {
@@ -773,14 +821,14 @@
bool ret = true;
u32 xfer_size = data->blksz * data->blocks;
- if (host->is_sps_mode) {
+ if (is_sps_mode(host)) {
/*
* BAM Mode: Fall back on PIO if size is less
* than or equal to SPS_MIN_XFER_SIZE bytes.
*/
if (xfer_size <= SPS_MIN_XFER_SIZE)
ret = false;
- } else if (host->is_dma_mode) {
+ } else if (is_dma_mode(host)) {
/*
* ADM Mode: Fall back on PIO if size is less than FIFO size
* or not integer multiple of FIFO size
@@ -1125,9 +1173,9 @@
datactrl |= MCI_AUTO_PROG_DONE;
if (msmsdcc_is_dma_possible(host, data)) {
- if (host->is_dma_mode && !msmsdcc_config_dma(host, data)) {
+ if (is_dma_mode(host) && !msmsdcc_config_dma(host, data)) {
datactrl |= MCI_DPSM_DMAENABLE;
- } else if (host->is_sps_mode) {
+ } else if (is_sps_mode(host)) {
if (!msmsdcc_is_dml_busy(host)) {
if (!msmsdcc_sps_start_xfer(host, data)) {
/* Now kick start DML transfer */
@@ -1176,7 +1224,7 @@
"%s: data timeout is zero. timeout_ns=0x%x, timeout_clks=0x%x\n",
mmc_hostname(host->mmc), data->timeout_ns, data->timeout_clks);
- if (host->is_dma_mode && (datactrl & MCI_DPSM_DMAENABLE)) {
+ if (is_dma_mode(host) && (datactrl & MCI_DPSM_DMAENABLE)) {
/* Use ADM (Application Data Mover) HW for Data transfer */
/* Save parameters for the dma exec function */
host->cmd_timeout = timeout;
@@ -1626,10 +1674,10 @@
if (!cmd->data || cmd->error) {
if (host->curr.data && host->dma.sg &&
- host->is_dma_mode)
+ is_dma_mode(host))
msm_dmov_flush(host->dma.channel, 0);
else if (host->curr.data && host->sps.sg &&
- host->is_sps_mode){
+ is_sps_mode(host)) {
/* Stop current SPS transfer */
msmsdcc_sps_exit_curr_xfer(host);
}
@@ -1782,9 +1830,9 @@
MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
msmsdcc_data_err(host, data, status);
host->curr.data_xfered = 0;
- if (host->dma.sg && host->is_dma_mode)
+ if (host->dma.sg && is_dma_mode(host))
msm_dmov_flush(host->dma.channel, 0);
- else if (host->sps.sg && host->is_sps_mode) {
+ else if (host->sps.sg && is_sps_mode(host)) {
/* Stop current SPS transfer */
msmsdcc_sps_exit_curr_xfer(host);
} else {
@@ -1969,7 +2017,7 @@
msmsdcc_sdio_al_lpm(mmc, false);
/* check if sps pipe reset is pending? */
- if (host->is_sps_mode && host->sps.pipe_reset_pending) {
+ if (is_sps_mode(host) && host->sps.pipe_reset_pending) {
msmsdcc_sps_pipes_reset_and_restore(host);
host->sps.pipe_reset_pending = false;
}
@@ -2035,7 +2083,7 @@
}
if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
- if (host->sdcc_version) {
+ if (is_auto_prog_done(host)) {
if (!mrq->stop)
host->curr.wait_for_auto_prog_done = true;
} else {
@@ -2112,8 +2160,15 @@
goto out;
}
- if (regulator_count_voltages(vreg->reg) > 0)
+ if (regulator_count_voltages(vreg->reg) > 0) {
vreg->set_voltage_sup = 1;
+ /* sanity check */
+ if (!vreg->high_vol_level || !vreg->hpm_uA) {
+ pr_err("%s: %s invalid constraints specified\n",
+ __func__, vreg->name);
+ rc = -EINVAL;
+ }
+ }
out:
return rc;
@@ -4516,11 +4571,11 @@
if (host->curr.data) {
if (!msmsdcc_is_dma_possible(host, host->curr.data))
pr_info("%s: PIO mode\n", mmc_hostname(host->mmc));
- else if (host->is_dma_mode)
+ else if (is_dma_mode(host))
pr_info("%s: ADM mode: busy=%d, chnl=%d, crci=%d\n",
mmc_hostname(host->mmc), host->dma.busy,
host->dma.channel, host->dma.crci);
- else if (host->is_sps_mode) {
+ else if (is_sps_mode(host)) {
if (host->sps.busy && atomic_read(&host->clks_on))
msmsdcc_print_regs("SDCC-DML", host->dml_base,
host->dml_memres->start,
@@ -4570,9 +4625,9 @@
if (mrq->data && !mrq->data->error)
mrq->data->error = -ETIMEDOUT;
host->curr.data_xfered = 0;
- if (host->dma.sg && host->is_dma_mode) {
+ if (host->dma.sg && is_dma_mode(host)) {
msm_dmov_flush(host->dma.channel, 0);
- } else if (host->sps.sg && host->is_sps_mode) {
+ } else if (host->sps.sg && is_sps_mode(host)) {
/* Stop current SPS transfer */
msmsdcc_sps_exit_curr_xfer(host);
} else {
@@ -4594,16 +4649,78 @@
spin_unlock_irqrestore(&host->lock, flags);
}
+#define MAX_PROP_SIZE 32
+static int msmsdcc_dt_parse_vreg_info(struct device *dev,
+ struct msm_mmc_reg_data **vreg_data, const char *vreg_name)
+{
+ int len, ret = 0;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ struct msm_mmc_reg_data *vreg;
+ struct device_node *np = dev->of_node;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+ if (of_parse_phandle(np, prop_name, 0)) {
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ vreg->name = vreg_name;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,sdcc-%s-always_on", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->always_on = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,sdcc-%s-lpm_sup", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->lpm_sup = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,sdcc-%s-voltage_level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->low_vol_level = be32_to_cpup(&prop[0]);
+ vreg->high_vol_level = be32_to_cpup(&prop[1]);
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,sdcc-%s-current_level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->lpm_uA = be32_to_cpup(&prop[0]);
+ vreg->hpm_uA = be32_to_cpup(&prop[1]);
+ }
+
+ *vreg_data = vreg;
+ dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+ vreg->name, vreg->always_on ? "always_on," : "",
+ vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+ vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+ }
+
+err:
+ return ret;
+}
+
static struct mmc_platform_data *msmsdcc_populate_pdata(struct device *dev)
{
int i, ret;
struct mmc_platform_data *pdata;
struct device_node *np = dev->of_node;
- u32 bus_width = 0;
- u32 *clk_table;
- int clk_table_len;
- u32 *sup_voltages;
- int sup_volt_len;
+ u32 bus_width = 0, current_limit = 0;
+ u32 *clk_table, *sup_voltages;
+ int clk_table_len, sup_volt_len, len;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
@@ -4686,6 +4803,65 @@
dev_err(dev, "Supported clock rates not specified\n");
}
+ pdata->vreg_data = devm_kzalloc(dev,
+ sizeof(struct msm_mmc_slot_reg_data), GFP_KERNEL);
+ if (!pdata->vreg_data) {
+ dev_err(dev, "could not allocate memory for vreg_data\n");
+ goto err;
+ }
+
+ if (msmsdcc_dt_parse_vreg_info(dev,
+ &pdata->vreg_data->vdd_data, "vdd"))
+ goto err;
+
+ if (msmsdcc_dt_parse_vreg_info(dev,
+ &pdata->vreg_data->vdd_io_data, "vdd-io"))
+ goto err;
+
+ len = of_property_count_strings(np, "qcom,sdcc-bus-speed-mode");
+
+ for (i = 0; i < len; i++) {
+ const char *name = NULL;
+
+ of_property_read_string_index(np,
+ "qcom,sdcc-bus-speed-mode", i, &name);
+ if (!name)
+ continue;
+
+ if (!strncmp(name, "SDR12", sizeof("SDR12")))
+ pdata->uhs_caps |= MMC_CAP_UHS_SDR12;
+ else if (!strncmp(name, "SDR25", sizeof("SDR25")))
+ pdata->uhs_caps |= MMC_CAP_UHS_SDR25;
+ else if (!strncmp(name, "SDR50", sizeof("SDR50")))
+ pdata->uhs_caps |= MMC_CAP_UHS_SDR50;
+ else if (!strncmp(name, "DDR50", sizeof("DDR50")))
+ pdata->uhs_caps |= MMC_CAP_UHS_DDR50;
+ else if (!strncmp(name, "SDR104", sizeof("SDR104")))
+ pdata->uhs_caps |= MMC_CAP_UHS_SDR104;
+ else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+ pdata->uhs_caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+ pdata->uhs_caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+ pdata->uhs_caps |= MMC_CAP_1_8V_DDR
+ | MMC_CAP_UHS_DDR50;
+ else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+ pdata->uhs_caps |= MMC_CAP_1_2V_DDR
+ | MMC_CAP_UHS_DDR50;
+ }
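Note on the string matching above: sizeof("SDR12") is 6, i.e. the five characters plus the terminating NUL, so each strncmp() compares the terminator as well and behaves as an exact match ("SDR125" would not match "SDR12"), not a prefix match.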
+
+ of_property_read_u32(np, "qcom,sdcc-current-limit", &current_limit);
+ if (current_limit == 800)
+ pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_800;
+ else if (current_limit == 600)
+ pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_600;
+ else if (current_limit == 400)
+ pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_400;
+ else if (current_limit == 200)
+ pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_200;
+
+ if (of_get_property(np, "qcom,sdcc-xpc", NULL))
+ pdata->xpc_cap = true;
if (of_get_property(np, "qcom,sdcc-nonremovable", NULL))
pdata->nonremovable = true;
if (of_get_property(np, "qcom,sdcc-disable_cmd23", NULL))
@@ -4823,9 +4999,9 @@
host->curr.cmd = NULL;
if (!plat->disable_bam && bam_memres && dml_memres && bam_irqres)
- host->is_sps_mode = 1;
+ set_hw_caps(host, MSMSDCC_SPS_BAM_SUP);
else if (dmares)
- host->is_dma_mode = 1;
+ set_hw_caps(host, MSMSDCC_DMA_SUP);
host->base = ioremap(core_memres->start,
resource_size(core_memres));
@@ -4858,7 +5034,7 @@
tasklet_init(&host->sps.tlet, msmsdcc_sps_complete_tlet,
(unsigned long)host);
- if (host->is_dma_mode) {
+ if (is_dma_mode(host)) {
/* Setup DMA */
ret = msmsdcc_init_dma(host);
if (ret)
@@ -4917,13 +5093,8 @@
if (!host->clk_rate)
dev_err(&pdev->dev, "Failed to read MCLK\n");
- /*
- * Lookup the Controller Version, to identify the supported features
- * Version number read as 0 would indicate SDCC3 or earlier versions
- */
- host->sdcc_version = readl_relaxed(host->base + MCI_VERSION);
- pr_info("%s: mci-version: %x\n", mmc_hostname(host->mmc),
- host->sdcc_version);
+ set_default_hw_caps(host);
+
/*
* Set the register write delay according to min. clock frequency
* supported and update later when the host->clk_rate changes.
@@ -4961,7 +5132,7 @@
/* Clocks has to be running before accessing SPS/DML HW blocks */
- if (host->is_sps_mode) {
+ if (is_sps_mode(host)) {
/* Initialize SPS */
ret = msmsdcc_sps_init(host);
if (ret)
@@ -4994,10 +5165,11 @@
* status is to use the AUTO_PROG_DONE status provided by SDCC4
* controller. So let's enable the CMD23 for SDCC4 only.
*/
- if (!plat->disable_cmd23 && host->sdcc_version)
+ if (!plat->disable_cmd23 && is_auto_prog_done(host))
mmc->caps |= MMC_CAP_CMD23;
mmc->caps |= plat->uhs_caps;
+ mmc->caps2 |= plat->uhs_caps2;
/*
* XPC controls the maximum current in the default speed mode of SDXC
* card. XPC=0 means 100mA (max.) but speed class is not supported.
@@ -5012,12 +5184,6 @@
mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR);
mmc->caps2 |= MMC_CAP2_SANITIZE;
- if (pdev->dev.of_node) {
- if (of_get_property((&pdev->dev)->of_node,
- "qcom,sdcc-hs200", NULL))
- mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
- }
-
if (plat->nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
mmc->caps |= MMC_CAP_SDIO_IRQ;
@@ -5174,6 +5340,8 @@
(unsigned int) plat->status_irq, host->dma.channel,
host->dma.crci);
+ pr_info("%s: Controller capabilities: 0x%.8x\n",
+ mmc_hostname(mmc), host->hw_caps);
pr_info("%s: 8 bit data mode %s\n", mmc_hostname(mmc),
(mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled"));
pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
@@ -5188,14 +5356,14 @@
pr_info("%s: Power save feature enable = %d\n",
mmc_hostname(mmc), msmsdcc_pwrsave);
- if (host->is_dma_mode && host->dma.channel != -1
+ if (is_dma_mode(host) && host->dma.channel != -1
&& host->dma.crci != -1) {
pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
mmc_hostname(mmc), host->dma.cmd_busaddr,
host->dma.cmdptr_busaddr);
- } else if (host->is_sps_mode) {
+ } else if (is_sps_mode(host)) {
pr_info("%s: SPS-BAM data transfer mode available\n",
mmc_hostname(mmc));
} else
@@ -5246,10 +5414,10 @@
irq_free:
free_irq(core_irqres->start, host);
dml_exit:
- if (host->is_sps_mode)
+ if (is_sps_mode(host))
msmsdcc_dml_exit(host);
sps_exit:
- if (host->is_sps_mode)
+ if (is_sps_mode(host))
msmsdcc_sps_exit(host);
vreg_deinit:
msmsdcc_vreg_init(host, false);
@@ -5272,7 +5440,7 @@
bus_clk_put:
if (!IS_ERR_OR_NULL(host->bus_clk))
clk_put(host->bus_clk);
- if (host->is_dma_mode) {
+ if (is_dma_mode(host)) {
if (host->dmares)
dma_free_coherent(NULL,
sizeof(struct msmsdcc_nc_dmadata),
@@ -5341,14 +5509,14 @@
msmsdcc_vreg_init(host, false);
- if (host->is_dma_mode) {
+ if (is_dma_mode(host)) {
if (host->dmares)
dma_free_coherent(NULL,
sizeof(struct msmsdcc_nc_dmadata),
host->dma.nc, host->dma.nc_busaddr);
}
- if (host->is_sps_mode) {
+ if (is_sps_mode(host)) {
msmsdcc_dml_exit(host);
msmsdcc_sps_exit(host);
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index dc32d1c..baeabd2 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -38,6 +38,8 @@
#define MCI_PWR_UP 0x02
#define MCI_PWR_ON 0x03
#define MCI_OD (1 << 6)
+#define MCI_SW_RST (1 << 7)
+#define MCI_SW_RST_CFG (1 << 8)
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
@@ -363,14 +365,12 @@
u32 pwr;
struct mmc_platform_data *plat;
- u32 sdcc_version;
+ unsigned int hw_caps;
unsigned int oldstat;
struct msmsdcc_dma_data dma;
struct msmsdcc_sps_data sps;
- bool is_dma_mode;
- bool is_sps_mode;
struct msmsdcc_pio_data pio;
#ifdef CONFIG_HAS_EARLYSUSPEND
@@ -415,6 +415,47 @@
struct device_attribute polling;
};
+#define MSMSDCC_VERSION_MASK 0xFFFF
+#define MSMSDCC_DMA_SUP (1 << 0)
+#define MSMSDCC_SPS_BAM_SUP (1 << 1)
+#define MSMSDCC_SOFT_RESET (1 << 2)
+#define MSMSDCC_AUTO_PROG_DONE (1 << 3)
+#define MSMSDCC_REG_WR_ACTIVE (1 << 4)
+#define MSMSDCC_SW_RST (1 << 5)
+#define MSMSDCC_SW_RST_CFG (1 << 6)
+
+#define set_hw_caps(h, val) ((h)->hw_caps |= val)
+#define is_sps_mode(h) ((h)->hw_caps & MSMSDCC_SPS_BAM_SUP)
+#define is_dma_mode(h) ((h)->hw_caps & MSMSDCC_DMA_SUP)
+#define is_soft_reset(h) ((h)->hw_caps & MSMSDCC_SOFT_RESET)
+#define is_auto_prog_done(h) ((h)->hw_caps & MSMSDCC_AUTO_PROG_DONE)
+#define is_wait_for_reg_write(h) ((h)->hw_caps & MSMSDCC_REG_WR_ACTIVE)
+#define is_sw_hard_reset(h) ((h)->hw_caps & MSMSDCC_SW_RST)
+#define is_sw_reset_save_config(h) ((h)->hw_caps & MSMSDCC_SW_RST_CFG)
+
+/* Set controller capabilities based on version */
+static inline void set_default_hw_caps(struct msmsdcc_host *host)
+{
+ u32 version;
+ /*
+ * Lookup the Controller Version, to identify the supported features
+ * Version number read as 0 would indicate SDCC3 or earlier versions.
+ */
+ version = readl_relaxed(host->base + MCI_VERSION);
+ pr_info("%s: SDCC Version: 0x%.8x\n", mmc_hostname(host->mmc), version);
+
+ if (!version)
+ return;
+
+ version &= MSMSDCC_VERSION_MASK;
+ if (version) /* SDCC v4 and greater */
+ host->hw_caps |= MSMSDCC_AUTO_PROG_DONE |
+ MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE;
+
+ if (version >= 0x2D) /* SDCC v4 2.1.0 and greater */
+ host->hw_caps |= MSMSDCC_SW_RST | MSMSDCC_SW_RST_CFG;
+}
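The hw_caps mask consolidates what used to be three independent fields (sdcc_version, is_dma_mode, is_sps_mode) into one word, so a feature test is a single bit check and new controller features slot in without new struct members. A hypothetical extension would follow the same pattern; the bit value and version threshold below are made up for illustration:

#define MSMSDCC_HYPOTHETICAL_CAP	(1 << 7)	/* not a real capability */
#define is_hypothetical_cap(h)	((h)->hw_caps & MSMSDCC_HYPOTHETICAL_CAP)

	/* inside set_default_hw_caps(), after the existing version checks: */
	if (version >= 0x40)	/* assumed threshold, illustration only */
		host->hw_caps |= MSMSDCC_HYPOTHETICAL_CAP;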
+
int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave);
int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable);
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 600913f..85389d0 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -472,29 +472,12 @@
* VBATT_MUL_FACTOR;
}
-#define CC_RESOLUTION_N_V1 1085069
-#define CC_RESOLUTION_D_V1 100000
-#define CC_RESOLUTION_N_V2 868056
-#define CC_RESOLUTION_D_V2 10000
-static s64 cc_to_microvolt_v1(s64 cc)
-{
- return div_s64(cc * CC_RESOLUTION_N_V1, CC_RESOLUTION_D_V1);
-}
-
-static s64 cc_to_microvolt_v2(s64 cc)
-{
- return div_s64(cc * CC_RESOLUTION_N_V2, CC_RESOLUTION_D_V2);
-}
+#define CC_RESOLUTION_N 868056
+#define CC_RESOLUTION_D 10000
static s64 cc_to_microvolt(struct pm8921_bms_chip *chip, s64 cc)
{
- /*
- * resolution (the value of a single bit) was changed after revision 2.0
- * for more accurate readings
- */
- return (chip->revision < PM8XXX_REVISION_8921_2p0) ?
- cc_to_microvolt_v1((s64)cc) :
- cc_to_microvolt_v2((s64)cc);
+ return div_s64(cc * CC_RESOLUTION_N, CC_RESOLUTION_D);
}
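With the unified constants, one coulomb-counter LSB always converts as 868056 / 10000 ≈ 86.8 uV, the finer post-revision-2.0 resolution; the per-revision branch is gone along with the v1 helpers.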
#define CC_READING_TICKS 55
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index a2eb39e..dc40c8e 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -265,6 +265,7 @@
enum pm8921_chg_hot_thr hot_thr;
int rconn_mohm;
enum pm8921_chg_led_src_config led_src_config;
+ bool host_mode;
};
/* user space parameter to limit usb current */
@@ -1141,6 +1142,7 @@
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
};
static enum power_supply_property pm_power_props_mains[] = {
@@ -1196,6 +1198,67 @@
return 0;
}
+static int switch_usb_to_charge_mode(struct pm8921_chg_chip *chip)
+{
+ int rc;
+
+ if (!chip->host_mode)
+ return 0;
+
+ /* enable usbin valid comparator and remove force usb ovp fet off */
+ rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0xB2);
+ if (rc < 0) {
+ pr_err("Failed to write 0xB2 to USB_OVP_TEST rc = %d\n", rc);
+ return rc;
+ }
+
+ chip->host_mode = 0;
+
+ return 0;
+}
+
+static int switch_usb_to_host_mode(struct pm8921_chg_chip *chip)
+{
+ int rc;
+
+ if (chip->host_mode)
+ return 0;
+
+ /* disable usbin valid comparator and force usb ovp fet off */
+ rc = pm8xxx_writeb(chip->dev->parent, USB_OVP_TEST, 0xB3);
+ if (rc < 0) {
+ pr_err("Failed to write 0xB3 to USB_OVP_TEST rc = %d\n", rc);
+ return rc;
+ }
+
+ chip->host_mode = 1;
+
+ return 0;
+}
+
+static int pm_power_set_property_usb(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ /* Check if called before init */
+ if (!the_chip)
+ return -EINVAL;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_SCOPE:
+ if (val->intval == POWER_SUPPLY_SCOPE_SYSTEM)
+ return switch_usb_to_host_mode(the_chip);
+ else if (val->intval == POWER_SUPPLY_SCOPE_DEVICE)
+ return switch_usb_to_charge_mode(the_chip);
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
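pm_power_set_property_usb() is reached through the power_supply core's set_property hook, typically from an OTG/transceiver driver when the port role changes. A minimal caller sketch, assuming the supply is registered under the name "usb" (the lookup and call are standard power_supply API):

#include <linux/power_supply.h>

/* Sketch: request host (OTG) mode on the USB port */
static int example_enter_host_mode(void)
{
	union power_supply_propval val = {
		.intval = POWER_SUPPLY_SCOPE_SYSTEM,
	};
	struct power_supply *usb_psy = power_supply_get_by_name("usb");

	if (!usb_psy || !usb_psy->set_property)
		return -ENODEV;

	/* ends up in pm_power_set_property_usb() above */
	return usb_psy->set_property(usb_psy, POWER_SUPPLY_PROP_SCOPE, &val);
}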
+
static int pm_power_get_property_usb(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -1234,6 +1297,13 @@
else
return 0;
break;
+
+ case POWER_SUPPLY_PROP_SCOPE:
+ if (the_chip->host_mode)
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ else
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
default:
return -EINVAL;
}
@@ -3851,6 +3921,7 @@
chip->usb_psy.properties = pm_power_props_usb,
chip->usb_psy.num_properties = ARRAY_SIZE(pm_power_props_usb),
chip->usb_psy.get_property = pm_power_get_property_usb,
+ chip->usb_psy.set_property = pm_power_set_property_usb,
chip->dc_psy.name = "pm8921-dc",
chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS,
diff --git a/drivers/power/pm8xxx-ccadc.c b/drivers/power/pm8xxx-ccadc.c
index ef31575..861bac8 100644
--- a/drivers/power/pm8xxx-ccadc.c
+++ b/drivers/power/pm8xxx-ccadc.c
@@ -79,27 +79,10 @@
static struct pm8xxx_ccadc_chip *the_chip;
#ifdef DEBUG
-static s64 microvolt_to_ccadc_reading_v1(s64 uv)
-{
- return div_s64(uv * CCADC_READING_RESOLUTION_D_V1,
- CCADC_READING_RESOLUTION_N_V1);
-}
-
-static s64 microvolt_to_ccadc_reading_v2(s64 uv)
-{
- return div_s64(uv * CCADC_READING_RESOLUTION_D_V2,
- CCADC_READING_RESOLUTION_N_V2);
-}
-
static s64 microvolt_to_ccadc_reading(struct pm8xxx_ccadc_chip *chip, s64 cc)
{
- /*
- * resolution (the value of a single bit) was changed after revision 2.0
- * for more accurate readings
- */
- return (the_chip->revision < PM8XXX_REVISION_8921_2p0) ?
- microvolt_to_ccadc_reading_v1((s64)cc) :
- microvolt_to_ccadc_reading_v2((s64)cc);
+ return div_s64(cc * CCADC_READING_RESOLUTION_D,
+ CCADC_READING_RESOLUTION_N);
}
#endif
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index d03b4b4..c271ca4 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -33,8 +33,6 @@
#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80
/* Maximum size we can accept in a single RX buffer */
@@ -48,7 +46,7 @@
#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
/* inactivity timeout for no rx/tx activity */
-#define SMUX_INACTIVITY_TIMEOUT_MS 1000
+#define SMUX_INACTIVITY_TIMEOUT_MS 1000000
/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
@@ -83,6 +81,30 @@
pr_info(x); \
} while (0)
+#define SMUX_PWR_PKT_RX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
+ smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_PWR_PKT_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+ if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+ pkt->hdr.flags == SMUX_WAKEUP_ACK) \
+ pr_info("smux: TX Wakeup ACK\n"); \
+ else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+ pkt->hdr.flags == SMUX_WAKEUP_REQ) \
+ pr_info("smux: TX Wakeup REQ\n"); \
+ else \
+ smux_log_pkt(pkt, 0); \
+ } \
+} while (0)
+
+#define SMUX_PWR_BYTE_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+ smux_log_pkt(pkt, 0); \
+ } \
+} while (0)
+
#define SMUX_LOG_PKT_RX(pkt) do { \
if (smux_debug_mask & MSM_SMUX_PKT) \
smux_log_pkt(pkt, 1); \
@@ -143,7 +165,7 @@
SMUX_PWR_OFF,
SMUX_PWR_TURNING_ON,
SMUX_PWR_ON,
- SMUX_PWR_TURNING_OFF_FLUSH,
+ SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
SMUX_PWR_TURNING_OFF,
SMUX_PWR_OFF_FLUSH,
};
@@ -172,12 +194,15 @@
unsigned local_state;
unsigned local_mode;
uint8_t local_tiocm;
+ unsigned options;
unsigned remote_state;
unsigned remote_mode;
uint8_t remote_tiocm;
int tx_flow_control;
+ int rx_flow_control_auto;
+ int rx_flow_control_client;
/* client callbacks and private data */
void *priv;
@@ -270,6 +295,7 @@
unsigned pwr_wakeup_delay_us;
unsigned tx_activity_flag;
unsigned powerdown_enabled;
+ unsigned power_ctl_remote_req_received;
struct list_head power_queue;
};
@@ -330,6 +356,8 @@
static int ssr_notifier_cb(struct notifier_block *this,
unsigned long code,
void *data);
+static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
/**
* Convert TTY Error Flags to string for logging purposes.
@@ -402,10 +430,13 @@
ch->local_state = SMUX_LCH_LOCAL_CLOSED;
ch->local_mode = SMUX_LCH_MODE_NORMAL;
ch->local_tiocm = 0x0;
+ ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->remote_tiocm = 0x0;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
ch->priv = 0;
ch->notify = 0;
ch->get_rx_buffer = 0;
@@ -486,6 +517,8 @@
ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
ch->remote_mode = SMUX_LCH_MODE_NORMAL;
ch->tx_flow_control = 0;
+ ch->rx_flow_control_auto = 0;
+ ch->rx_flow_control_client = 0;
/* Purge RX retry queue */
if (ch->rx_retry_queue_cnt)
@@ -537,67 +570,76 @@
char local_mode;
char remote_state;
char remote_mode;
- struct smux_lch_t *ch;
+ struct smux_lch_t *ch = NULL;
unsigned char *data;
- ch = &smux_lch[pkt->hdr.lcid];
+ if (!smux_assert_lch_id(pkt->hdr.lcid))
+ ch = &smux_lch[pkt->hdr.lcid];
- switch (ch->local_state) {
- case SMUX_LCH_LOCAL_CLOSED:
- local_state = 'C';
- break;
- case SMUX_LCH_LOCAL_OPENING:
- local_state = 'o';
- break;
- case SMUX_LCH_LOCAL_OPENED:
- local_state = 'O';
- break;
- case SMUX_LCH_LOCAL_CLOSING:
- local_state = 'c';
- break;
- default:
- local_state = 'U';
- break;
- }
+ if (ch) {
+ switch (ch->local_state) {
+ case SMUX_LCH_LOCAL_CLOSED:
+ local_state = 'C';
+ break;
+ case SMUX_LCH_LOCAL_OPENING:
+ local_state = 'o';
+ break;
+ case SMUX_LCH_LOCAL_OPENED:
+ local_state = 'O';
+ break;
+ case SMUX_LCH_LOCAL_CLOSING:
+ local_state = 'c';
+ break;
+ default:
+ local_state = 'U';
+ break;
+ }
- switch (ch->local_mode) {
- case SMUX_LCH_MODE_LOCAL_LOOPBACK:
- local_mode = 'L';
- break;
- case SMUX_LCH_MODE_REMOTE_LOOPBACK:
- local_mode = 'R';
- break;
- case SMUX_LCH_MODE_NORMAL:
- local_mode = 'N';
- break;
- default:
- local_mode = 'U';
- break;
- }
+ switch (ch->local_mode) {
+ case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+ local_mode = 'L';
+ break;
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ local_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ local_mode = 'N';
+ break;
+ default:
+ local_mode = 'U';
+ break;
+ }
- switch (ch->remote_state) {
- case SMUX_LCH_REMOTE_CLOSED:
- remote_state = 'C';
- break;
- case SMUX_LCH_REMOTE_OPENED:
- remote_state = 'O';
- break;
+ switch (ch->remote_state) {
+ case SMUX_LCH_REMOTE_CLOSED:
+ remote_state = 'C';
+ break;
+ case SMUX_LCH_REMOTE_OPENED:
+ remote_state = 'O';
+ break;
- default:
- remote_state = 'U';
- break;
- }
+ default:
+ remote_state = 'U';
+ break;
+ }
- switch (ch->remote_mode) {
- case SMUX_LCH_MODE_REMOTE_LOOPBACK:
- remote_mode = 'R';
- break;
- case SMUX_LCH_MODE_NORMAL:
- remote_mode = 'N';
- break;
- default:
- remote_mode = 'U';
- break;
+ switch (ch->remote_mode) {
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ remote_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ remote_mode = 'N';
+ break;
+ default:
+ remote_mode = 'U';
+ break;
+ }
+ } else {
+ /* broadcast channel */
+ local_state = '-';
+ local_mode = '-';
+ remote_state = '-';
+ remote_mode = '-';
}
/* determine command type (ACK, etc) */
@@ -611,6 +653,11 @@
if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
break;
+
+ case SMUX_CMD_PWR_CTL:
+ if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
};
i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
@@ -1342,6 +1389,7 @@
uint8_t lcid;
int ret = 0;
int do_retry = 0;
+ int tx_ready = 0;
int tmp;
int rx_len;
struct smux_lch_t *ch;
@@ -1385,8 +1433,20 @@
if (!list_empty(&ch->rx_retry_queue)) {
do_retry = 1;
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ !ch->rx_flow_control_auto &&
+ ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+ /* need to flow control RX */
+ ch->rx_flow_control_auto = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+ NULL);
+ }
if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
/* retry queue full */
+ pr_err("%s: ch %d RX retry queue full\n",
+ __func__, lcid);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1410,7 +1470,7 @@
}
ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
smux_tx_queue(ack_pkt, ch, 0);
- list_channel(ch);
+ tx_ready = 1;
} else {
pr_err("%s: Remote loopack allocation failure\n",
__func__);
@@ -1436,6 +1496,8 @@
/* buffer allocation failed - add to retry queue */
do_retry = 1;
} else if (tmp < 0) {
+ pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+ __func__, lcid, tmp);
schedule_notify(lcid, SMUX_READ_FAIL, NULL);
ret = -ENOMEM;
}
@@ -1482,6 +1544,8 @@
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
+ if (tx_ready)
+ list_channel(ch);
out:
return ret;
}
@@ -1599,21 +1663,20 @@
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
struct smux_pkt_t *ack_pkt = NULL;
+ int power_down = 0;
unsigned long flags;
+ SMUX_PWR_PKT_RX(pkt);
+
spin_lock_irqsave(&smux.tx_lock_lha2, flags);
if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
/* local sleep request ack */
- if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+ if (smux.power_state == SMUX_PWR_TURNING_OFF)
/* Power-down complete, turn off UART */
- SMUX_PWR("%s: Power %d->%d\n", __func__,
- smux.power_state, SMUX_PWR_OFF_FLUSH);
- smux.power_state = SMUX_PWR_OFF_FLUSH;
- queue_work(smux_tx_wq, &smux_inactivity_work);
- } else {
+ power_down = 1;
+ else
pr_err("%s: sleep request ack invalid in state %d\n",
__func__, smux.power_state);
- }
} else {
/*
* Remote sleep request
@@ -1625,9 +1688,10 @@
* The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
* the TX thread will set the state to SMUX_PWR_TURNING_OFF
* when it sends the packet.
+ *
+ * If we are already powering down, then no ACK is sent.
*/
- if (smux.power_state == SMUX_PWR_ON
- || smux.power_state == SMUX_PWR_TURNING_OFF) {
+ if (smux.power_state == SMUX_PWR_ON) {
ack_pkt = smux_alloc_pkt();
if (ack_pkt) {
SMUX_PWR("%s: Power %d->%d\n", __func__,
@@ -1644,11 +1708,31 @@
&smux.power_queue);
queue_work(smux_tx_wq, &smux_tx_work);
}
+ } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
+ /* Local power-down request still in TX queue */
+ SMUX_PWR("%s: Power-down shortcut - no ack\n",
+ __func__);
+ smux.power_ctl_remote_req_received = 1;
+ } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+ /*
+ * Local power-down request already sent to remote
+ * side, so this request gets treated as an ACK.
+ */
+ SMUX_PWR("%s: Power-down shortcut - no ack\n",
+ __func__);
+ power_down = 1;
} else {
pr_err("%s: sleep request invalid in state %d\n",
__func__, smux.power_state);
}
}
+
+ if (power_down) {
+ SMUX_PWR("%s: Power %d->%d\n", __func__,
+ smux.power_state, SMUX_PWR_OFF_FLUSH);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ }
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
return 0;
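This hunk reduces the power-down handshake to a small decision table: a PWR_CTL ACK completes a local sleep request, a remote request arriving in SMUX_PWR_ON is ACKed, and a collision with a local request already queued or sent is short-cut without an ACK. A self-contained sketch of that table (state and action names are stand-ins, not code from the patch):

/* Sketch of the SMUX_CMD_PWR_CTL RX decisions after this hunk. */
enum pwr_state { PWR_ON, PWR_TURNING_OFF_FLUSH, PWR_TURNING_OFF };
enum pwr_action { ACT_IGNORE, ACT_POWER_DOWN, ACT_SEND_ACK, ACT_NOTE_REMOTE_REQ };

static enum pwr_action pwr_ctl_rx_action(int is_ack, enum pwr_state state)
{
	if (is_ack)	/* an ACK is only valid for our in-flight request */
		return state == PWR_TURNING_OFF ? ACT_POWER_DOWN : ACT_IGNORE;

	switch (state) {
	case PWR_ON:
		return ACT_SEND_ACK;		/* ack, then flush and sleep */
	case PWR_TURNING_OFF_FLUSH:
		return ACT_NOTE_REMOTE_REQ;	/* shortcut - no ack sent */
	case PWR_TURNING_OFF:
		return ACT_POWER_DOWN;		/* request doubles as the ack */
	default:
		return ACT_IGNORE;		/* invalid in this state */
	}
}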
@@ -1665,10 +1749,9 @@
{
int ret = -ENXIO;
- SMUX_LOG_PKT_RX(pkt);
-
switch (pkt->hdr.cmd) {
case SMUX_CMD_OPEN_LCH:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1678,6 +1761,7 @@
break;
case SMUX_CMD_DATA:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1687,6 +1771,7 @@
break;
case SMUX_CMD_CLOSE_LCH:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1696,6 +1781,7 @@
break;
case SMUX_CMD_STATUS:
+ SMUX_LOG_PKT_RX(pkt);
if (smux_assert_lch_id(pkt->hdr.lcid)) {
pr_err("%s: invalid channel id %d\n",
__func__, pkt->hdr.lcid);
@@ -1709,10 +1795,12 @@
break;
case SMUX_CMD_BYTE:
+ SMUX_LOG_PKT_RX(pkt);
ret = smux_handle_rx_byte_cmd(pkt);
break;
default:
+ SMUX_LOG_PKT_RX(pkt);
pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
ret = -EINVAL;
}
@@ -1769,8 +1857,12 @@
queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
smux_send_byte(SMUX_WAKEUP_ACK);
- } else {
+ } else if (smux.power_state == SMUX_PWR_ON) {
smux_send_byte(SMUX_WAKEUP_ACK);
+ } else {
+ /* stale wakeup request from previous wakeup */
+ SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
+ __func__, smux.power_state);
}
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}
@@ -1794,7 +1886,7 @@
} else if (smux.power_state != SMUX_PWR_ON) {
/* invalid message */
- pr_err("%s: wakeup request ack invalid in state %d\n",
+ SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
__func__, smux.power_state);
}
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -1828,9 +1920,11 @@
smux.rx_state = SMUX_RX_MAGIC;
break;
case SMUX_WAKEUP_REQ:
+ SMUX_PWR("smux: RX Wakeup REQ\n");
smux_handle_wakeup_req();
break;
case SMUX_WAKEUP_ACK:
+ SMUX_PWR("smux: RX Wakeup ACK\n");
smux_handle_wakeup_ack();
break;
default:
@@ -2048,8 +2142,10 @@
*/
static void smux_flush_tty(void)
{
+ mutex_lock(&smux.mutex_lha0);
if (!smux.tty) {
pr_err("%s: ldisc not loaded\n", __func__);
+ mutex_unlock(&smux.mutex_lha0);
return;
}
@@ -2058,6 +2154,8 @@
if (tty_chars_in_buffer(smux.tty) > 0)
pr_err("%s: unable to flush UART queue\n", __func__);
+
+ mutex_unlock(&smux.mutex_lha0);
}
/**
@@ -2106,8 +2204,10 @@
/**
* Power-up the UART.
+ *
+ * Must be called with smux.mutex_lha0 already locked.
*/
-static void smux_uart_power_on(void)
+static void smux_uart_power_on_atomic(void)
{
struct uart_state *state;
@@ -2121,19 +2221,32 @@
}
/**
+ * Power-up the UART.
+ */
+static void smux_uart_power_on(void)
+{
+ mutex_lock(&smux.mutex_lha0);
+ smux_uart_power_on_atomic();
+ mutex_unlock(&smux.mutex_lha0);
+}
+
+/**
* Power down the UART.
*/
static void smux_uart_power_off(void)
{
struct uart_state *state;
+ mutex_lock(&smux.mutex_lha0);
if (!smux.tty || !smux.tty->driver_data) {
pr_err("%s: unable to find UART port for tty %p\n",
__func__, smux.tty);
+ mutex_unlock(&smux.mutex_lha0);
return;
}
state = smux.tty->driver_data;
msm_hs_request_clock_off(state->uart_port);
+ mutex_unlock(&smux.mutex_lha0);
}
/**
@@ -2148,44 +2261,17 @@
{
unsigned long flags;
unsigned wakeup_delay;
- int complete = 0;
- while (!smux.in_reset) {
- spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- if (smux.power_state == SMUX_PWR_ON) {
- /* wakeup complete */
- complete = 1;
- spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- break;
- } else {
- /* retry */
- wakeup_delay = smux.pwr_wakeup_delay_us;
- smux.pwr_wakeup_delay_us <<= 1;
- if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
- smux.pwr_wakeup_delay_us =
- SMUX_WAKEUP_DELAY_MAX;
- }
+ if (smux.in_reset)
+ return;
+
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_ON) {
+ /* wakeup complete */
+ smux.pwr_wakeup_delay_us = 1;
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
- SMUX_DBG("%s: triggering wakeup\n", __func__);
- smux_send_byte(SMUX_WAKEUP_REQ);
-
- if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
- SMUX_DBG("%s: sleeping for %u us\n", __func__,
- wakeup_delay);
- usleep_range(wakeup_delay, 2*wakeup_delay);
- } else {
- /* schedule delayed work */
- SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
- __func__, wakeup_delay / 1000);
- queue_delayed_work(smux_tx_wq,
- &smux_wakeup_delayed_work,
- msecs_to_jiffies(wakeup_delay / 1000));
- break;
- }
- }
-
- if (complete) {
SMUX_DBG("%s: wakeup complete\n", __func__);
+
/*
* Cancel any pending retry. This avoids a race condition with
* a new power-up request because:
@@ -2194,6 +2280,38 @@
* workqueue as new TX wakeup requests
*/
cancel_delayed_work(&smux_wakeup_delayed_work);
+ queue_work(smux_tx_wq, &smux_tx_work);
+ } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
+ /* retry wakeup */
+ wakeup_delay = smux.pwr_wakeup_delay_us;
+ smux.pwr_wakeup_delay_us <<= 1;
+ if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+ smux.pwr_wakeup_delay_us =
+ SMUX_WAKEUP_DELAY_MAX;
+
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ SMUX_PWR("%s: triggering wakeup\n", __func__);
+ smux_send_byte(SMUX_WAKEUP_REQ);
+
+ if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+ SMUX_DBG("%s: sleeping for %u us\n", __func__,
+ wakeup_delay);
+ usleep_range(wakeup_delay, 2*wakeup_delay);
+ queue_work(smux_tx_wq, &smux_wakeup_work);
+ } else {
+ /* schedule delayed work */
+ SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+ __func__, wakeup_delay / 1000);
+ queue_delayed_work(smux_tx_wq,
+ &smux_wakeup_delayed_work,
+ msecs_to_jiffies(wakeup_delay / 1000));
+ }
+ } else {
+ /* wakeup aborted */
+ smux.pwr_wakeup_delay_us = 1;
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ SMUX_PWR("%s: wakeup aborted\n", __func__);
+ cancel_delayed_work(&smux_wakeup_delayed_work);
}
}
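The rewrite keeps the exponential backoff but replaces the in-function retry loop with work items that requeue themselves, so the TX workqueue is never blocked in a sleep. A self-contained sketch of the delay schedule (the bound stands in for SMUX_WAKEUP_DELAY_MAX; the helper is not in the driver):

#define WAKEUP_DELAY_MAX_US 2048	/* stand-in for SMUX_WAKEUP_DELAY_MAX */

/* Returns the delay for this attempt and doubles it for the next one,
 * saturating at the maximum. Short delays usleep_range() and requeue
 * immediately; long ones become delayed work. */
static unsigned next_wakeup_delay_us(unsigned *delay_us)
{
	unsigned cur = *delay_us;

	*delay_us <<= 1;
	if (*delay_us > WAKEUP_DELAY_MAX_US)
		*delay_us = WAKEUP_DELAY_MAX_US;
	return cur;
}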
@@ -2221,8 +2339,9 @@
if (pkt) {
SMUX_PWR("%s: Power %d->%d\n", __func__,
smux.power_state,
- SMUX_PWR_TURNING_OFF);
- smux.power_state = SMUX_PWR_TURNING_OFF;
+ SMUX_PWR_TURNING_OFF_FLUSH);
+ smux.power_state =
+ SMUX_PWR_TURNING_OFF_FLUSH;
/* send power-down request */
pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
@@ -2236,9 +2355,6 @@
__func__);
}
}
- } else {
- SMUX_DBG("%s: link inactive, but powerdown disabled\n",
- __func__);
}
}
smux.tx_activity_flag = 0;
@@ -2275,18 +2391,32 @@
/**
* Remove RX retry packet from channel and free it.
*
- * Must be called with state_lock_lhb1 locked.
- *
* @ch Channel for retry packet
* @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
*/
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
struct smux_rx_pkt_retry *retry)
{
+ int tx_ready = 0;
+
list_del(&retry->rx_retry_list);
--ch->rx_retry_queue_cnt;
smux_free_pkt(retry->pkt);
kfree(retry);
+
+ if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+ (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+ ch->rx_flow_control_auto) {
+ ch->rx_flow_control_auto = 0;
+ smux_rx_flow_control_updated(ch);
+ schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+ tx_ready = 1;
+ }
+ return tx_ready;
}
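Returning a flag here lets callers defer list_channel() until after state_lock_lhb1 is released; calling it with the channel spinlock held was the hazard this change removes. The call-site pattern, restated from the retry worker below:

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	tx_ready = smux_remove_rx_retry(ch, retry); /* may release auto flow control */
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch); /* safe only after the spinlock is dropped */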
/**
@@ -2357,6 +2487,8 @@
union notifier_metadata metadata;
int tmp;
unsigned long flags;
+ int immediate_retry = 0;
+ int tx_ready = 0;
ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
@@ -2368,7 +2500,7 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- smux_remove_rx_retry(ch, retry);
+ (void)smux_remove_rx_retry(ch, retry);
}
}
@@ -2383,7 +2515,8 @@
rx_retry_list);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
- SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+ SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+ __func__, ch->lcid, retry);
metadata.read.pkt_priv = 0;
metadata.read.buffer = 0;
tmp = ch->get_rx_buffer(ch->priv,
@@ -2392,33 +2525,44 @@
retry->pkt->hdr.payload_len);
if (tmp == 0 && metadata.read.buffer) {
/* have valid RX buffer */
+
memcpy(metadata.read.buffer, retry->pkt->payload,
retry->pkt->hdr.payload_len);
metadata.read.len = retry->pkt->hdr.payload_len;
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+ if (tx_ready)
+ list_channel(ch);
+
+ immediate_retry = 1;
} else if (tmp == -EAGAIN ||
(tmp == 0 && !metadata.read.buffer)) {
/* retry again */
retry->timeout_in_ms <<= 1;
if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
/* timed out */
+ pr_err("%s: ch %d RX retry client timeout\n",
+ __func__, ch->lcid);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
- schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
} else {
/* client error - drop packet */
+ pr_err("%s: ch %d RX retry client failed (%d)\n",
+ __func__, ch->lcid, tmp);
spin_lock_irqsave(&ch->state_lock_lhb1, flags);
- smux_remove_rx_retry(ch, retry);
+ tx_ready = smux_remove_rx_retry(ch, retry);
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+ if (tx_ready)
+ list_channel(ch);
}
/* schedule next retry */
@@ -2427,8 +2571,12 @@
retry = list_first_entry(&ch->rx_retry_queue,
struct smux_rx_pkt_retry,
rx_retry_list);
- queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
- msecs_to_jiffies(retry->timeout_in_ms));
+
+ if (immediate_retry)
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+ else
+ queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+ msecs_to_jiffies(retry->timeout_in_ms));
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
@@ -2472,14 +2620,12 @@
if (!list_empty(&smux.lch_tx_ready_list) ||
!list_empty(&smux.power_queue)) {
/* data to transmit, do wakeup */
- smux.pwr_wakeup_delay_us = 1;
SMUX_PWR("%s: Power %d->%d\n", __func__,
smux.power_state,
SMUX_PWR_TURNING_ON);
smux.power_state = SMUX_PWR_TURNING_ON;
spin_unlock_irqrestore(&smux.tx_lock_lha2,
flags);
- smux_uart_power_on();
queue_work(smux_tx_wq, &smux_wakeup_work);
} else {
/* no activity -- stay asleep */
@@ -2496,8 +2642,39 @@
list_del(&pkt->list);
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+ /* Adjust power state if this is a flush command */
+ spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+ if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
+ pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
+ if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
+ smux.power_ctl_remote_req_received) {
+ /*
+ * Sending remote power-down request ACK
+ * or sending local power-down request
+ * and we already received a remote
+ * power-down request.
+ */
+ SMUX_PWR("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_OFF_FLUSH);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ smux.power_ctl_remote_req_received = 0;
+ queue_work(smux_tx_wq,
+ &smux_inactivity_work);
+ } else {
+ /* sending local power-down request */
+ SMUX_PWR("%s: Power %d->%d\n", __func__,
+ smux.power_state,
+ SMUX_PWR_TURNING_OFF);
+ smux.power_state = SMUX_PWR_TURNING_OFF;
+ }
+ }
+ spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
/* send the packet */
- SMUX_LOG_PKT_TX(pkt);
+ smux_uart_power_on();
+ smux.tx_activity_flag = 1;
+ SMUX_PWR_PKT_TX(pkt);
if (!smux_byte_loopback) {
smux_tx_tty(pkt);
smux_flush_tty();
@@ -2505,19 +2682,6 @@
smux_tx_loopback(pkt);
}
- /* Adjust power state if this is a flush command */
- spin_lock_irqsave(&smux.tx_lock_lha2, flags);
- if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
- pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
- (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
- SMUX_PWR("%s: Power %d->%d\n", __func__,
- smux.power_state,
- SMUX_PWR_OFF_FLUSH);
- smux.power_state = SMUX_PWR_OFF_FLUSH;
- queue_work(smux_tx_wq, &smux_inactivity_work);
- }
- spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-
smux_free_pkt(pkt);
continue;
}
@@ -2534,7 +2698,7 @@
if (smux.power_state != SMUX_PWR_ON) {
/* channel not ready to transmit */
- SMUX_DBG("%s: can not tx with power state %d\n",
+ SMUX_DBG("%s: waiting for link up (state %d)\n",
__func__,
smux.power_state);
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2577,7 +2741,7 @@
--ch->tx_pending_data_cnt;
if (ch->notify_lwm &&
ch->tx_pending_data_cnt
- <= SMUX_WM_LOW) {
+ <= SMUX_TX_WM_LOW) {
ch->notify_lwm = 0;
low_wm_notif = 1;
}
@@ -2604,6 +2768,34 @@
}
}
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+ int updated = 0;
+ int prev_state;
+
+ prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+ ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+ else
+ ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+ if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+ smux_send_status_cmd(ch);
+ updated = 1;
+ }
+
+ return updated;
+}
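Client-requested (rx_flow_control_client) and automatic (rx_flow_control_auto) flow control now feed a single TIOCM bit, so either source alone keeps remote TX stopped and a STATUS command is sent only when the combined bit actually changes. A runnable demo of the combined truth table (stand-alone, not driver code):

#include <stdio.h>

int main(void)
{
	int client, aut;

	for (client = 0; client <= 1; client++)
		for (aut = 0; aut <= 1; aut++)
			printf("client=%d auto=%d -> FLOW_CNTL=%d\n",
			       client, aut, client || aut);
	return 0;
}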
/**********************************************************************/
/* Kernel API */
@@ -2646,17 +2838,30 @@
if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
ch->local_mode = SMUX_LCH_MODE_NORMAL;
- /* Flow control */
+ /* RX Flow control */
if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 1;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
- ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
- ret = smux_send_status_cmd(ch);
- tx_ready = 1;
+ ch->rx_flow_control_client = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
+ }
+
+ /* Auto RX Flow Control */
+ if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option enabled\n",
+ __func__);
+ ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ }
+
+ if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+ SMUX_DBG("%s: auto rx flow control option disabled\n",
+ __func__);
+ ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+ ch->rx_flow_control_auto = 0;
+ tx_ready |= smux_rx_flow_control_updated(ch);
}
spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2880,16 +3085,16 @@
/* verify high watermark */
SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
pr_err("%s: ch %d high watermark %d exceeded %d\n",
- __func__, lcid, SMUX_WM_HIGH,
+ __func__, lcid, SMUX_TX_WM_HIGH,
ch->tx_pending_data_cnt);
ret = -EAGAIN;
goto out_inner;
}
/* queue packet for transmit */
- if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+ if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
ch->notify_lwm = 1;
pr_err("%s: high watermark hit\n", __func__);
schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2936,7 +3141,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+ if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
is_full = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -2964,7 +3169,7 @@
ch = &smux_lch[lcid];
spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
- if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+ if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
is_low = 1;
spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
@@ -3264,7 +3469,7 @@
spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
if (power_up_uart)
- smux_uart_power_on();
+ smux_uart_power_on_atomic();
/* Disconnect from TTY */
smux.tty = NULL;
@@ -3385,6 +3590,7 @@
smux.power_state = SMUX_PWR_OFF;
smux.pwr_wakeup_delay_us = 1;
smux.powerdown_enabled = 0;
+ smux.power_ctl_remote_req_received = 0;
INIT_LIST_HEAD(&smux.power_queue);
smux.rx_activity_flag = 0;
smux.tx_activity_flag = 0;
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 69adbf3..2b8f028 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -49,15 +49,6 @@
static uint32_t smux_ctl_ch_id[] = {
SMUX_DATA_CTL_0,
- SMUX_DATA_CTL_1,
- SMUX_DATA_CTL_2,
- SMUX_DATA_CTL_3,
- SMUX_DATA_CTL_4,
- SMUX_DATA_CTL_5,
- SMUX_DATA_CTL_6,
- SMUX_DATA_CTL_7,
- SMUX_USB_RMNET_CTL_0,
- SMUX_CSVT_CTL_0
};
#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
@@ -78,6 +69,7 @@
uint32_t read_avail;
struct list_head rx_list;
+ int abort_wait;
wait_queue_head_t read_wait_queue;
wait_queue_head_t write_wait_queue;
@@ -359,7 +351,8 @@
r = wait_event_interruptible_timeout(
devp->write_wait_queue,
- (devp->state == SMUX_CONNECTED),
+ (devp->state == SMUX_CONNECTED ||
+ devp->abort_wait),
(5 * HZ));
if (r == 0)
r = -ETIMEDOUT;
@@ -372,6 +365,13 @@
msm_smux_close(devp->id);
return r;
+ } else if (devp->abort_wait) {
+ pr_err("%s: %s: Open command aborted\n",
+ SMUX_CTL_MODULE_NAME, __func__);
+ r = -EIO;
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
} else if (devp->state != SMUX_CONNECTED) {
pr_err(SMUX_CTL_MODULE_NAME ": %s: "
"Invalid open notification\n", __func__);
@@ -440,8 +440,9 @@
if (signal_pending(current))
r = -ERESTARTSYS;
-
- if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+ else if (smux_ctl_devp[dev_index]->abort_wait)
+ r = -ENETRESET;
+ else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
smux_ctl_devp[dev_index]->is_channel_reset != 0)
r = -ENETRESET;
@@ -560,6 +561,9 @@
if (signal_pending(current))
r = -ERESTARTSYS;
+
+ else if (smux_ctl_devp[dev_index]->abort_wait)
+ r = -ENETRESET;
else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
smux_ctl_devp[dev_index]->is_channel_reset != 0)
r = -ENETRESET;
@@ -645,6 +649,13 @@
r = wait_event_interruptible(devp->write_wait_queue,
0 != (write_err = smux_ctl_writeable(id)));
+
+ if (-EIO == r) {
+ pr_err("%s: %s: wait_event_interruptible ret %i\n",
+ SMUX_CTL_MODULE_NAME, __func__, r);
+ return -EIO;
+ }
+
if (r < 0) {
pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
"ret %i\n", __func__, r);
@@ -699,6 +710,25 @@
.unlocked_ioctl = smux_ctl_ioctl,
};
+static void smux_ctl_reset_channel(struct smux_ctl_dev *devp)
+{
+ devp->is_high_wm = 0;
+ devp->write_pending = 0;
+ devp->is_channel_reset = 0;
+ devp->state = SMUX_DISCONNECTED;
+ devp->read_avail = 0;
+
+ devp->stats.bytes_tx = 0;
+ devp->stats.bytes_rx = 0;
+ devp->stats.pkts_tx = 0;
+ devp->stats.pkts_rx = 0;
+ devp->stats.cnt_ssr = 0;
+ devp->stats.cnt_read_fail = 0;
+ devp->stats.cnt_write_fail = 0;
+ devp->stats.cnt_high_wm_hit = 0;
+ devp->abort_wait = 0;
+}
+
static int smux_ctl_probe(struct platform_device *pdev)
{
int i;
@@ -706,6 +736,27 @@
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+ if (smux_ctl_inited) {
+ /* Already loaded once - reinitialize channels */
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ struct smux_ctl_dev *devp = smux_ctl_devp[i];
+
+ smux_ctl_reset_channel(devp);
+
+ if (atomic_read(&devp->ref_count)) {
+ r = msm_smux_open(devp->id,
+ devp,
+ smux_ctl_notify_cb,
+ smux_ctl_get_rx_buf_cb);
+ if (r)
+ pr_err("%s: unable to reopen ch %d, ret %d\n",
+ __func__, devp->id, r);
+ }
+ }
+ return 0;
+ }
+
+ /* Create character devices */
for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
GFP_KERNEL);
@@ -718,26 +769,13 @@
smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
atomic_set(&smux_ctl_devp[i]->ref_count, 0);
- smux_ctl_devp[i]->is_high_wm = 0;
- smux_ctl_devp[i]->write_pending = 0;
- smux_ctl_devp[i]->is_channel_reset = 0;
- smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
- smux_ctl_devp[i]->read_avail = 0;
-
- smux_ctl_devp[i]->stats.bytes_tx = 0;
- smux_ctl_devp[i]->stats.bytes_rx = 0;
- smux_ctl_devp[i]->stats.pkts_tx = 0;
- smux_ctl_devp[i]->stats.pkts_rx = 0;
- smux_ctl_devp[i]->stats.cnt_ssr = 0;
- smux_ctl_devp[i]->stats.cnt_read_fail = 0;
- smux_ctl_devp[i]->stats.cnt_write_fail = 0;
- smux_ctl_devp[i]->stats.cnt_high_wm_hit = 0;
mutex_init(&smux_ctl_devp[i]->dev_lock);
init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
mutex_init(&smux_ctl_devp[i]->rx_lock);
INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+ smux_ctl_reset_channel(smux_ctl_devp[i]);
}
r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
@@ -761,7 +799,8 @@
cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
- r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+ r = cdev_add(&smux_ctl_devp[i]->cdev,
+ (smux_ctl_number + i), 1);
if (IS_ERR_VALUE(r)) {
pr_err(SMUX_CTL_MODULE_NAME ": %s: "
@@ -818,15 +857,32 @@
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
- cdev_del(&smux_ctl_devp[i]->cdev);
- kfree(smux_ctl_devp[i]);
- device_destroy(smux_ctl_classp,
- MKDEV(MAJOR(smux_ctl_number), i));
- }
- class_destroy(smux_ctl_classp);
- unregister_chrdev_region(MAJOR(smux_ctl_number),
- SMUX_CTL_NUM_CHANNELS);
+ struct smux_ctl_dev *devp = smux_ctl_devp[i];
+ mutex_lock(&devp->dev_lock);
+ devp->abort_wait = 1;
+ wake_up(&devp->write_wait_queue);
+ wake_up(&devp->read_wait_queue);
+ mutex_unlock(&devp->dev_lock);
+
+ /* Empty RX queue */
+ mutex_lock(&devp->rx_lock);
+ while (!list_empty(&devp->rx_list)) {
+ struct smux_ctl_list_elem *list_elem;
+
+ list_elem = list_first_entry(
+ &devp->rx_list,
+ struct smux_ctl_list_elem,
+ list);
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ }
+ devp->read_avail = 0;
+ mutex_unlock(&devp->rx_lock);
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Ends\n", __func__);
return 0;
}
@@ -841,8 +897,6 @@
static int __init smux_ctl_init(void)
{
- msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
-
SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
return platform_driver_register(&smux_ctl_driver);
}
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index f644ff0..353c762 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -32,6 +32,10 @@
/* Maximum number of packets in retry queue */
#define SMUX_RX_RETRY_MAX_PKTS 32
+#define SMUX_RX_WM_HIGH 16
+#define SMUX_RX_WM_LOW 4
+#define SMUX_TX_WM_LOW 2
+#define SMUX_TX_WM_HIGH 4
struct tty_struct;
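These watermarks form a hysteresis band: automatic flow control asserts once the RX retry queue reaches SMUX_RX_WM_HIGH and releases only after it drains to SMUX_RX_WM_LOW, so a channel hovering near one threshold does not toggle STATUS commands. A self-contained sketch of the band with the values above inlined:

/* Next flow-control state given the current state and queue depth. */
static int rx_flow_control_next(int asserted, int queue_cnt)
{
	if (!asserted && queue_cnt >= 16)	/* SMUX_RX_WM_HIGH */
		return 1;	/* assert: ask remote to stop TX */
	if (asserted && queue_cnt <= 4)		/* SMUX_RX_WM_LOW */
		return 0;	/* release: queue has drained */
	return asserted;	/* inside the band: no change */
}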
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index 62e9465..e488a63 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -43,37 +43,46 @@
* @failed - set to true if test fails
*/
#define UT_ASSERT_INT(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ int a_tmp = (a); \
+ int b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
#define UT_ASSERT_PTR(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ void *a_tmp = (a); \
+ void *b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
#define UT_ASSERT_UINT(a, cmp, b) \
- if (!((a)cmp(b))) { \
+ { \
+ unsigned a_tmp = (a); \
+ unsigned b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
__func__, __LINE__, \
- a, b); \
+ a_tmp, b_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
/**
* In-range unit test assertion for test cases.
@@ -94,16 +103,20 @@
* @failed - set to true if test fails
*/
#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
- if (((a) < (minv)) || ((a) > (maxv))) { \
+ { \
+ int a_tmp = (a); \
+ int minv_tmp = (minv); \
+ int maxv_tmp = (maxv); \
+ if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
i += scnprintf(buf + i, max - i, \
"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
#a "(%d) > " #maxv "(%d)\n", \
__func__, __LINE__, \
- a, minv, a, maxv); \
+ a_tmp, minv_tmp, a_tmp, maxv_tmp); \
failed = 1; \
break; \
} \
- do {} while (0)
+ }
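The rewritten assertions copy each operand into a local (a_tmp, b_tmp) before testing and printing it, so an operand with side effects is evaluated exactly once. A minimal sketch of the hazard in the old form, using a hypothetical helper (not from the test suite):

static int calls;

/* Hypothetical operand with a side effect. Under the old macros,
 * UT_ASSERT_INT(next_value(), ==, 0) ran next_value() once in the
 * comparison and again in the scnprintf(), so the printed value was
 * not the value that was tested. The locals above prevent that. */
static int next_value(void)
{
	return ++calls;
}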
static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
@@ -172,6 +185,8 @@
int event_disconnected_ssr;
int event_low_wm;
int event_high_wm;
+ int event_rx_retry_high_wm;
+ int event_rx_retry_low_wm;
/* TIOCM changes */
int event_tiocm;
@@ -222,6 +237,8 @@
cb->event_disconnected_ssr = 0;
cb->event_low_wm = 0;
cb->event_high_wm = 0;
+ cb->event_rx_retry_high_wm = 0;
+ cb->event_rx_retry_low_wm = 0;
cb->event_tiocm = 0;
cb->tiocm_meta.tiocm_old = 0;
cb->tiocm_meta.tiocm_new = 0;
@@ -282,15 +299,17 @@
"\tevent_disconnected_ssr=%d\n"
"\tevent_low_wm=%d\n"
"\tevent_high_wm=%d\n"
+ "\tevent_rx_retry_high_wm=%d\n"
+ "\tevent_rx_retry_low_wm=%d\n"
"\tevent_tiocm=%d\n"
"\tevent_read_done=%d\n"
"\tevent_read_failed=%d\n"
- "\tread_events=%d\n"
+ "\tread_events empty=%d\n"
"\tget_rx_retry=%d\n"
- "\tget_rx_retry_events=%d\n"
+ "\tget_rx_retry_events empty=%d\n"
"\tevent_write_done=%d\n"
"\tevent_write_failed=%d\n"
- "\twrite_events=%d\n",
+ "\twrite_events empty=%d\n",
cb->cb_count,
cb->cb_completion.done,
cb->event_connected,
@@ -298,12 +317,14 @@
cb->event_disconnected_ssr,
cb->event_low_wm,
cb->event_high_wm,
+ cb->event_rx_retry_high_wm,
+ cb->event_rx_retry_low_wm,
cb->event_tiocm,
cb->event_read_done,
cb->event_read_failed,
- !list_empty(&cb->read_events),
+ list_empty(&cb->read_events),
cb->get_rx_buff_retry_count,
- !list_empty(&cb->get_rx_buff_retry_events),
+ list_empty(&cb->get_rx_buff_retry_events),
cb->event_write_done,
cb->event_write_failed,
list_empty(&cb->write_events)
@@ -416,6 +437,19 @@
spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
break;
+ case SMUX_RX_RETRY_HIGH_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_high_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
+ case SMUX_RX_RETRY_LOW_WM_HIT:
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ ++cb_data_ptr->event_rx_retry_low_wm;
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ break;
+
+
case SMUX_TIOCM_UPDATE:
spin_lock_irqsave(&cb_data_ptr->lock, flags);
++cb_data_ptr->event_tiocm;
@@ -1315,7 +1349,7 @@
/* open port for loopback */
ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
SMUX_CH_OPTION_LOCAL_LOOPBACK,
- 0);
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
UT_ASSERT_INT(ret, ==, 0);
ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1568,6 +1602,132 @@
return i;
}
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int i = 0;
+ int failed = 0;
+ int ret;
+ int try;
+ int try_rx_retry_wm;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ pr_err("%s", buf);
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ smux_byte_loopback = SMUX_TEST_LCID;
+ while (!failed) {
+ /* open port for loopback */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK
+ | SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+ 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+ smux_mock_cb, get_rx_buffer_mock);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* Test high rx-retry watermark */
+ get_rx_buffer_mock_fail = 1;
+ try_rx_retry_wm = 0;
+ for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+ pr_err("%s: try %d\n", __func__, try);
+ ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+ test_array, sizeof(test_array));
+ UT_ASSERT_INT(ret, ==, 0);
+ if (failed)
+ break;
+
+ if (!try_rx_retry_wm &&
+ cb_data.event_rx_retry_high_wm) {
+ /* RX high watermark hit */
+ try_rx_retry_wm = try + 1;
+ break;
+ }
+
+ while (cb_data.event_write_done <= try) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+ }
+ if (failed)
+ break;
+
+ /* RX retry high watermark should have been set */
+ UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+ UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+ /*
+	 * Disable RX buffer allocation failures and wait for
+	 * SMUX_RX_WM_HIGH packets to be read successfully.
+ */
+ get_rx_buffer_mock_fail = 0;
+ while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, 2*HZ),
+ >, 0);
+ INIT_COMPLETION(cb_data.cb_completion);
+ }
+ if (failed)
+ break;
+
+ UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+ UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+ cb_data.event_read_done);
+ UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+ smux_byte_loopback = 0;
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
static char debug_buffer[DEBUG_BUFMAX];
static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1631,6 +1791,8 @@
smux_ut_local_smuxld_receive_buf);
debug_create("ut_local_get_rx_buff_retry", 0444, dent,
smux_ut_local_get_rx_buff_retry);
+ debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+ smux_ut_local_get_rx_buff_retry_auto);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 5df030a..23b582d 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -579,9 +579,11 @@
return -ENOMEM;
}
- dotg->irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	/* DWC3 has a separate IRQ line for OTG events (ID/BSV etc.) */
+ dotg->irq = platform_get_irq_byname(to_platform_device(dwc->dev),
+ "otg_irq");
if (dotg->irq < 0) {
- dev_err(dwc->dev, "%s: missing IRQ\n", __func__);
+ dev_err(dwc->dev, "%s: missing OTG IRQ\n", __func__);
ret = -ENODEV;
goto err1;
}
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index f82c2fe..18f0721 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3334,6 +3334,8 @@
void __iomem *regs)
{
struct ci13xxx *udc;
+ struct ci13xxx_platform_data *pdata =
+ (struct ci13xxx_platform_data *)(dev->platform_data);
int retval = 0, i;
trace("%p, %p, %p", dev, regs, driver->name);
@@ -3362,6 +3364,9 @@
INIT_LIST_HEAD(&udc->gadget.ep_list);
udc->gadget.ep0 = NULL;
+ if (pdata)
+ udc->gadget.usb_core_id = pdata->usb_core_id;
+
dev_set_name(&udc->gadget.dev, "gadget");
udc->gadget.dev.dma_mask = dev->dma_mask;
udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index a189b45..6527b76 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -158,6 +158,11 @@
struct usb_phy *transceiver; /* Transceiver struct */
};
+struct ci13xxx_platform_data {
+ u8 usb_core_id;
+ void *prv_data;
+};
+
/******************************************************************************
* REGISTERS
*****************************************************************************/
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 6e807cb..59ff8d7 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -449,6 +449,7 @@
struct f_rndis *rndis = req->context;
struct usb_composite_dev *cdev = rndis->port.func.config->cdev;
int status;
+ rndis_init_msg_type *buf;
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
@@ -456,6 +457,19 @@
if (status < 0)
ERROR(cdev, "RNDIS command error %d, %d/%d\n",
status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+
+ if (buf->MessageType == REMOTE_NDIS_INITIALIZE_MSG) {
+ if (buf->MaxTransferSize > 2048)
+ rndis->port.multi_pkt_xfer = 1;
+ else
+ rndis->port.multi_pkt_xfer = 0;
+ DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
+ __func__, buf->MaxTransferSize,
+ rndis->port.multi_pkt_xfer ? "enabled" :
+ "disabled");
+ }
// spin_unlock(&dev->lock);
}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 0cb2121..16c4afb 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -585,12 +585,12 @@
resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
- resp->MaxPacketsPerTransfer = cpu_to_le32(1);
- resp->MaxTransferSize = cpu_to_le32(
- params->dev->mtu
+ resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD);
+ resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD *
+ (params->dev->mtu
+ sizeof(struct ethhdr)
+ sizeof(struct rndis_packet_msg_type)
- + 22);
+ + 22));
resp->PacketAlignmentFactor = cpu_to_le32(0);
resp->AFListOffset = cpu_to_le32(0);
resp->AFListSize = cpu_to_le32(0);
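MaxTransferSize now covers TX_SKB_HOLD_THRESHOLD aggregated frames instead of one. A worked example, assuming a 1500-byte MTU, the 14-byte Ethernet header, the 44-byte RNDIS packet header, and the 22-byte slack used above:

#include <stdio.h>

int main(void)
{
	unsigned mtu = 1500;
	unsigned eth_hdr = 14;		/* sizeof(struct ethhdr) */
	unsigned rndis_hdr = 44;	/* sizeof(struct rndis_packet_msg_type) */
	unsigned slack = 22;
	unsigned hold = 3;		/* TX_SKB_HOLD_THRESHOLD */

	/* prints: MaxTransferSize = 4740 */
	printf("MaxTransferSize = %u\n",
	       hold * (mtu + eth_hdr + rndis_hdr + slack));
	return 0;
}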
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 62955c2..78ab8b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -59,6 +59,11 @@
spinlock_t req_lock; /* guard {rx,tx}_reqs */
struct list_head tx_reqs, rx_reqs;
unsigned tx_qlen;
+/* Minimum number of TX USB requests queued to the UDC */
+#define TX_REQ_THRESHOLD 5
+ int no_tx_req_used;
+ int tx_skb_hold_count;
+ u32 tx_req_bufsize;
struct sk_buff_head rx_frames;
@@ -86,7 +91,7 @@
#ifdef CONFIG_USB_GADGET_DUALSPEED
-static unsigned qmult = 5;
+static unsigned qmult = 10;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
@@ -457,6 +462,11 @@
{
struct sk_buff *skb = req->context;
struct eth_dev *dev = ep->driver_data;
+ struct net_device *net = dev->net;
+ struct usb_request *new_req;
+ struct usb_ep *in;
+ int length;
+ int retval;
switch (req->status) {
default:
@@ -467,14 +477,73 @@
case -ESHUTDOWN: /* disconnect etc */
break;
case 0:
- dev->net->stats.tx_bytes += skb->len;
+ if (!req->zero)
+ dev->net->stats.tx_bytes += req->length-1;
+ else
+ dev->net->stats.tx_bytes += req->length;
}
dev->net->stats.tx_packets++;
spin_lock(&dev->req_lock);
- list_add(&req->list, &dev->tx_reqs);
- spin_unlock(&dev->req_lock);
- dev_kfree_skb_any(skb);
+ list_add_tail(&req->list, &dev->tx_reqs);
+
+ if (dev->port_usb->multi_pkt_xfer) {
+ dev->no_tx_req_used--;
+ req->length = 0;
+ in = dev->port_usb->in_ep;
+
+ if (!list_empty(&dev->tx_reqs)) {
+ new_req = container_of(dev->tx_reqs.next,
+ struct usb_request, list);
+ list_del(&new_req->list);
+ spin_unlock(&dev->req_lock);
+ if (new_req->length > 0) {
+ length = new_req->length;
+
+ /* NCM requires no zlp if transfer is
+ * dwNtbInMaxSize */
+ if (dev->port_usb->is_fixed &&
+ length == dev->port_usb->fixed_in_len &&
+ (length % in->maxpacket) == 0)
+ new_req->zero = 0;
+ else
+ new_req->zero = 1;
+
+ /* use zlp framing on tx for strict CDC-Ether
+ * conformance, though any robust network rx
+ * path ignores extra padding. and some hardware
+ * doesn't like to write zlps.
+ */
+ if (new_req->zero && !dev->zlp &&
+ (length % in->maxpacket) == 0) {
+ new_req->zero = 0;
+ length++;
+ }
+
+ new_req->length = length;
+ retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+ switch (retval) {
+ default:
+ DBG(dev, "tx queue err %d\n", retval);
+ break;
+ case 0:
+ spin_lock(&dev->req_lock);
+ dev->no_tx_req_used++;
+ spin_unlock(&dev->req_lock);
+ net->trans_start = jiffies;
+ }
+ } else {
+ spin_lock(&dev->req_lock);
+ list_add(&new_req->list, &dev->tx_reqs);
+ spin_unlock(&dev->req_lock);
+ }
+ } else {
+ spin_unlock(&dev->req_lock);
+ }
+ } else {
+ spin_unlock(&dev->req_lock);
+ dev_kfree_skb_any(skb);
+ }
if (netif_carrier_ok(dev->net))
netif_wake_queue(dev->net);
@@ -485,6 +554,26 @@
return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
+ struct list_head *act;
+ struct usb_request *req;
+
+ dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD *
+ (dev->net->mtu
+ + sizeof(struct ethhdr)
+ /* size of rndis_packet_msg_type */
+ + 44
+ + 22));
+
+ list_for_each(act, &dev->tx_reqs) {
+ req = container_of(act, struct usb_request, list);
+ if (!req->buf)
+ req->buf = kmalloc(dev->tx_req_bufsize,
+ GFP_ATOMIC);
+ }
+}
+
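alloc_tx_buffer() sizes every request for a full aggregate (the same arithmetic as the RNDIS INITIALIZE response) and runs lazily on the first multi-packet transmit. The GFP_ATOMIC kmalloc() result is not checked above; a hardened variant of the loop, which would require the function to return int, could look like this (fragment, kernel types assumed):

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf) {
			req->buf = kmalloc(dev->tx_req_bufsize, GFP_ATOMIC);
			if (!req->buf)
				return -ENOMEM;	/* caller drops the frame */
		}
	}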
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -511,6 +600,10 @@
return NETDEV_TX_OK;
}
+ /* Allocate memory for tx_reqs to support multi packet transfer */
+ if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+ alloc_tx_buffer(dev);
+
/* apply outgoing CDC or RNDIS filters */
if (!is_promisc(cdc_filter)) {
u8 *dest = skb->data;
@@ -565,11 +658,39 @@
spin_unlock_irqrestore(&dev->lock, flags);
if (!skb)
goto drop;
-
- length = skb->len;
}
- req->buf = skb->data;
- req->context = skb;
+
+ spin_lock_irqsave(&dev->req_lock, flags);
+ dev->tx_skb_hold_count++;
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+
+ if (dev->port_usb->multi_pkt_xfer) {
+ memcpy(req->buf + req->length, skb->data, skb->len);
+ req->length = req->length + skb->len;
+ length = req->length;
+ dev_kfree_skb_any(skb);
+
+ spin_lock_irqsave(&dev->req_lock, flags);
+ if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) {
+ if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+ list_add(&req->list, &dev->tx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+ goto success;
+ }
+ }
+
+ dev->no_tx_req_used++;
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->tx_skb_hold_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ length = skb->len;
+ req->buf = skb->data;
+ req->context = skb;
+ }
+
req->complete = tx_complete;
/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -584,8 +705,10 @@
* though any robust network rx path ignores extra padding.
* and some hardware doesn't like to write zlps.
*/
- if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+ if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+ req->zero = 0;
length++;
+ }
req->length = length;
@@ -593,7 +716,7 @@
if (gadget_is_dualspeed(dev->gadget) &&
(dev->gadget->speed == USB_SPEED_HIGH)) {
dev->tx_qlen++;
- if (dev->tx_qlen == qmult) {
+ if (dev->tx_qlen == (qmult/2)) {
req->no_interrupt = 0;
dev->tx_qlen = 0;
} else {
@@ -613,7 +736,8 @@
}
if (retval) {
- dev_kfree_skb_any(skb);
+ if (!dev->port_usb->multi_pkt_xfer)
+ dev_kfree_skb_any(skb);
drop:
dev->net->stats.tx_dropped++;
spin_lock_irqsave(&dev->req_lock, flags);
@@ -622,6 +746,7 @@
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->req_lock, flags);
}
+success:
return NETDEV_TX_OK;
}
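The transmit path now copies each SKB into req->buf and only queues the request once TX_SKB_HOLD_THRESHOLD SKBs are folded in, unless the UDC is running low on queued requests. A condensed, self-contained restatement of that condition (the driver open-codes it; the defines are stand-ins):

#define SKB_HOLD_THRESHOLD 3	/* stand-in for TX_SKB_HOLD_THRESHOLD */
#define REQ_THRESHOLD 5		/* stand-in for TX_REQ_THRESHOLD */

static int should_queue_now(int skbs_held, int reqs_in_flight)
{
	/* UDC still busy and batch not full: park the request and
	 * keep aggregating. */
	if (skbs_held < SKB_HOLD_THRESHOLD && reqs_in_flight > REQ_THRESHOLD)
		return 0;
	return 1;	/* flush the aggregate to the UDC */
}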
@@ -924,6 +1049,9 @@
dev->wrap = link->wrap;
spin_lock(&dev->lock);
+ dev->tx_skb_hold_count = 0;
+ dev->no_tx_req_used = 0;
+ dev->tx_req_bufsize = 0;
dev->port_usb = link;
link->ioport = dev;
if (netif_running(dev->net)) {
@@ -989,6 +1117,8 @@
list_del(&req->list);
spin_unlock(&dev->req_lock);
+ if (link->multi_pkt_xfer)
+ kfree(req->buf);
usb_ep_free_request(link->in_ep, req);
spin_lock(&dev->req_lock);
}
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 37431f5..faa9a3b 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -53,6 +53,9 @@
bool is_fixed;
u32 fixed_out_len;
u32 fixed_in_len;
+/* Max number of SKBs aggregated into one multi-packet RNDIS transfer */
+#define TX_SKB_HOLD_THRESHOLD 3
+ bool multi_pkt_xfer;
struct sk_buff *(*wrap)(struct gether *port,
struct sk_buff *skb);
int (*unwrap)(struct gether *port,
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6a6f6e5..7309438 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -977,6 +977,9 @@
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
ehci_err(ehci, "fatal error\n");
+ if (hcd->driver->dump_regs)
+ hcd->driver->dump_regs(hcd);
+ panic("System error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
ehci_halt(ehci);
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 874c728..e49e2a0 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -45,6 +45,8 @@
#include <mach/rpm-regulator.h>
#define MSM_USB_BASE (hcd->regs)
+#define USB_REG_START_OFFSET 0x90
+#define USB_REG_END_OFFSET 0x250
struct msm_hsic_hcd {
struct ehci_hcd ehci;
@@ -68,6 +70,8 @@
enum usb_vdd_type vdd_type;
};
+struct msm_hsic_hcd *__mehci;
+
static bool debug_bus_voting_enabled = true;
static unsigned int enable_dbg_log = 1;
@@ -256,6 +260,22 @@
return container_of((void *) mehci, struct usb_hcd, hcd_priv);
}
+static void dump_hsic_regs(struct usb_hcd *hcd)
+{
+ int i;
+ struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
+
+ if (atomic_read(&mehci->in_lpm))
+ return;
+
+ for (i = USB_REG_START_OFFSET; i <= USB_REG_END_OFFSET; i += 0x10)
+ pr_info("%p: %08x\t%08x\t%08x\t%08x\n", hcd->regs + i,
+ readl_relaxed(hcd->regs + i),
+ readl_relaxed(hcd->regs + i + 4),
+ readl_relaxed(hcd->regs + i + 8),
+ readl_relaxed(hcd->regs + i + 0xc));
+}
+
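The dump walks hcd->regs from offset 0x90 through 0x250 in 0x10 strides, four 32-bit words per line. A quick stand-alone check of the loop bounds:

#include <stdio.h>

int main(void)
{
	int i, lines = 0;

	for (i = 0x90; i <= 0x250; i += 0x10)
		lines++;
	/* prints: 29 lines x 16 bytes = 464 bytes dumped */
	printf("%d lines x 16 bytes = %d bytes dumped\n", lines, lines * 16);
	return 0;
}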
#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
#define USB_PHY_VDD_DIG_VOL_NONE 0 /*uV */
@@ -872,6 +892,7 @@
.bus_resume = ehci_hsic_bus_resume,
.log_urb_complete = dbg_log_event,
+ .dump_regs = dump_hsic_regs,
.enable_ulpi_control = ehci_msm_enable_ulpi_control,
.disable_ulpi_control = ehci_msm_disable_ulpi_control,
@@ -1333,6 +1354,8 @@
}
}
+ __mehci = mehci;
+
/*
* This pdev->dev is assigned parent of root-hub by USB core,
* hence, runtime framework automatically calls this driver's
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 1d9c84f..14118e7 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -74,7 +74,7 @@
static struct regulator *hsusb_1p8;
static struct regulator *hsusb_vddcx;
static struct regulator *vbus_otg;
-static struct regulator *mhl_analog_switch;
+static struct regulator *mhl_usb_hs_switch;
static struct power_supply *psy;
static bool aca_id_turned_on;
@@ -250,16 +250,16 @@
if (!pdata->mhl_enable)
return;
- if (!mhl_analog_switch) {
- pr_err("%s: mhl_analog_switch is NULL.\n", __func__);
+ if (!mhl_usb_hs_switch) {
+ pr_err("%s: mhl_usb_hs_switch is NULL.\n", __func__);
return;
}
if (on) {
- if (regulator_enable(mhl_analog_switch))
- pr_err("unable to enable mhl_analog_switch\n");
+ if (regulator_enable(mhl_usb_hs_switch))
+ pr_err("unable to enable mhl_usb_hs_switch\n");
} else {
- regulator_disable(mhl_analog_switch);
+ regulator_disable(mhl_usb_hs_switch);
}
}
@@ -972,6 +972,23 @@
}
#endif
+static int msm_otg_notify_host_mode(struct msm_otg *motg, bool host_mode)
+{
+ if (!psy)
+ goto psy_not_supported;
+
+ if (host_mode)
+ power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_SYSTEM);
+ else
+		power_supply_set_scope(psy, POWER_SUPPLY_SCOPE_DEVICE);
+
+	return 0;
+
+psy_not_supported:
+ dev_dbg(motg->phy.dev, "Power Supply doesn't support USB charger\n");
+ return -ENXIO;
+}
+
static int msm_otg_notify_chg_type(struct msm_otg *motg)
{
static int charger_type;
@@ -1220,7 +1235,7 @@
* current from the source.
*/
if (on) {
- pm8921_disable_source_current(on);
+ msm_otg_notify_host_mode(motg, on);
ret = regulator_enable(vbus_otg);
if (ret) {
pr_err("unable to enable vbus_otg\n");
@@ -1233,7 +1248,7 @@
pr_err("unable to disable vbus_otg\n");
return;
}
- pm8921_disable_source_current(on);
+ msm_otg_notify_host_mode(motg, on);
vbus_is_on = false;
}
}
@@ -3340,10 +3355,10 @@
}
if (pdata->mhl_enable) {
- mhl_analog_switch = devm_regulator_get(motg->phy.dev,
- "mhl_ext_3p3v");
- if (IS_ERR(mhl_analog_switch)) {
- dev_err(&pdev->dev, "Unable to get mhl_analog_switch\n");
+ mhl_usb_hs_switch = devm_regulator_get(motg->phy.dev,
+ "mhl_usb_hs_switch");
+ if (IS_ERR(mhl_usb_hs_switch)) {
+ dev_err(&pdev->dev, "Unable to get mhl_usb_hs_switch\n");
goto free_ldo_init;
}
}
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 2bc7f5b..ea9e5ab 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -847,6 +847,7 @@
MDP_OUTP(base + 0x0018, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mgmt->hist = NULL;
complete(&mgmt->mdp_hist_comp);
mdp_disable_irq(mgmt->irq_term);
return 0;
@@ -926,7 +927,6 @@
mgmt->frame_cnt = req->frame_cnt;
mgmt->bit_mask = req->bit_mask;
mgmt->num_bins = req->num_bins;
- mgmt->hist = NULL;
ret = mdp_histogram_enable(mgmt);
@@ -963,6 +963,7 @@
if (!mfd->panel_power_on) {
mgmt->mdp_is_hist_data = FALSE;
+ mgmt->hist = NULL;
complete(&mgmt->mdp_hist_comp);
ret = -EINVAL;
goto error_lock;
@@ -1099,21 +1100,31 @@
goto error;
}
- switch (mgmt->block) {
- case MDP_BLOCK_DMA_P:
- case MDP_BLOCK_DMA_S:
- ret = _mdp_histogram_read_dma_data(mgmt);
- break;
- case MDP_BLOCK_VG_1:
- case MDP_BLOCK_VG_2:
- ret = _mdp_histogram_read_vg_data(mgmt);
- break;
- default:
- pr_err("%s, invalid MDP block = %d\n", __func__, mgmt->block);
+ if (mgmt->hist == NULL) {
+ if ((mgmt->mdp_is_hist_init == TRUE) &&
+ ((!completion_done(&mgmt->mdp_hist_comp)) &&
+ waitqueue_active(&mgmt->mdp_hist_comp.wait)))
+ pr_err("mgmt->hist invalid NULL\n");
ret = -EINVAL;
- goto error;
}
+ if (!ret) {
+ switch (mgmt->block) {
+ case MDP_BLOCK_DMA_P:
+ case MDP_BLOCK_DMA_S:
+ ret = _mdp_histogram_read_dma_data(mgmt);
+ break;
+ case MDP_BLOCK_VG_1:
+ case MDP_BLOCK_VG_2:
+ ret = _mdp_histogram_read_vg_data(mgmt);
+ break;
+ default:
+ pr_err("%s, invalid MDP block = %d\n", __func__,
+ mgmt->block);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
/*
* if read was triggered by an underrun or failed copying,
* don't wake up readers
@@ -1614,16 +1625,7 @@
__mdp_histogram_kickoff(mgmt);
if (isr & INTR_HIST_DONE) {
- if ((waitqueue_active(&mgmt->mdp_hist_comp.wait))
- && (mgmt->hist != NULL)) {
- if (!queue_work(mdp_hist_wq,
- &mgmt->mdp_histogram_worker)) {
- pr_err("%s %d- can't queue hist_read\n",
- __func__, mgmt->block);
- }
- } else {
- __mdp_histogram_reset(mgmt);
- }
+ queue_work(mdp_hist_wq, &mgmt->mdp_histogram_worker);
}
}
@@ -2123,15 +2125,26 @@
disable_irq(mdp_irq);
hdmi_pll_fs = regulator_get(&pdev->dev, "hdmi_pll_fs");
- if (IS_ERR(hdmi_pll_fs))
+ if (IS_ERR(hdmi_pll_fs)) {
hdmi_pll_fs = NULL;
+ } else {
+ if (mdp_rev != MDP_REV_44) {
+ ret = regulator_set_voltage(hdmi_pll_fs, 1800000,
+ 1800000);
+ if (ret) {
+ pr_err("set_voltage failed for hdmi_pll_fs, ret=%d\n",
+ ret);
+ }
+ }
+ }
footswitch = regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(footswitch))
+ if (IS_ERR(footswitch)) {
footswitch = NULL;
- else {
+ } else {
if (hdmi_pll_fs)
regulator_enable(hdmi_pll_fs);
+
regulator_enable(footswitch);
mdp_footswitch_on = 1;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 1287743..ad37d2f 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -116,13 +116,13 @@
if (!display_iclient)
return -EINVAL;
- *srcp_ihdl = ion_import_fd(display_iclient, mem_id);
+ *srcp_ihdl = ion_import_dma_buf(display_iclient, mem_id);
if (IS_ERR_OR_NULL(*srcp_ihdl)) {
- pr_err("ion_import_fd() failed\n");
+ pr_err("ion_import_dma_buf() failed\n");
return PTR_ERR(*srcp_ihdl);
}
- pr_debug("%s(): ion_hdl %p, ion_buf %p\n", __func__, *srcp_ihdl,
- ion_share(display_iclient, *srcp_ihdl));
+ pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
+ ion_share_dma_buf(display_iclient, *srcp_ihdl));
pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
pipe->pipe_ndx, plane);
if (ion_map_iommu(display_iclient, *srcp_ihdl,
@@ -585,14 +585,14 @@
mdp4_scale_setup(pipe);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
/* Ensure proper convert matrix loaded when color space swaps */
curr = inpdw(rgb_base + 0x0058);
/* Don't touch bits you don't want to configure*/
mask = 0xFFFEFFFF;
pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
outpdw(rgb_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
outpdw(rgb_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
outpdw(rgb_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 7ba4e75..8ebf8a0 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -652,7 +652,7 @@
{
unsigned long flag;
-
+ mdp4_iommu_attach();
/* change mdp clk */
mdp4_set_perf_level();
@@ -705,7 +705,6 @@
mdp4_overlay_update_dsi_cmd(mfd);
- mdp4_iommu_attach();
mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
mdp4_iommu_unmap(dsi_pipe);
/* signal if pan function is waiting for the update completion */
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 05c6fe8..3cdd72e 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -281,6 +281,7 @@
ret = panel_next_on(pdev);
if (ret == 0) {
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (display_on != NULL) {
msleep(50);
display_on(pdev);
@@ -515,7 +516,6 @@
mdp4_iommu_attach();
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
dsi_video_enabled = 1;
}
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 03b22f1..e26522b 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -201,6 +201,7 @@
/* Test pattern 8 x 8 pixel */
/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
+ mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -534,7 +535,6 @@
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* enable DTV block */
MDP_OUTP(MDP_BASE + DTV_BASE, 1);
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
dtv_enabled = 1;
}
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index fd6d365..98c8191 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -261,6 +261,9 @@
mdp_histogram_ctrl_all(TRUE);
ret = panel_next_on(pdev);
+ if (ret == 0)
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
/* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -422,7 +425,6 @@
mdp4_iommu_attach();
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
lcdc_enabled = 1;
}
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 8dccf78..d59c4a8 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -413,7 +413,7 @@
else if (mfd->iclient) {
struct ion_handle *srcp_ihdl;
ulong len;
- srcp_ihdl = ion_import_fd(mfd->iclient,
+ srcp_ihdl = ion_import_dma_buf(mfd->iclient,
data->memory_id);
if (IS_ERR_OR_NULL(srcp_ihdl)) {
pr_err("%s: ion import fd failed\n", __func__);
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 2e86806..f6b4fce 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -326,7 +326,7 @@
}
} else if (iclient) {
data->iclient = iclient;
- data->srcp_ihdl = ion_import_fd(iclient, img->memory_id);
+ data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
if (IS_ERR_OR_NULL(data->srcp_ihdl))
return PTR_ERR(data->srcp_ihdl);
ret = ion_phys(iclient, data->srcp_ihdl,
diff --git a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
index 6edd776..eb2946b 100644
--- a/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
+++ b/drivers/video/msm/mipi_toshiba_video_wsvga_pt.c
@@ -22,14 +22,14 @@
/* regulator */
{0x09, 0x08, 0x05, 0x00, 0x20},
/* timing */
- {0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
- 0x0c, 0x03, 0x04, 0xa0},
+ {0x73, 0x2e, 0x11, 0x00, 0x3c, 0x46, 0x14, 0x31,
+ 0x1c, 0x03, 0x04, 0xa0},
/* phy ctrl */
{0x5f, 0x00, 0x00, 0x10},
/* strength */
{0xff, 0x00, 0x06, 0x00},
/* pll control */
- {0x0, 0x7f, 0x31, 0xda, 0x00, 0x50, 0x48, 0x63,
+ {0x0, 0x49, 0x30, 0xc4, 0x00, 0x20, 0x07, 0x62,
0x41, 0x0f, 0x01,
0x00, 0x14, 0x03, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01 },
};
@@ -59,8 +59,8 @@
pinfo.lcdc.h_back_porch = 16;
pinfo.lcdc.h_front_porch = 23;
pinfo.lcdc.h_pulse_width = 8;
- pinfo.lcdc.v_back_porch = 2;
- pinfo.lcdc.v_front_porch = 7;
+ pinfo.lcdc.v_back_porch = 3;
+ pinfo.lcdc.v_front_porch = 45;
pinfo.lcdc.v_pulse_width = 2;
pinfo.lcdc.border_clr = 0; /* blk */
pinfo.lcdc.underflow_clr = 0xff; /* blue */
@@ -68,7 +68,6 @@
pinfo.bl_max = MIPI_TOSHIBA_PWM_LEVEL;
pinfo.bl_min = 1;
pinfo.fb_num = 2;
- pinfo.clk_rate = 384000000;
pinfo.mipi.mode = DSI_VIDEO_MODE;
pinfo.mipi.pulse_mode_hsa_he = FALSE;
@@ -84,8 +83,8 @@
pinfo.mipi.data_lane0 = TRUE;
pinfo.mipi.data_lane1 = TRUE;
pinfo.mipi.data_lane2 = TRUE;
- pinfo.mipi.t_clk_post = 0x20;
- pinfo.mipi.t_clk_pre = 0x2d;
+ pinfo.mipi.t_clk_post = 0x04;
+ pinfo.mipi.t_clk_pre = 0x1a;
pinfo.mipi.esc_byte_ratio = 4;
pinfo.mipi.stream = 0; /* dma_p */
pinfo.mipi.mdp_trigger = 0;
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index a5171f0..291de5f 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -25,7 +25,6 @@
#include "vidc.h"
#include "vcd_res_tracker.h"
-#define PIL_FW_BASE_ADDR 0x9fe00000
#define PIL_FW_SIZE 0x200000
static unsigned int vidc_clk_table[4] = {
@@ -181,6 +180,7 @@
{
u32 alloc_size;
struct ddl_context *ddl_context;
+ unsigned long fw_addr;
int rc = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
@@ -216,8 +216,9 @@
goto bail_out;
}
} else {
+ fw_addr = resource_context.vidc_platform_data->fw_addr;
addr->alloc_handle = NULL;
- addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
+ addr->alloced_phys_addr = fw_addr;
addr->buffer_size = sz;
}
} else {
@@ -966,6 +967,10 @@
}
msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
msm_ion_secure_heap(ION_HEAP(resource_context.cmd_mem_type));
+
+ if (resource_context.vidc_platform_data->secure_wb_heap)
+ msm_ion_secure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
res_trk_disable_iommu_clocks();
mutex_unlock(&resource_context.secure_lock);
}
@@ -988,6 +993,10 @@
}
msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
+
+ if (resource_context.vidc_platform_data->secure_wb_heap)
+ msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
+
res_trk_disable_iommu_clocks();
mutex_unlock(&resource_context.secure_lock);
}
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index ed8b452..634011b 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -891,7 +891,7 @@
vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
- client_ctx->h264_mv_ion_handle = ion_import_fd(
+ client_ctx->h264_mv_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client,
vcd_h264_mv_buffer->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
@@ -1790,7 +1790,7 @@
}
put_pmem_file(pmem_file);
} else {
- client_ctx->seq_hdr_ion_handle = ion_import_fd(
+ client_ctx->seq_hdr_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client,
seq_header.pmem_fd);
if (!client_ctx->seq_hdr_ion_handle) {
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 43e8d5e..9450ee7 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1821,7 +1821,7 @@
control->client_data = (void *) mapped_buffer;
control->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
- client_ctx->recon_buffer_ion_handle[i] = ion_import_fd(
+ client_ctx->recon_buffer_ion_handle[i] = ion_import_dma_buf(
client_ctx->user_ion_client, control->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->recon_buffer_ion_handle[i])) {
ERR("%s(): get_ION_handle failed\n", __func__);
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 23c990a..dcacb3c 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -627,7 +627,7 @@
buf_addr_table[*num_of_buffers].dev_addr =
mapped_buffer->iova[0];
} else {
- buff_ion_handle = ion_import_fd(
+ buff_ion_handle = ion_import_dma_buf(
client_ctx->user_ion_client, pmem_fd);
if (IS_ERR_OR_NULL(buff_ion_handle)) {
ERR("%s(): get_ION_handle failed\n",
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 5351589..79bcac0 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -37,6 +37,7 @@
#define VCD_TIMESTAMP_RESOLUTION 1000000
#define VCD_DEC_INITIAL_FRAME_RATE 30
+#define VCD_MAXPERF_FPS_THRESHOLD_X_1000 (59*1000)
#define VCD_FIRST_IP_RCVD 0x00000004
#define VCD_FIRST_OP_RCVD 0x00000008
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
index 33b2300..44d270a 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_power_sm.c
@@ -341,6 +341,18 @@
u32 new_perf_lvl;
new_perf_lvl = frm_p_units *\
(fps->fps_numerator / fps->fps_denominator);
+
+ if ((fps->fps_numerator * 1000) / fps->fps_denominator
+ > VCD_MAXPERF_FPS_THRESHOLD_X_1000) {
+ u32 max_perf_level = 0;
+ if (res_trk_get_max_perf_level(&max_perf_level)) {
+ new_perf_lvl = max_perf_level;
+ VCD_MSG_HIGH("Using max perf level(%d) for >60fps\n",
+ new_perf_lvl);
+ } else {
+ VCD_MSG_ERROR("Failed to get max perf level\n");
+ }
+ }
if (cctxt->status.req_perf_lvl) {
dev_ctxt->reqd_perf_lvl =
dev_ctxt->reqd_perf_lvl - cctxt->reqd_perf_lvl +
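A quick worked check of the threshold logic above, with a hypothetical clip: 59.94 fps content is commonly carried as fps_numerator = 60000, fps_denominator = 1001, so the integer comparison resolves as follows.

	/* Hypothetical 59.94 fps clip: 60000/1001. */
	u32 fps_x_1000 = (60000 * 1000) / 1001;	/* = 59940 */
	/* 59940 > VCD_MAXPERF_FPS_THRESHOLD_X_1000 (59000), so this session
	 * is bumped to the max perf level; a plain 30 fps clip yields 30000
	 * and keeps the frm_p_units-based level instead. */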
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 0000000..c544356
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+ if (!dev || !dma_contiguous_default_area)
+ dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index 84be123..9fca83b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -666,6 +666,10 @@
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#ifdef CONFIG_CMA
+ struct cma *cma_area; /* contiguous memory area for dma
+ allocations */
+#endif
/* arch specific additions */
struct dev_archdata archdata;
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 0000000..2f303e4
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ * The Contiguous Memory Allocator (CMA) makes it possible to
+ * allocate big contiguous chunks of memory after the system has
+ * booted.
+ *
+ * Why is it needed?
+ *
+ * Various devices on embedded systems have no scatter-gather and/or
+ * IO map support and require contiguous blocks of memory to
+ * operate. They include devices such as cameras, hardware video
+ * coders, etc.
+ *
+ * Such devices often require big memory buffers (a full HD frame
+ * is, for instance, more than 2 megapixels, i.e. more than 6
+ * MB of memory), which makes mechanisms such as kmalloc() or
+ * alloc_page() ineffective.
+ *
+ * At the same time, a solution where a big memory region is
+ * reserved for a device is suboptimal since often more memory is
+ * reserved than strictly required and, moreover, the memory is
+ * inaccessible to the page system even if device drivers don't use it.
+ *
+ * CMA tries to solve this issue by operating on memory regions
+ * from which only movable pages can be allocated. This way, the kernel
+ * can use the memory for pagecache and, when a device driver requests
+ * it, the allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ * CMA should not be used by device drivers directly. It is
+ * only a helper framework for the dma-mapping subsystem.
+ *
+ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least the global CMA area and a few optional
+ * device-private areas configured in the kernel .config.
+ */
+#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+
+#else
+
+#define MAX_CMA_AREAS (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order)
+{
+ return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ return false;
+}
+
+#endif
+
+#endif
+
+#endif
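To make the API above concrete, a minimal sketch of the intended call flow, with invented device and sizes (and, per the comment above, drivers themselves should keep going through the dma-mapping layer rather than calling these directly):

	#include <linux/dma-contiguous.h>

	/* Board code: carve out 16 MiB for one device; base 0 is assumed
	 * here to let CMA choose the placement below the given limit. */
	static int __init example_reserve_camera(struct device *camera_dev)
	{
		return dma_declare_contiguous(camera_dev, 16 << 20,
					      0, 0xffffffff);
	}

	/* dma-mapping backend: pull 4096 pages (16 MiB with 4 KiB pages),
	 * order-8 aligned, out of that area; they are handed back later
	 * via dma_release_from_contiguous(). */
	static struct page *example_grab(struct device *camera_dev)
	{
		return dma_alloc_from_contiguous(camera_dev, 4096, 8);
	}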
diff --git a/include/linux/ion.h b/include/linux/ion.h
index fca5517..2519270 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -21,7 +21,6 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-
struct ion_handle;
/**
* enum ion_heap_types - list of all possible types of heaps
@@ -261,6 +260,17 @@
#ifdef CONFIG_ION
/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel.
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @heap_mask: mask of heaps this client can allocate from
@@ -323,7 +333,7 @@
* This function queries the heap for a particular handle to get the
* handle's physical address. It't output is only correct if
* a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_map_dma should be used
+ * this api should not be implemented -- ion_sg_table should be used
* instead. Returns -EINVAL if the handle is invalid. This has
* no implications on the reference counting of the handle --
* the returned value may not be valid if the caller is not
@@ -333,6 +343,17 @@
ion_phys_addr_t *addr, size_t *len);
/**
+ * ion_sg_table() - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
@@ -353,64 +374,22 @@
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_map_dma - create a dma mapping for a given handle
+ * ion_share_dma_buf() - given an ion client, create a dma-buf fd
* @client: the client
- * @handle: handle to map
- *
- * Return an sglist describing the given handle
+ * @handle: the handle
*/
-struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle,
- unsigned long flags);
+int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_unmap_dma() - destroy a dma mapping for a handle
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
* @client: the client
- * @handle: handle to unmap
- */
-void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_share() - given a handle, obtain a buffer to pass to other clients
- * @client: the client
- * @handle: the handle to share
+ * @fd: the dma-buf fd
*
- * Given a handle, return a buffer, which exists in a global name
- * space, and can be passed to other clients. Should be passed into ion_import
- * to obtain a new handle for this buffer.
- *
- * NOTE: This function does do not an extra reference. The burden is on the
- * caller to make sure the buffer doesn't go away while it's being passed to
- * another client. That is, ion_free should not be called on this handle until
- * the buffer has been imported into the other client.
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf(),
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
*/
-struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle);
-
-/**
- * ion_import() - given an buffer in another client, import it
- * @client: this blocks client
- * @buffer: the buffer to import (as obtained from ion_share)
- *
- * Given a buffer, add it to the client and return the handle to use to refer
- * to it further. This is called to share a handle from one kernel client to
- * another.
- */
-struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer);
-
-/**
- * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
- * @client: this blocks client
- * @fd: the fd
- *
- * A helper function for drivers that will be recieving ion buffers shared
- * with them from userspace. These buffers are represented by a file
- * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
- * This function coverts that fd into the underlying buffer, and returns
- * the handle to use to refer to it further.
- */
-struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
/**
* ion_handle_get_flags - get the flags for a given handle
@@ -575,6 +554,11 @@
void *vaddr, unsigned long len, unsigned int cmd);
#else
+static inline void ion_reserve(struct ion_platform_data *data)
+{
+}
+
static inline struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask, const char *name)
{
@@ -605,6 +589,12 @@
return -ENODEV;
}
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void *ion_map_kernel(struct ion_client *client,
struct ion_handle *handle, unsigned long flags)
{
@@ -614,29 +604,12 @@
static inline void ion_unmap_kernel(struct ion_client *client,
struct ion_handle *handle) { }
-static inline struct scatterlist *ion_map_dma(struct ion_client *client,
- struct ion_handle *handle, unsigned long flags)
+static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-static inline void ion_unmap_dma(struct ion_client *client,
- struct ion_handle *handle) { }
-
-static inline struct ion_buffer *ion_share(struct ion_client *client,
- struct ion_handle *handle)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct ion_handle *ion_import_fd(struct ion_client *client,
- int fd)
+static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
return ERR_PTR(-ENODEV);
}
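A hedged sketch of the migration for a kernel client of this header: ion_import_fd() callers move to ion_import_dma_buf(), and the scatterlist formerly returned by ion_map_dma() is obtained through ion_sg_table() instead (the wrapper function and its arguments are invented for illustration; the client is assumed to come from ion_client_create()):

	static int example_import(struct ion_client *client, int fd)
	{
		struct ion_handle *handle;
		struct sg_table *table;

		handle = ion_import_dma_buf(client, fd); /* was ion_import_fd() */
		if (IS_ERR_OR_NULL(handle))
			return -EINVAL;

		table = ion_sg_table(client, handle);	/* was ion_map_dma() */
		if (IS_ERR_OR_NULL(table)) {
			ion_free(client, handle);
			return -EINVAL;
		}
		/* ... walk table->sgl ... then drop the reference */
		ion_free(client, handle);
		return 0;
	}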
diff --git a/include/linux/mfd/pm8xxx/ccadc.h b/include/linux/mfd/pm8xxx/ccadc.h
index 0bd4cc3..29f7a62 100644
--- a/include/linux/mfd/pm8xxx/ccadc.h
+++ b/include/linux/mfd/pm8xxx/ccadc.h
@@ -27,32 +27,13 @@
unsigned int calib_delay_ms;
};
-#define CCADC_READING_RESOLUTION_N_V1 1085069
-#define CCADC_READING_RESOLUTION_D_V1 100000
-#define CCADC_READING_RESOLUTION_N_V2 542535
-#define CCADC_READING_RESOLUTION_D_V2 100000
-
-static s64 pm8xxx_ccadc_reading_to_microvolt_v1(s64 cc)
-{
- return div_s64(cc * CCADC_READING_RESOLUTION_N_V1,
- CCADC_READING_RESOLUTION_D_V1);
-}
-
-static s64 pm8xxx_ccadc_reading_to_microvolt_v2(s64 cc)
-{
- return div_s64(cc * CCADC_READING_RESOLUTION_N_V2,
- CCADC_READING_RESOLUTION_D_V2);
-}
+#define CCADC_READING_RESOLUTION_N 542535
+#define CCADC_READING_RESOLUTION_D 100000
static inline s64 pm8xxx_ccadc_reading_to_microvolt(int revision, s64 cc)
{
- /*
- * resolution (the value of a single bit) was changed after revision 2.0
- * for more accurate readings
- */
- return (revision < PM8XXX_REVISION_8921_2p0) ?
- pm8xxx_ccadc_reading_to_microvolt_v1((s64)cc) :
- pm8xxx_ccadc_reading_to_microvolt_v2((s64)cc);
+ return div_s64(cc * CCADC_READING_RESOLUTION_N,
+ CCADC_READING_RESOLUTION_D);
}
#if defined(CONFIG_PM8XXX_CCADC) || defined(CONFIG_PM8XXX_CCADC_MODULE)
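With the revision split removed, every PMIC revision now converts with the former 2.0 resolution; for example, a raw reading of 1000 becomes:

	/* 1000 * 542535 / 100000 = 5425 uV (div_s64 truncates). */
	s64 uv = pm8xxx_ccadc_reading_to_microvolt(revision, 1000);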
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f90965a..f8a3a10 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,8 +63,10 @@
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) ((zone)->min_cma_pages)
#else
# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
#endif
#define for_each_migratetype_order(order, type) \
@@ -371,6 +373,13 @@
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
#endif
+#ifdef CONFIG_CMA
+ /*
+ * CMA needs to increase watermark levels during the allocation
+ * process to make sure that the system is not starved.
+ */
+ unsigned long min_cma_pages;
+#endif
struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 308f969..24a6371 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -77,6 +77,8 @@
SMUX_TIOCM_UPDATE,
SMUX_LOW_WM_HIT, /* @metadata is NULL */
SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_HIGH_WM_HIT, /* @metadata is NULL */
+ SMUX_RX_RETRY_LOW_WM_HIT, /* @metadata is NULL */
};
/**
@@ -86,6 +88,7 @@
SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+ SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
};
/**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index eabe4e8..dc13bd9 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -349,6 +349,7 @@
/* to log completion events*/
void (*log_urb_complete)(struct urb *urb, char * event,
unsigned extra);
+ void (*dump_regs)(struct usb_hcd *);
void (*enable_ulpi_control)(struct usb_hcd *hcd, u32 linestate);
void (*disable_ulpi_control)(struct usb_hcd *hcd);
};
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 893aeea..e312ab3 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -654,6 +654,7 @@
#define L2CAP_ATT_MTU_RSP 0x03
#define L2CAP_ATT_RESPONSE_BIT 0x01
#define L2CAP_ATT_INDICATE 0x1D
+#define L2CAP_ATT_CONFIRM 0x1E
#define L2CAP_ATT_NOT_SUPPORTED 0x06
#define __delta_seq(x, y, pi) ((x) >= (y) ? (x) - (y) : \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a17921f..c34cbbd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5090,6 +5090,11 @@
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+ zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5695,6 +5700,54 @@
return ret > 0 ? 0 : ret;
}
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->min_cma_pages += count;
+ spin_unlock_irqrestore(&zone->lock, flags);
+ setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single-page units. Does similar work to
+ * the __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+ int did_some_progress = 0;
+ int order = 1;
+
+ /*
+ * Increase the watermark levels to force kswapd to do its job and
+ * stabilise at the new watermark level.
+ */
+ __update_cma_watermarks(zone, count);
+
+ /* Obey watermarks as if the page was being allocated */
+ while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+ wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+ did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ NULL);
+ if (!did_some_progress) {
+ /* Exhausted what can be done so it's blamo time */
+ out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ }
+ }
+
+ /* Restore original watermark levels. */
+ __update_cma_watermarks(zone, -count);
+
+ return count;
+}
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -5793,6 +5846,13 @@
goto done;
}
+ /*
+ * Reclaim enough pages to make sure that contiguous allocation
+ * will not starve the system.
+ */
+ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+ /* Grab isolated pages from freelists. */
outer_end = isolate_freepages_range(outer_start, end);
if (!outer_end) {
ret = -EBUSY;
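Putting the two page_alloc.c hunks together, a walk-through with invented numbers:

	/*
	 * Suppose a zone's computed min watermark is 2048 pages and
	 * alloc_contig_range() needs end - start = 4096 pages.
	 * __reclaim_pages() bumps zone->min_cma_pages by 4096, and
	 * setup_per_zone_wmarks() re-adds cma_wmark_pages(zone), so the
	 * effective WMARK_MIN becomes 2048 + 4096 = 6144 (LOW and HIGH
	 * rise by the same 4096). Reclaim then runs until the inflated
	 * low watermark holds, the isolated range is grabbed from the
	 * freelists, and __update_cma_watermarks(zone, -4096) restores
	 * the original levels.
	 */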
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index e9e8521..733dc22 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3291,7 +3291,7 @@
struct l2cap_conf_rfc rfc = { .mode = pi->mode };
void *ptr = req->data;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %p mode %d", sk, pi->mode);
if (pi->num_conf_req || pi->num_conf_rsp)
goto done;
@@ -3317,7 +3317,6 @@
if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
-
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
@@ -3467,6 +3466,9 @@
BT_DBG("sk %p", sk);
+ if (pi->omtu > mtu)
+ mtu = pi->omtu;
+
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -3568,6 +3570,8 @@
if (pi->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
rfc.mode = pi->mode;
+ if (mtu > L2CAP_DEFAULT_MTU)
+ pi->omtu = mtu;
if (pi->num_conf_rsp == 1)
return -ECONNREFUSED;
@@ -7259,14 +7263,31 @@
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
struct sk_buff *skb)
{
- struct sock *sk;
+ struct sock *sk = NULL;
struct sk_buff *skb_rsp;
struct l2cap_hdr *lh;
int dir;
- u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
L2CAP_ATT_NOT_SUPPORTED};
+ if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
+ u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
+
+ skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
+ GFP_ATOMIC);
+ if (!skb_rsp)
+ goto drop;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(mtu_rsp));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
+ sizeof(mtu_rsp));
+ hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+
+ goto free_skb;
+ }
+
dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;
sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
@@ -7287,28 +7308,30 @@
if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
- if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
- skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
- GFP_ATOMIC);
- if (!skb_rsp)
- goto drop;
-
- lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(mtu_rsp));
- lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
- memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
- sizeof(mtu_rsp));
- hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
-
- goto free_skb;
- }
-
if (!sock_queue_rcv_skb(sk, skb))
goto done;
drop:
- if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
- skb->data[0] != L2CAP_ATT_INDICATE)
+ if (skb->data[0] != L2CAP_ATT_INDICATE)
+ goto not_indicate;
+
+ /* If this is an incoming Indication, we are required to confirm */
+
+ skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb_rsp)
+ goto free_skb;
+
+ lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(u8));
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
+ err_rsp[0] = L2CAP_ATT_CONFIRM;
+ memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
+ hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
+ goto free_skb;
+
+not_indicate:
+ if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
+ skb->data[0] == L2CAP_ATT_CONFIRM)
goto free_skb;
/* If this is an incoming PDU that requires a response, respond with
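For reference, the confirmation built above is a single-octet ATT PDU behind the 4-byte L2CAP header; assuming L2CAP_CID_LE_DATA is the standard ATT fixed channel 0x0004, the resulting ACL payload is:

	/* len = 0x0001 (LE), cid = 0x0004 (LE), opcode = 0x1E (confirm) */
	static const u8 att_confirm_pdu[] = { 0x01, 0x00, 0x04, 0x00, 0x1e };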
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 4829e6b..3fa4a02 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1174,7 +1174,7 @@
static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct sock *srv_sk = NULL;
+ struct sock *sk2 = NULL;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -1182,15 +1182,16 @@
if (!sk)
return 0;
- /* If this is an ATT Client socket, find the matching Server */
- if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA && !l2cap_pi(sk)->incoming)
- srv_sk = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
- &bt_sk(sk)->src, &bt_sk(sk)->dst, 1);
+ /* If this is an ATT socket, find its matching server/client */
+ if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA)
+ sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
+ &bt_sk(sk)->src, &bt_sk(sk)->dst,
+ l2cap_pi(sk)->incoming ? 0 : 1);
- /* If server socket found, request tear down */
- BT_DBG("client:%p server:%p", sk, srv_sk);
- if (srv_sk)
- l2cap_sock_set_timer(srv_sk, 1);
+ /* If matching socket found, request tear down */
+ BT_DBG("sock:%p companion:%p", sk, sk2);
+ if (sk2)
+ l2cap_sock_set_timer(sk2, 1);
err = l2cap_sock_shutdown(sock, 2);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ac85423..b773553 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -418,6 +418,7 @@
static int update_class(struct hci_dev *hdev)
{
u8 cod[3];
+ int err = 0;
BT_DBG("%s", hdev->name);
@@ -431,7 +432,12 @@
if (memcmp(cod, hdev->dev_class, 3) == 0)
return 0;
- return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+
+ if (err == 0)
+ memcpy(hdev->dev_class, cod, 3);
+
+ return err;
}
static int set_limited_discoverable(struct sock *sk, u16 index,
@@ -1000,12 +1006,12 @@
hdev->major_class |= cp->major & MGMT_MAJOR_CLASS_MASK;
hdev->minor_class = cp->minor;
- if (test_bit(HCI_UP, &hdev->flags))
+ if (test_bit(HCI_UP, &hdev->flags)) {
err = update_class(hdev);
- else
- err = 0;
-
- if (err == 0)
+ if (err == 0)
+ err = cmd_complete(sk, index,
+ MGMT_OP_SET_DEV_CLASS, hdev->dev_class, sizeof(u8)*3);
+ } else
err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
hci_dev_unlock_bh(hdev);
diff --git a/sound/soc/codecs/msm_stub.c b/sound/soc/codecs/msm_stub.c
index 0a3157f..7e603b4 100644
--- a/sound/soc/codecs/msm_stub.c
+++ b/sound/soc/codecs/msm_stub.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -44,6 +45,11 @@
static int __devinit msm_stub_dev_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s.%d", "msm-stub-codec", 1);
+
+ dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+
return snd_soc_register_codec(&pdev->dev,
&soc_msm_stub, msm_stub_dais, ARRAY_SIZE(msm_stub_dais));
}
@@ -53,11 +59,16 @@
snd_soc_unregister_codec(&pdev->dev);
return 0;
}
+static const struct of_device_id msm_stub_codec_dt_match[] = {
+ { .compatible = "qcom,msm-stub-codec", },
+ {}
+};
static struct platform_driver msm_stub_driver = {
.driver = {
.name = "msm-stub-codec",
.owner = THIS_MODULE,
+ .of_match_table = msm_stub_codec_dt_match,
},
.probe = msm_stub_dev_probe,
.remove = __devexit_p(msm_stub_dev_remove),
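The dev_set_name() call in the probe above pins the DT-instantiated device to the name machine drivers already use to bind this codec; a hypothetical dai_link fragment showing the match (all fields other than codec_name are invented):

	static struct snd_soc_dai_link example_link = {
		.name		= "Example",
		.stream_name	= "Example",
		.cpu_dai_name	= "msm-dai-q6.241",
		.platform_name	= "msm-pcm-routing",
		.codec_name	= "msm-stub-codec.1", /* matches dev_set_name() */
		.codec_dai_name	= "msm-stub-rx",
	};

The msm-dai-fe.c and msm-pcm-hostless.c changes later in this series follow the same rename-on-DT-probe pattern.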
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index df913a4..d9a8ae0 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1457,7 +1457,8 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- u16 dec_reset_reg;
+ u16 dec_reset_reg, gain_reg;
+ u8 current_gain;
pr_debug("%s %d\n", __func__, event);
@@ -1474,6 +1475,12 @@
1 << w->shift);
snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* Reprogram the digital gain after the decimator powers up */
+ gain_reg = SITAR_A_CDC_TX1_VOL_CTL_GAIN + (8 * w->shift);
+ current_gain = snd_soc_read(codec, gain_reg);
+ snd_soc_write(codec, gain_reg, current_gain);
+ break;
}
return 0;
}
@@ -1482,6 +1489,8 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ u16 gain_reg;
+ u8 current_gain;
pr_debug("%s %d %s\n", __func__, event, w->name);
@@ -1492,6 +1501,11 @@
snd_soc_update_bits(codec, SITAR_A_CDC_CLK_RX_RESET_CTL,
1 << w->shift, 0x0);
break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* Reprogram the gain after the interpolator powers up */
+ gain_reg = SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * w->shift);
+ current_gain = snd_soc_read(codec, gain_reg);
+ snd_soc_write(codec, gain_reg, current_gain);
}
return 0;
}
@@ -1804,11 +1818,14 @@
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER_E("RX1 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
- 0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+ 0, sitar_codec_reset_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MIXER_E("RX2 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
- 0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+ 0, sitar_codec_reset_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MIXER_E("RX3 MIX1", SITAR_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
- 0, sitar_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU),
+ 0, sitar_codec_reset_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MUX("DAC1 MUX", SND_SOC_NOPM, 0, 0,
&rx_dac1_mux),
@@ -1880,13 +1897,17 @@
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX_E("DEC1 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
- &dec1_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+ &dec1_mux, sitar_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MUX_E("DEC2 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
- &dec2_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+ &dec2_mux, sitar_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MUX_E("DEC3 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
- &dec3_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+ &dec3_mux, sitar_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MUX_E("DEC4 MUX", SITAR_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
- &dec4_mux, sitar_codec_enable_dec, SND_SOC_DAPM_PRE_PMU),
+ &dec4_mux, sitar_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
@@ -2130,10 +2151,11 @@
static int sitar_volatile(struct snd_soc_codec *ssc, unsigned int reg)
{
+ int i;
+
/* Registers lower than 0x100 are top level registers which can be
* written by the Sitar core driver.
*/
-
if ((reg >= SITAR_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
return 1;
@@ -2142,6 +2164,15 @@
(reg <= SITAR_A_CDC_IIR1_COEF_B5_CTL))
return 1;
+ for (i = 0; i < NUM_DECIMATORS; i++) {
+ if (reg == SITAR_A_CDC_TX1_VOL_CTL_GAIN + (8 * i))
+ return 1;
+ }
+
+ for (i = 0; i < NUM_INTERPOLATORS; i++) {
+ if (reg == SITAR_A_CDC_RX1_VOL_CTL_B2_CTL + (8 * i))
+ return 1;
+ }
return 0;
}
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index ed10ebe..74ae595 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -329,6 +329,12 @@
*/
struct mutex codec_resource_lock;
+ /* Work to poll the microphone voltage in order to correct
+ * the plug type once it has been detected as a headphone
+ */
+ struct work_struct hs_correct_plug_work_nogpio;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_poke;
struct dentry *debugfs_mbhc;
@@ -6287,22 +6293,24 @@
}
/* should be called under interrupt context that hold suspend */
-static void tabla_schedule_hs_detect_plug(struct tabla_priv *tabla)
+static void tabla_schedule_hs_detect_plug(struct tabla_priv *tabla,
+ struct work_struct *correct_plug_work)
{
pr_debug("%s: scheduling tabla_hs_correct_gpio_plug\n", __func__);
tabla->hs_detect_work_stop = false;
wcd9xxx_lock_sleep(tabla->codec->control_data);
- schedule_work(&tabla->hs_correct_plug_work);
+ schedule_work(correct_plug_work);
}
/* called under codec_resource_lock acquisition */
-static void tabla_cancel_hs_detect_plug(struct tabla_priv *tabla)
+static void tabla_cancel_hs_detect_plug(struct tabla_priv *tabla,
+ struct work_struct *correct_plug_work)
{
pr_debug("%s: canceling hs_correct_plug_work\n", __func__);
tabla->hs_detect_work_stop = true;
wmb();
TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
- if (cancel_work_sync(&tabla->hs_correct_plug_work)) {
+ if (cancel_work_sync(correct_plug_work)) {
pr_debug("%s: hs_correct_plug_work is canceled\n", __func__);
wcd9xxx_unlock_sleep(tabla->codec->control_data);
}
@@ -6574,11 +6582,13 @@
if (plug_type == PLUG_TYPE_INVALID ||
plug_type == PLUG_TYPE_GND_MIC_SWAP) {
- tabla_schedule_hs_detect_plug(tabla);
+ tabla_schedule_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work);
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
- tabla_schedule_hs_detect_plug(tabla);
+ tabla_schedule_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work);
} else {
pr_debug("%s: Valid plug found, determine plug type %d\n",
__func__, plug_type);
@@ -6635,6 +6645,8 @@
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
tabla_codec_cleanup_hs_polling(codec);
tabla_codec_enable_hs_detect(codec, 0, 0, false);
+ tabla_schedule_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work_nogpio);
} else if (plug_type == PLUG_TYPE_HEADSET) {
pr_debug("%s: Headset detected\n", __func__);
tabla_codec_report_plug(codec, 1, SND_JACK_HEADSET);
@@ -6681,10 +6693,13 @@
int ret;
struct snd_soc_codec *codec = priv->codec;
struct wcd9xxx *core = dev_get_drvdata(priv->codec->dev->parent);
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ /* Cancel possibly running hs_detect_work */
+ tabla_cancel_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work_nogpio);
if (is_removal) {
- /* cancel possiblely running hs detect work */
- tabla_cancel_hs_detect_plug(priv);
/*
* If headphone is removed while playback is in progress,
@@ -6898,8 +6913,9 @@
} while (min_us > 0);
if (removed) {
- /* cancel possiblely running hs detect work */
- tabla_cancel_hs_detect_plug(priv);
+ /* Cancel possibly running hs_detect_work */
+ tabla_cancel_hs_detect_plug(priv,
+ &priv->hs_correct_plug_work_nogpio);
/*
* If this removal is not false, first check the micbias
* switch status and switch it to LDOH if it is already
@@ -6994,7 +7010,8 @@
wmb();
/* cancel detect plug */
- tabla_cancel_hs_detect_plug(tabla);
+ tabla_cancel_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work);
/* Disable Mic Bias pull down and HPH Switch to GND */
snd_soc_update_bits(codec, tabla->mbhc_bias_regs.ctl_reg, 0x01,
@@ -7006,7 +7023,8 @@
wmb();
/* cancel detect plug */
- tabla_cancel_hs_detect_plug(tabla);
+ tabla_cancel_hs_detect_plug(tabla,
+ &tabla->hs_correct_plug_work);
if (tabla->current_plug == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
@@ -7066,6 +7084,76 @@
return r;
}
+static void tabla_hs_correct_plug_nogpio(struct work_struct *work)
+{
+ struct tabla_priv *tabla;
+ struct snd_soc_codec *codec;
+ unsigned long timeout;
+ int retry = 0;
+ enum tabla_mbhc_plug_type plug_type;
+ bool is_headset = false;
+
+ pr_debug("%s(): Poll Microphone voltage for %d seconds\n",
+ __func__, TABLA_HS_DETECT_PLUG_TIME_MS / 1000);
+
+ tabla = container_of(work, struct tabla_priv,
+ hs_correct_plug_work_nogpio);
+ codec = tabla->codec;
+
+ /* Make sure the MBHC mux is connected to MIC Path */
+ snd_soc_write(codec, TABLA_A_MBHC_SCALING_MUX_1, 0x84);
+
+ /* setup for microphone polling */
+ tabla_turn_onoff_override(codec, true);
+ tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
+
+ timeout = jiffies + msecs_to_jiffies(TABLA_HS_DETECT_PLUG_TIME_MS);
+ while (!time_after(jiffies, timeout)) {
+ ++retry;
+
+ msleep(TABLA_HS_DETECT_PLUG_INERVAL_MS);
+ TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+ plug_type = tabla_codec_get_plug_type(codec, false);
+ TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+
+ if (plug_type == PLUG_TYPE_HIGH_HPH
+ || plug_type == PLUG_TYPE_INVALID) {
+
+ /* The plug may have been removed.
+ * End microphone polling and set up
+ * for low-power removal detection.
+ */
+ pr_debug("%s(): Plug may be removed, setup removal\n",
+ __func__);
+ break;
+ } else if (plug_type == PLUG_TYPE_HEADSET) {
+ /* Plug is corrected from headphone to headset,
+ * report headset and end the polling
+ */
+ is_headset = true;
+ TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
+ tabla_turn_onoff_override(codec, false);
+ tabla_codec_report_plug(codec, 1, SND_JACK_HEADSET);
+ tabla_codec_start_hs_polling(codec);
+ TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
+ pr_debug("%s(): corrected from headphone to headset\n",
+ __func__);
+ break;
+ }
+ }
+
+ /* Undo the microphone polling setup, depending
+ * on the polling result
+ */
+ tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
+ if (!is_headset) {
+ tabla_turn_onoff_override(codec, false);
+ tabla_codec_cleanup_hs_polling(codec);
+ tabla_codec_enable_hs_detect(codec, 0, 0, false);
+ }
+ wcd9xxx_unlock_sleep(codec->control_data);
+}
+
static int tabla_mbhc_init_and_calibrate(struct tabla_priv *tabla)
{
int ret = 0;
@@ -7078,6 +7166,8 @@
tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
tabla_codec_calibrate_hs_polling(codec);
if (!tabla->mbhc_cfg.gpio) {
+ INIT_WORK(&tabla->hs_correct_plug_work_nogpio,
+ tabla_hs_correct_plug_nogpio);
ret = tabla_codec_enable_hs_detect(codec, 1,
MBHC_USE_MB_TRIGGER |
MBHC_USE_HPHL_TRIGGER,
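A rough poll budget for the tabla_hs_correct_plug_nogpio() worker added above, with invented values for the two timing macros:

	/* Hypothetically, TABLA_HS_DETECT_PLUG_TIME_MS = 3000 and
	 * TABLA_HS_DETECT_PLUG_INERVAL_MS = 100 give roughly
	 * 3000 / 100 = 30 samples of tabla_codec_get_plug_type()
	 * before a headphone verdict is accepted as final. */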
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 210cfa9..4cd4a2c 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -469,8 +470,11 @@
static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-dai-fe");
+
dev_dbg(&pdev->dev, "%s: dev name %s\n", __func__,
- dev_name(&pdev->dev));
+ dev_name(&pdev->dev));
return snd_soc_register_dais(&pdev->dev, msm_fe_dais,
ARRAY_SIZE(msm_fe_dais));
}
@@ -481,12 +485,18 @@
return 0;
}
+static const struct of_device_id msm_dai_fe_dt_match[] = {
+ {.compatible = "qcom,msm-dai-fe"},
+ {}
+};
+
static struct platform_driver msm_fe_dai_driver = {
.probe = msm_fe_dai_dev_probe,
.remove = msm_fe_dai_dev_remove,
.driver = {
.name = "msm-dai-fe",
.owner = THIS_MODULE,
+ .of_match_table = msm_dai_fe_dt_match,
},
};
diff --git a/sound/soc/msm/msm-pcm-hostless.c b/sound/soc/msm/msm-pcm-hostless.c
index c61511d..c9b23d0 100644
--- a/sound/soc/msm/msm-pcm-hostless.c
+++ b/sound/soc/msm/msm-pcm-hostless.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/pcm.h>
@@ -25,6 +26,9 @@
static __devinit int msm_pcm_hostless_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-pcm-hostless");
+
pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
&msm_soc_hostless_platform);
@@ -36,10 +40,16 @@
return 0;
}
+static const struct of_device_id msm_pcm_hostless_dt_match[] = {
+ {.compatible = "qcom,msm-pcm-hostless"},
+ {}
+};
+
static struct platform_driver msm_pcm_hostless_driver = {
.driver = {
.name = "msm-pcm-hostless",
.owner = THIS_MODULE,
+ .of_match_table = msm_pcm_hostless_dt_match,
},
.probe = msm_pcm_hostless_probe,
.remove = __devexit_p(msm_pcm_hostless_remove),
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 20ac6e1..439d4b1 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -307,8 +307,8 @@
{"Ext Spk Left Neg", NULL, "LINEOUT2"},
/* Headset Mic */
- {"AMIC2", NULL, "MIC BIAS2 Internal1"},
- {"MIC BIAS2 Internal1", NULL, "Headset Mic"},
+ {"AMIC2", NULL, "MIC BIAS2 External"},
+ {"MIC BIAS2 External", NULL, "Headset Mic"},
/* Microphone path */
{"AMIC1", NULL, "MIC BIAS2 External"},
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 1605062..783a03d 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -18,6 +18,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -42,239 +43,7 @@
static struct clk *pcm_clk;
static DEFINE_MUTEX(aux_pcm_mutex);
static int aux_pcm_count;
-static struct msm_dai_auxpcm_pdata *auxpcm_plat_data;
-static u8 num_of_bits_set(u8 sd_line_mask)
-{
- u8 num_bits_set = 0;
-
- while (sd_line_mask) {
- num_bits_set++;
- sd_line_mask = sd_line_mask & (sd_line_mask - 1);
- }
- return num_bits_set;
-}
-
-static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai, int stream)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- dai_data->channels = params_channels(params);
- switch (dai_data->channels) {
- case 2:
- dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
- break;
- case 1:
- dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
- break;
- default:
- return -EINVAL;
- break;
- }
- dai_data->rate = params_rate(params);
- dai_data->port_config.i2s.sample_rate = dai_data->rate;
- dai_data->port_config.i2s.i2s_cfg_minor_version =
- AFE_API_VERSION_I2S_CONFIG;
- dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
- dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
- dai_data->channels, dai_data->rate);
-
- /* Q6 only supports 16 as now */
- dai_data->port_config.i2s.bit_width = 16;
- dai_data->port_config.i2s.channel_mode = 1;
- return 0;
-}
-
-static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai, int stream)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- struct msm_i2s_data *i2s_pdata =
- (struct msm_i2s_data *) dai->dev->platform_data;
-
- dai_data->channels = params_channels(params);
- if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
- switch (dai_data->channels) {
- case 2:
- dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
- break;
- case 1:
- dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
- break;
- default:
- pr_warn("greater than stereo has not been validated");
- break;
- }
- }
- dai_data->rate = params_rate(params);
- dai_data->port_config.i2s.sample_rate = dai_data->rate;
- dai_data->port_config.i2s.i2s_cfg_minor_version =
- AFE_API_VERSION_I2S_CONFIG;
- dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
- /* Q6 only supports 16 as now */
- dai_data->port_config.i2s.bit_width = 16;
- dai_data->port_config.i2s.channel_mode = 1;
-
- return 0;
-}
-
-static int msm_dai_q6_i2s_platform_data_validation(
- struct snd_soc_dai *dai)
-{
- u8 num_of_sd_lines;
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- struct msm_i2s_data *i2s_pdata =
- (struct msm_i2s_data *)dai->dev->platform_data;
- struct snd_soc_dai_driver *dai_driver =
- (struct snd_soc_dai_driver *)dai->driver;
-
- num_of_sd_lines = num_of_bits_set(i2s_pdata->sd_lines);
-
- switch (num_of_sd_lines) {
- case 1:
- switch (i2s_pdata->sd_lines) {
- case MSM_MI2S_SD0:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_SD0;
- break;
- case MSM_MI2S_SD1:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_SD1;
- break;
- case MSM_MI2S_SD2:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_SD2;
- break;
- case MSM_MI2S_SD3:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_SD3;
- break;
- default:
- pr_err("%s: invalid SD line\n",
- __func__);
- goto error_invalid_data;
- }
- break;
- case 2:
- switch (i2s_pdata->sd_lines) {
- case MSM_MI2S_SD0 | MSM_MI2S_SD1:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_QUAD01;
- break;
- case MSM_MI2S_SD2 | MSM_MI2S_SD3:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_QUAD23;
- break;
- default:
- pr_err("%s: invalid SD line\n",
- __func__);
- goto error_invalid_data;
- }
- break;
- case 3:
- switch (i2s_pdata->sd_lines) {
- case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_6CHS;
- break;
- default:
- pr_err("%s: invalid SD lines\n",
- __func__);
- goto error_invalid_data;
- }
- break;
- case 4:
- switch (i2s_pdata->sd_lines) {
- case MSM_MI2S_SD0 | MSM_MI2S_SD1 | MSM_MI2S_SD2 | MSM_MI2S_SD3:
- dai_data->port_config.i2s.channel_mode =
- AFE_PORT_I2S_8CHS;
- break;
- default:
- pr_err("%s: invalid SD lines\n",
- __func__);
- goto error_invalid_data;
- }
- break;
- default:
- pr_err("%s: invalid SD lines\n", __func__);
- goto error_invalid_data;
- }
- if (i2s_pdata->capability == MSM_MI2S_CAP_RX)
- dai_driver->playback.channels_max = num_of_sd_lines << 1;
-
- return 0;
-
-error_invalid_data:
- return -EINVAL;
-}
-
-static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
- break;
- case SND_SOC_DAIFMT_CBM_CFM:
- dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai, int stream)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- dai_data->channels = params_channels(params);
- dai_data->rate = params_rate(params);
-
- /* Q6 only supports 16 as now */
- dai_data->port_config.slim_sch.sb_cfg_minor_version =
- AFE_API_VERSION_SLIMBUS_CONFIG;
- dai_data->port_config.slim_sch.bit_width = 16;
- dai_data->port_config.slim_sch.data_format = 0;
- dai_data->port_config.slim_sch.num_channels = dai_data->channels;
- dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
-
- dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
- "num_channel %hu shared_ch_mapping[0] %hu\n"
- "slave_port_mapping[1] %hu slave_port_mapping[2] %hu\n"
- "sample_rate %d\n", __func__,
- dai_data->port_config.slim_sch.slimbus_dev_id,
- dai_data->port_config.slim_sch.bit_width,
- dai_data->port_config.slim_sch.data_format,
- dai_data->port_config.slim_sch.num_channels,
- dai_data->port_config.slim_sch.shared_ch_mapping[0],
- dai_data->port_config.slim_sch.shared_ch_mapping[1],
- dai_data->port_config.slim_sch.shared_ch_mapping[2],
- dai_data->rate);
-
- return 0;
-}
-
-static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai, int stream)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- dai_data->channels = params_channels(params);
- dai_data->rate = params_rate(params);
-
- dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
- dai_data->channels, dai_data->rate);
-
- memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
-
- return 0;
-}
static int msm_dai_q6_auxpcm_hw_params(
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
@@ -311,83 +80,6 @@
return 0;
}
-static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
-
- dai_data->rate = params_rate(params);
- dai_data->port_config.rtproxy.num_channels = params_channels(params);
- dai_data->port_config.rtproxy.sample_rate = params_rate(params);
-
- pr_debug("channel %d entered,dai_id: %d,rate: %d\n",
- dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
-
- dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
- AFE_API_VERSION_RT_PROXY_CONFIG;
- dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
- dai_data->port_config.rtproxy.interleaved = 1;
- dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
- dai_data->port_config.rtproxy.jitter_allowance =
- dai_data->port_config.rtproxy.frame_size/2;
- dai_data->port_config.rtproxy.low_water_mark = 0;
- dai_data->port_config.rtproxy.high_water_mark = 0;
-
- return 0;
-}
-
-/* Current implementation assumes hw_param is called once
- * This may not be the case but what to do when ADM and AFE
- * port are already opened and parameter changes
- */
-static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- int rc = 0;
-
- switch (dai->id) {
- case PRIMARY_I2S_TX:
- case PRIMARY_I2S_RX:
- case SECONDARY_I2S_RX:
- rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
- break;
- case MI2S_RX:
- rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
- break;
- case SLIMBUS_0_RX:
- case SLIMBUS_1_RX:
- case SLIMBUS_0_TX:
- case SLIMBUS_1_TX:
- rc = msm_dai_q6_slim_bus_hw_params(params, dai,
- substream->stream);
- break;
- case INT_BT_SCO_RX:
- case INT_BT_SCO_TX:
- case INT_FM_RX:
- case INT_FM_TX:
- rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
- break;
- case RT_PROXY_DAI_001_TX:
- case RT_PROXY_DAI_001_RX:
- case RT_PROXY_DAI_002_TX:
- case RT_PROXY_DAI_002_RX:
- rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
- break;
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_RX:
- case VOICE_RECORD_TX:
- rc = 0;
- break;
- default:
- dev_err(dai->dev, "invalid AFE port ID\n");
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
static void msm_dai_q6_auxpcm_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -396,8 +88,8 @@
mutex_lock(&aux_pcm_mutex);
if (aux_pcm_count == 0) {
- dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. Just"
- " return\n", __func__, dai->id);
+ dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. Just return\n",
+ __func__, dai->id);
mutex_unlock(&aux_pcm_mutex);
return;
}
@@ -410,8 +102,7 @@
mutex_unlock(&aux_pcm_mutex);
return;
} else if (aux_pcm_count < 0) {
- dev_err(dai->dev, "%s(): ERROR: dai->id %d"
- " aux_pcm_count = %d < 0\n",
+ dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d < 0\n",
__func__, dai->id, aux_pcm_count);
aux_pcm_count = 0;
mutex_unlock(&aux_pcm_mutex);
@@ -432,33 +123,6 @@
mutex_unlock(&aux_pcm_mutex);
}
-static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- int rc = 0;
-
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- pr_debug("%s, stop pseudo port:%d\n",
- __func__, dai->id);
- rc = afe_stop_pseudo_port(dai->id);
- break;
- default:
- rc = afe_close(dai->id); /* can block */
- break;
- }
- if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to close AFE port\n");
- pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
- *dai_data->status_mask);
- clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
- }
-}
-
static int msm_dai_q6_auxpcm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -468,13 +132,12 @@
mutex_lock(&aux_pcm_mutex);
if (aux_pcm_count == 2) {
- dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just"
- " return.\n", __func__, dai->id);
+ dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just return.\n",
+ __func__, dai->id);
mutex_unlock(&aux_pcm_mutex);
return 0;
} else if (aux_pcm_count > 2) {
- dev_err(dai->dev, "%s(): ERROR: dai->id %d"
- " aux_pcm_count = %d > 2\n",
+ dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d > 2\n",
__func__, dai->id, aux_pcm_count);
mutex_unlock(&aux_pcm_mutex);
return 0;
@@ -482,8 +145,8 @@
aux_pcm_count++;
if (aux_pcm_count == 2) {
- dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after "
- " increment\n", __func__, dai->id, aux_pcm_count);
+ dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after increment\n",
+ __func__, dai->id, aux_pcm_count);
mutex_unlock(&aux_pcm_mutex);
return 0;
}
@@ -516,21 +179,6 @@
return rc;
}
-static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- int rc = 0;
-
- if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- /* PORT START should be set if prepare called in active state */
- rc = afe_q6_interface_prepare();
- if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to open AFE APR\n");
- }
- return rc;
-}
-
static int msm_dai_q6_auxpcm_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
@@ -560,82 +208,18 @@
}
-static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- int rc = 0;
-
- /* Start/stop port without waiting for Q6 AFE response. Need to have
- * native q6 AFE driver propagates AFE response in order to handle
- * port start/stop command error properly if error does arise.
- */
- pr_debug("%s:port:%d cmd:%d dai_data->status_mask = %ld",
- __func__, dai->id, cmd, *dai_data->status_mask);
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- afe_pseudo_port_start_nowait(dai->id);
- break;
- default:
- afe_port_start_nowait(dai->id,
- &dai_data->port_config, dai_data->rate);
- break;
- }
- set_bit(STATUS_PORT_STARTED,
- dai_data->status_mask);
- }
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- afe_pseudo_port_stop_nowait(dai->id);
- break;
- default:
- afe_port_stop_nowait(dai->id);
- break;
- }
- clear_bit(STATUS_PORT_STARTED,
- dai_data->status_mask);
- }
- break;
-
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
static int msm_dai_q6_dai_auxpcm_probe(struct snd_soc_dai *dai)
{
struct msm_dai_q6_dai_data *dai_data;
int rc = 0;
+ struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
- struct msm_dai_auxpcm_pdata *auxpcm_pdata =
- (struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+ auxpcm_pdata = (struct msm_dai_auxpcm_pdata *)
+ dev_get_drvdata(dai->dev);
+ dai->dev->platform_data = auxpcm_pdata;
mutex_lock(&aux_pcm_mutex);
- if (!auxpcm_plat_data)
- auxpcm_plat_data = auxpcm_pdata;
- else if (auxpcm_plat_data != auxpcm_pdata) {
-
- dev_err(dai->dev, "AUX PCM RX and TX devices does not have"
- " same platform data\n");
- return -EINVAL;
- }
-
/*
* The clk name for AUX PCM operation is passed as platform
* data to the cpu driver, since cpu drive is unaware of any
@@ -669,8 +253,8 @@
mutex_lock(&aux_pcm_mutex);
if (aux_pcm_count == 0) {
- dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. clean"
- " up and return\n", __func__, dai->id);
+ dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 0. clean up and return\n",
+ __func__, dai->id);
goto done;
}
@@ -681,14 +265,12 @@
__func__, dai->id, aux_pcm_count);
goto done;
} else if (aux_pcm_count < 0) {
- dev_err(dai->dev, "%s(): ERROR: dai->id %d"
- " aux_pcm_count = %d < 0\n",
+ dev_err(dai->dev, "%s(): ERROR: dai->id %d aux_pcm_count = %d < 0\n",
__func__, dai->id, aux_pcm_count);
goto done;
}
- dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d."
- "closing afe\n",
+ dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d.closing afe\n",
__func__, dai->id, aux_pcm_count);
rc = afe_close(PCM_RX); /* can block */
@@ -707,172 +289,6 @@
return 0;
}
-static int msm_dai_q6_dai_i2s_probe(struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data;
- int rc = 0;
-
- dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
- GFP_KERNEL);
-
- if (!dai_data) {
- dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
- dai->id);
- rc = -ENOMEM;
- goto rtn;
- } else
- dev_set_drvdata(dai->dev, dai_data);
-
- rc = msm_dai_q6_i2s_platform_data_validation(dai);
- if (rc != 0) {
- pr_err("%s: The msm_dai_q6_i2s_platform_data_validation failed\n",
- __func__);
- kfree(dai_data);
- }
-rtn:
- return rc;
-}
-
-static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data;
- int rc = 0;
-
- dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data),
- GFP_KERNEL);
-
- if (!dai_data) {
- dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
- dai->id);
- rc = -ENOMEM;
- } else
- dev_set_drvdata(dai->dev, dai_data);
-
- return rc;
-}
-
-static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
-{
- struct msm_dai_q6_dai_data *dai_data;
- int rc;
-
- dai_data = dev_get_drvdata(dai->dev);
-
- /* If AFE port is still up, close it */
- if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
- switch (dai->id) {
- case VOICE_PLAYBACK_TX:
- case VOICE_RECORD_TX:
- case VOICE_RECORD_RX:
- pr_debug("%s, stop pseudo port:%d\n",
- __func__, dai->id);
- rc = afe_stop_pseudo_port(dai->id);
- break;
- default:
- rc = afe_close(dai->id); /* can block */
- }
- if (IS_ERR_VALUE(rc))
- dev_err(dai->dev, "fail to close AFE port\n");
- clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
- }
- kfree(dai_data);
- snd_soc_unregister_dai(dai->dev);
-
- return 0;
-}
-
-static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
- int rc = 0;
-
- dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
- dai->id, fmt);
- switch (dai->id) {
- case PRIMARY_I2S_TX:
- case PRIMARY_I2S_RX:
- case MI2S_RX:
- case SECONDARY_I2S_RX:
- rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
- break;
- default:
- dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot)
-
-{
- int rc = 0;
- struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
- unsigned int i = 0;
-
- dev_dbg(dai->dev, "enter %s, id = %d\n", __func__,
- dai->id);
- switch (dai->id) {
- case SLIMBUS_0_RX:
- case SLIMBUS_1_RX:
- /* channel number to be between 128 and 255. For RX port
- * use channel numbers from 138 to 144, for TX port
- * use channel numbers from 128 to 137
- * For ports between MDM-APQ use channel numbers from 145
- */
- if (!rx_slot)
- return -EINVAL;
- for (i = 0; i < rx_num; i++) {
- dai_data->port_config.slim_sch.shared_ch_mapping[i] =
- rx_slot[i];
- pr_debug("%s: find number of channels[%d] ch[%d]\n",
- __func__, i,
- rx_slot[i]);
- }
- dai_data->port_config.slim_sch.num_channels = rx_num;
- pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
- rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
- dai_data->port_config.slim_sch.shared_ch_mapping[1]);
-
- break;
- case SLIMBUS_0_TX:
- case SLIMBUS_1_TX:
- /* channel number to be between 128 and 255. For RX port
- * use channel numbers from 138 to 144, for TX port
- * use channel numbers from 128 to 137
- * For ports between MDM-APQ use channel numbers from 145
- */
- if (!tx_slot)
- return -EINVAL;
- for (i = 0; i < tx_num; i++) {
- dai_data->port_config.slim_sch.shared_ch_mapping[i] =
- tx_slot[i];
- pr_debug("%s: find number of channels[%d] ch[%d]\n",
- __func__, i, tx_slot[i]);
- }
- dai_data->port_config.slim_sch.num_channels = tx_num;
- pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
- tx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
- dai_data->port_config.slim_sch.shared_ch_mapping[1]);
- break;
- default:
- dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
- rc = -EINVAL;
- break;
- }
- return rc;
-}
-
-static struct snd_soc_dai_ops msm_dai_q6_ops = {
- .prepare = msm_dai_q6_prepare,
- .trigger = msm_dai_q6_trigger,
- .hw_params = msm_dai_q6_hw_params,
- .shutdown = msm_dai_q6_shutdown,
- .set_fmt = msm_dai_q6_set_fmt,
- .set_channel_map = msm_dai_q6_set_channel_map,
-};
static struct snd_soc_dai_ops msm_dai_q6_auxpcm_ops = {
.prepare = msm_dai_q6_auxpcm_prepare,
@@ -881,184 +297,6 @@
.shutdown = msm_dai_q6_auxpcm_shutdown,
};
-static struct snd_soc_dai_driver msm_dai_q6_i2s_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 4,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_i2s_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_i2s_tx_dai = {
- .capture = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_afe_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_afe_tx_dai = {
- .capture = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_voice_playback_tx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_max = 48000,
- .rate_min = 8000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
- .capture = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_incall_record_dai = {
- .capture = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 2,
- .rate_min = 8000,
- .rate_max = 48000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_bt_sco_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 1,
- .rate_max = 16000,
- .rate_min = 8000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_bt_sco_tx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 1,
- .rate_max = 16000,
- .rate_min = 8000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_fm_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 2,
- .channels_max = 2,
- .rate_max = 48000,
- .rate_min = 8000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_fm_tx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
- SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 2,
- .channels_max = 2,
- .rate_max = 48000,
- .rate_min = 8000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_rx_dai = {
.playback = {
.rates = SNDRV_PCM_RATE_8000,
@@ -1087,140 +325,217 @@
.remove = msm_dai_q6_dai_auxpcm_remove,
};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
- .playback = {
- .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 1,
- .rate_min = 8000,
- .rate_max = 16000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
- .capture = {
- .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .channels_min = 1,
- .channels_max = 1,
- .rate_min = 8000,
- .rate_max = 16000,
- },
- .ops = &msm_dai_q6_ops,
- .probe = msm_dai_q6_dai_probe,
- .remove = msm_dai_q6_dai_remove,
-};
-
-static __devinit int msm_dai_q6_dev_probe(struct platform_device *pdev)
+static int msm_auxpcm_dev_probe(struct platform_device *pdev)
{
+ int id;
+ void *plat_data;
int rc = 0;
+ if (pdev->dev.parent == NULL)
+ return -ENODEV;
+
+ plat_data = dev_get_drvdata(pdev->dev.parent);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-auxpcm-dev-id", &id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-auxpcm-dev-id missing in DT node\n",
+ __func__);
+ return rc;
+ }
+
+ pdev->id = id;
+ dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6", id);
dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
- switch (pdev->id) {
- case PRIMARY_I2S_RX:
- case SECONDARY_I2S_RX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_rx_dai);
- break;
- case PRIMARY_I2S_TX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_i2s_tx_dai);
- break;
+ dev_set_drvdata(&pdev->dev, plat_data);
+
+ switch (id) {
case AFE_PORT_ID_PRIMARY_PCM_RX:
rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_aux_pcm_rx_dai);
+ &msm_dai_q6_aux_pcm_rx_dai);
break;
case AFE_PORT_ID_PRIMARY_PCM_TX:
rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_aux_pcm_tx_dai);
- break;
- case MI2S_RX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_i2s_rx_dai);
- break;
- case SLIMBUS_0_RX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_slimbus_rx_dai);
- break;
- case SLIMBUS_0_TX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_slimbus_tx_dai);
- break;
-
- case SLIMBUS_1_RX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_slimbus_1_rx_dai);
- break;
- case SLIMBUS_1_TX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_slimbus_1_tx_dai);
- break;
- case INT_BT_SCO_RX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_bt_sco_rx_dai);
- break;
- case INT_BT_SCO_TX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_bt_sco_tx_dai);
- break;
- case INT_FM_RX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_rx_dai);
- break;
- case INT_FM_TX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_fm_tx_dai);
- break;
- case RT_PROXY_DAI_001_RX:
- case RT_PROXY_DAI_002_RX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_rx_dai);
- break;
- case RT_PROXY_DAI_001_TX:
- case RT_PROXY_DAI_002_TX:
- rc = snd_soc_register_dai(&pdev->dev, &msm_dai_q6_afe_tx_dai);
- break;
- case VOICE_PLAYBACK_TX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_voice_playback_tx_dai);
- break;
- case VOICE_RECORD_RX:
- case VOICE_RECORD_TX:
- rc = snd_soc_register_dai(&pdev->dev,
- &msm_dai_q6_incall_record_dai);
+ &msm_dai_q6_aux_pcm_tx_dai);
break;
default:
rc = -ENODEV;
break;
}
+
return rc;
}
-static __devexit int msm_dai_q6_dev_remove(struct platform_device *pdev)
+static int msm_auxpcm_resource_probe(
+ struct platform_device *pdev)
+{
+ int rc = 0;
+ struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
+ u32 property_val;
+
+ auxpcm_pdata = kzalloc(sizeof(struct msm_dai_auxpcm_pdata),
+ GFP_KERNEL);
+
+ if (!auxpcm_pdata) {
+ dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_string(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-clk",
+ &auxpcm_pdata->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-clk missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-mode", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-mode missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->mode = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-sync", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-sync missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->sync = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-frame", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-frame missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->frame = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-quant", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-quant missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->quant = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-slot", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-slot missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->slot = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-data", &property_val);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-data missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ auxpcm_pdata->data = (u16)property_val;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-cpudai-auxpcm-pcm-clk-rate",
+ &auxpcm_pdata->pcm_clk_rate);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: qcom,msm-cpudai-auxpcm-pcm-clk-rate missing in DT node\n",
+ __func__);
+ goto fail_free_plat;
+ }
+ platform_set_drvdata(pdev, auxpcm_pdata);
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+ __func__, rc);
+ goto fail_free_plat;
+ }
+
+ return rc;
+
+fail_free_plat:
+ kfree(auxpcm_pdata);
+ return rc;
+}
+
+static int msm_auxpcm_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
-static struct platform_driver msm_dai_q6_driver = {
- .probe = msm_dai_q6_dev_probe,
- .remove = msm_dai_q6_dev_remove,
+static int msm_auxpcm_resource_remove(
+ struct platform_device *pdev)
+{
+ void *auxpcm_pdata;
+
+ auxpcm_pdata = dev_get_drvdata(&pdev->dev);
+ kfree(auxpcm_pdata);
+
+ return 0;
+}
+
+static const struct of_device_id msm_auxpcm_resource_dt_match[] = {
+ { .compatible = "qcom,msm-auxpcm-resource", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_auxpcm_resource_dt_match);
+
+static const struct of_device_id msm_auxpcm_dev_dt_match[] = {
+ { .compatible = "qcom,msm-auxpcm-dev", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_auxpcm_dev_dt_match);
+
+static struct platform_driver msm_auxpcm_dev = {
+ .probe = msm_auxpcm_dev_probe,
+ .remove = msm_auxpcm_dev_remove,
.driver = {
- .name = "msm-dai-q6",
+ .name = "msm-auxpcm-dev",
.owner = THIS_MODULE,
+ .of_match_table = msm_auxpcm_dev_dt_match,
},
};
+static struct platform_driver msm_auxpcm_resource = {
+ .probe = msm_auxpcm_resource_probe,
+ .remove = msm_auxpcm_resource_remove,
+ .driver = {
+ .name = "msm-auxpcm-resource",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_auxpcm_resource_dt_match,
+ },
+};
+
static int __init msm_dai_q6_init(void)
{
- return platform_driver_register(&msm_dai_q6_driver);
+ int rc;
+
+ rc = platform_driver_register(&msm_auxpcm_dev);
+ if (rc)
+ goto fail;
+
+ rc = platform_driver_register(&msm_auxpcm_resource);
+
+ if (rc) {
+ pr_err("%s: fail to register cpu dai driver\n", __func__);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ }
+fail:
+ return rc;
}
module_init(msm_dai_q6_init);
static void __exit msm_dai_q6_exit(void)
{
- platform_driver_unregister(&msm_dai_q6_driver);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ platform_driver_unregister(&msm_auxpcm_resource);
}
module_exit(msm_dai_q6_exit);
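
The rework above replaces the single "msm-dai-q6" platform driver with a parent
"msm-auxpcm-resource" driver plus per-port "msm-auxpcm-dev" children: the parent
parses the shared qcom,msm-cpudai-auxpcm-* properties once, stores the result as
drvdata, and calls of_platform_populate() so each child DT node probes as its own
platform device; the child probe then reaches the shared data through
pdev->dev.parent. A minimal sketch of that pattern, assuming the same 3.4-era
driver-core APIs; every "example_*" symbol and the "example,pcm-clk-rate"
property are illustrative, not taken from the patch:

/*
 * Sketch only: parent parses shared DT data once, children inherit it.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

struct example_shared_pdata {
	u32 pcm_clk_rate;	/* stands in for the shared auxpcm fields */
};

/* Parent ("resource") driver: parse shared DT properties exactly once. */
static int example_resource_probe(struct platform_device *pdev)
{
	struct example_shared_pdata *pdata;
	int rc;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	rc = of_property_read_u32(pdev->dev.of_node,
				  "example,pcm-clk-rate",
				  &pdata->pcm_clk_rate);
	if (rc)
		return rc;

	platform_set_drvdata(pdev, pdata);

	/* Creates a platform device for every child node of this one. */
	return of_platform_populate(pdev->dev.of_node, NULL, NULL,
				    &pdev->dev);
}

/* Child ("dev") driver: fetch the shared data through the parent. */
static int example_dev_probe(struct platform_device *pdev)
{
	struct example_shared_pdata *pdata;

	if (!pdev->dev.parent)
		return -ENODEV;

	pdata = dev_get_drvdata(pdev->dev.parent);
	dev_dbg(&pdev->dev, "pcm_clk_rate %u\n", pdata->pcm_clk_rate);
	return 0;
}

The patch itself uses kzalloc() with an explicit kfree() in the resource remove
path; devm_kzalloc() in the sketch only shortens the error handling.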
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 05ef2ce..1ac872d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -27,6 +27,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <linux/android_pmem.h>
+#include <linux/of_device.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
@@ -562,6 +563,9 @@
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-pcm-lpa");
+
dev_info(&pdev->dev, "%s: dev name %s\n",
__func__, dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
@@ -574,10 +578,17 @@
return 0;
}
+static const struct of_device_id msm_pcm_lpa_dt_match[] = {
+ {.compatible = "qcom,msm-pcm-lpa"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_lpa_dt_match);
+
static struct platform_driver msm_pcm_driver = {
.driver = {
.name = "msm-pcm-lpa",
.owner = THIS_MODULE,
+ .of_match_table = msm_pcm_lpa_dt_match,
},
.probe = msm_pcm_probe,
.remove = __devexit_p(msm_pcm_remove),
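
A note on the dev_set_name() calls this series adds to msm_pcm_probe() and its
siblings: a device instantiated from DT gets a bus-generated name derived from
the node name and unit address, while ASoC machine drivers of this era bind
platforms, CPU DAIs and codecs purely by name string, so each probe pins the
device back to the fixed name the sound-card definition expects. A hedged sketch
of the consuming side; the link, codec and CPU DAI names below are hypothetical
placeholders, not values from this patch:

#include <sound/soc.h>

/* All names below are hypothetical placeholders, not from this patch. */
static struct snd_soc_dai_link example_dai_link = {
	.name		= "example-media",
	.stream_name	= "example stream",
	.cpu_dai_name	= "msm-dai-q6.4106",	/* hypothetical port id */
	.platform_name	= "msm-pcm-dsp",	/* must equal dev_name() */
	.codec_name	= "msm-stub-codec.1",	/* hypothetical instance */
	.codec_dai_name	= "msm-stub-rx",	/* hypothetical DAI */
};

If the DT-probed device kept its bus-generated name, this name-based lookup
would no longer match and the card would never bind.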
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index f94e6c1..c9f9593 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -28,6 +28,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <linux/android_pmem.h>
+#include <linux/of_device.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
@@ -163,8 +164,7 @@
break;
}
if (prtd->mmap_flag) {
- pr_debug("%s:writing %d bytes"
- " of buffer to dsp\n",
+ pr_debug("%s:writing %d bytes of buffer to dsp\n",
__func__,
prtd->pcm_count);
q6asm_write_nolock(prtd->audio_client,
@@ -172,8 +172,7 @@
0, 0, NO_TIMESTAMP);
} else {
while (atomic_read(&prtd->out_needed)) {
- pr_debug("%s:writing %d bytes"
- " of buffer to dsp\n",
+ pr_debug("%s:writing %d bytes of buffer to dsp\n",
__func__,
prtd->pcm_count);
q6asm_write_nolock(prtd->audio_client,
@@ -626,17 +625,17 @@
dir = IN;
else
dir = OUT;
-pr_err("%s: before buf alloc\n", __func__);
+ pr_debug("%s: before buf alloc\n", __func__);
ret = q6asm_audio_client_buf_alloc_contiguous(dir,
prtd->audio_client,
runtime->hw.period_bytes_min,
runtime->hw.periods_max);
if (ret < 0) {
- pr_err("Audio Start: Buffer Allocation failed "
- "rc = %d\n", ret);
+ pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
+ ret);
return -ENOMEM;
}
-pr_err("%s: after buf alloc\n", __func__);
+ pr_debug("%s: after buf alloc\n", __func__);
buf = prtd->audio_client->port[dir].buf;
if (buf == NULL || buf[0].data == NULL)
return -ENOMEM;
@@ -684,6 +683,9 @@
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-pcm-dsp");
+
pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
&msm_soc_platform);
@@ -694,11 +696,17 @@
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
+static const struct of_device_id msm_pcm_dt_match[] = {
+ {.compatible = "qcom,msm-pcm-dsp"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_dt_match);
static struct platform_driver msm_pcm_driver = {
.driver = {
.name = "msm-pcm-dsp",
.owner = THIS_MODULE,
+ .of_match_table = msm_pcm_dt_match,
},
.probe = msm_pcm_probe,
.remove = __devexit_p(msm_pcm_remove),
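
Beyond the string-concatenation cleanups, two hunks above demote stray,
unindented pr_err() trace calls ("before buf alloc" / "after buf alloc") to
properly indented pr_debug(). The severity choice is the real fix: pr_err()
always lands in dmesg, while pr_debug() is silent in production builds. A
hedged sketch of the resulting convention, with an illustrative helper:

#include <linux/kernel.h>

/* Illustrative helper showing the severity split applied above. */
static void example_write_trace(int bytes, int rc)
{
	/*
	 * Trace output: compiled out unless DEBUG is defined, or enabled
	 * per-callsite at runtime under CONFIG_DYNAMIC_DEBUG.
	 */
	pr_debug("%s: writing %d bytes of buffer to dsp\n", __func__, bytes);

	/* A genuine failure still always reaches the kernel log. */
	if (rc < 0)
		pr_err("%s: write failed, rc = %d\n", __func__, rc);
}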
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index b7aaf01..67ee8e4 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -2153,6 +2154,9 @@
static __devinit int msm_routing_pcm_probe(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-pcm-routing");
+
dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
&msm_soc_routing_platform);
@@ -2164,10 +2168,17 @@
return 0;
}
+static const struct of_device_id msm_pcm_routing_dt_match[] = {
+ {.compatible = "qcom,msm-pcm-routing"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_pcm_routing_dt_match);
+
static struct platform_driver msm_routing_pcm_driver = {
.driver = {
.name = "msm-pcm-routing",
.owner = THIS_MODULE,
+ .of_match_table = msm_pcm_routing_dt_match,
},
.probe = msm_routing_pcm_probe,
.remove = __devexit_p(msm_routing_pcm_remove),