Merge "msm_serial_hs_lite: Read back the NCF register after writing" into msm-3.0
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
new file mode 100644
index 0000000..786635f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -0,0 +1,153 @@
+Qualcomm RPM Regulators
+
+rpm-regulator-smd is a regulator driver which supports regulators inside of
+PMICs which are controlled by the RPM processor. Communication with the RPM
+processor takes place over SMD.
+
+Required structure:
+- RPM regulators must be described in two levels of device nodes.  The first
+ level describes the interface with the RPM. The second level describes
+ properties of one regulator framework interface (of potentially many) to
+ the regulator.
+
+[First Level Nodes]
+
+Required properties:
+- compatible: Must be "qcom,rpm-regulator-smd-resource"
+- qcom,resource-name: Resource name string for this regulator to be used in RPM
+ transactions. Length is 4 characters max.
+- qcom,resource-id: Resource instance ID for this regulator to be used in RPM
+ transactions.
+- qcom,regulator-type: Type of this regulator. Supported values are:
+ 0 = LDO
+ 1 = SMPS
+ 2 = VS
+ 3 = NCP
+
+Optional properties:
+- qcom,allow-atomic: Flag specifying if atomic access is allowed for this
+ regulator. Supported values are:
+ 0 or not present = mutex locks used
+ 1 = spinlocks used
+- qcom,enable-time: Time in us to delay after enabling the regulator
+- qcom,hpm-min-load: Load current in uA which corresponds to the minimum load
+ which requires the regulator to be in high power mode.
+
+[Second Level Nodes]
+
+Required properties:
+- compatible: Must be "qcom,rpm-regulator-smd"
+- regulator-name: A string used as a descriptive name for regulator outputs
+- qcom,set:                  Specifies which sets requests made with this
+ regulator interface should be sent to. Regulator
+ requests sent in the active set take effect immediately.
+ Requests sent in the sleep set take effect when the Apps
+ processor transitions into RPM assisted power collapse.
+ Supported values are:
+ 1 = Active set only
+ 2 = Sleep set only
+ 3 = Both active and sleep sets
+
+
+
+Optional properties:
+- parent-supply: phandle to the parent supply/regulator node
+- qcom,system-load: Load in uA present on regulator that is not
+ captured by any consumer request
+The following properties specify initial values for parameters to be sent to the
+RPM in regulator requests.
+- qcom,init-enable: 0 = regulator disabled
+ 1 = regulator enabled
+- qcom,init-voltage: Voltage in uV
+- qcom,init-current: Current in mA
+- qcom,init-ldo-mode: Operating mode to be used with LDO regulators
+ Supported values are:
+ 0 = mode determined by current requests
+ 1 = force HPM (NPM)
+- qcom,init-smps-mode: Operating mode to be used with SMPS regulators
+ Supported values are:
+ 0 = auto; hardware determines mode
+ 1 = mode determined by current requests
+ 2 = force HPM (PWM)
+- qcom,init-pin-ctrl-enable: Bit mask specifying which hardware pins should be
+ used to enable the regulator, if any; supported
+ bits are:
+ 0 = ignore all hardware enable signals
+ BIT(0) = follow HW0_EN signal
+ BIT(1) = follow HW1_EN signal
+ BIT(2) = follow HW2_EN signal
+ BIT(3) = follow HW3_EN signal
+- qcom,init-pin-ctrl-mode: Bit mask specifying which hardware pins should be
+ used to force the regulator into high power
+ mode, if any. Supported bits are:
+ 0 = ignore all hardware enable signals
+ BIT(0) = follow HW0_EN signal
+ BIT(1) = follow HW1_EN signal
+ BIT(2) = follow HW2_EN signal
+ BIT(3) = follow HW3_EN signal
+ BIT(4) = follow PMIC awake state
+- qcom,init-frequency: Switching frequency in MHz for SMPS regulators.
+ Supported values are:
+ 0 = Don't care about frequency used
+ 1 = 19.20
+ 2 = 9.60
+ 3 = 6.40
+ 4 = 4.80
+ 5 = 3.84
+ 6 = 3.20
+ 7 = 2.74
+ 8 = 2.40
+ 9 = 2.13
+ 10 = 1.92
+ 11 = 1.75
+ 12 = 1.60
+ 13 = 1.48
+ 14 = 1.37
+ 15 = 1.28
+ 16 = 1.20
+- qcom,init-head-room: Voltage head room in uV required for the
+ regulator
+- qcom,init-quiet-mode: Specify that quiet mode is needed for an SMPS
+ regulator in order to have lower output noise.
+ Supported values are:
+ 0 = No quiet mode
+ 1 = Quiet mode
+ 2 = Super quiet mode
+- qcom,init-freq-reason: Consumer requiring specified frequency for an
+ SMPS regulator. Supported values are:
+ 0 = None
+ 1 = Bluetooth
+ 2 = GPS
+ 4 = WLAN
+ 8 = WAN
+
+All properties specified within the core regulator framework can also be used in
+second level nodes. These bindings can be found in:
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Example:
+
+rpm-regulator-smpb1 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ pm8841_s1: regulator-s1 {
+ regulator-name = "8841_s1";
+ qcom,set = <3>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,init-voltage = <1150000>;
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ pm8841_s1_ao: regulator-s1-ao {
+ regulator-name = "8841_s1_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1150000>;
+ compatible = "qcom,rpm-regulator-smd";
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
new file mode 100644
index 0000000..8ebd3ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
@@ -0,0 +1,30 @@
+Resource Power Manager(RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc. The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements. RPM accepts resource
+requests from multiple RPM masters. It arbitrates and aggregates the
+requests, and configures the shared resources. The RPM masters are
+the application processor, the modem processor, as well as hardware
+accelerators. The RPM driver communicates with the hardware engine using
+SMD.
+
+The devicetree representation of the RPM block should be:
+
+Required properties
+
+- compatible: "qcom,rpm-smd"
+- rpm-channel-name: The string corresponding to the channel name of the
+ peripheral subsystem
+- rpm-channel-type: The internal SMD edge for this subsystem found in
+ <mach/msm_smd.h>
+
+Example:
+
+	qcom,rpm-smd {
+		compatible = "qcom,rpm-smd";
+		rpm-channel-name = "rpm_requests";
+		rpm-channel-type = <15>; /* SMD_APPS_RPM */
+	};
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
new file mode 100644
index 0000000..11af7a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -0,0 +1,15 @@
+* Qualcomm MSM VIDC
+
+Required properties:
+- compatible : one of:
+ - "qcom,msm-vidc"
+- reg : offset and length of the register set for the device.
+- interrupts : should contain the vidc interrupt.
+
+Example:
+
+ qcom,vidc@fdc00000 {
+ compatible = "qcom,msm-vidc";
+ reg = <0xfdc00000 0xff000>;
+ interrupts = <0 44 0>;
+ };
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
index 002431a..308f992 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-lpass.txt
@@ -7,17 +7,15 @@
Required properties:
- compatible: Must be "qcom,pil-q6v5-lpass"
-- reg: Three pairs of physical base addresses and region sizes
+- reg: Two pairs of physical base addresses and region sizes
of memory mapped registers. The first region corresponds
- to QDSP6SS_PUB, the second corresponds to LPASS_CC, and
- the third to LPASS_HALTREQ.
+ to QDSP6SS_PUB, and the second to LPASS_HALTREQ.
- qcom,firmware-name: Base name of the firmware image. Ex. "lpass"
Example:
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfe000000 0x40000>,
<0xfd485100 0x00010>;
qcom,firmware-name = "lpass";
diff --git a/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi b/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi
new file mode 100644
index 0000000..019112a
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8x41-rpm-regulator.dtsi
@@ -0,0 +1,587 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,rpm-smd {
+ rpm-regulator-smpb1 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s1 {
+ regulator-name = "8841_s1";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpb2 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s2 {
+ regulator-name = "8841_s2";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpb3 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s3 {
+ regulator-name = "8841_s3";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpb4 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s4 {
+ regulator-name = "8841_s4";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpa1 {
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s1 {
+ regulator-name = "8941_s1";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpa2 {
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s2 {
+ regulator-name = "8941_s2";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-s3 {
+ regulator-name = "8941_s3";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l1 {
+ regulator-name = "8941_l1";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l2 {
+ regulator-name = "8941_l2";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l3 {
+ regulator-name = "8941_l3";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l4 {
+ regulator-name = "8941_l4";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <5>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l5 {
+ regulator-name = "8941_l5";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <6>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l6 {
+ regulator-name = "8941_l6";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <7>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l7 {
+ regulator-name = "8941_l7";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <8>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l8 {
+ regulator-name = "8941_l8";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <9>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l9 {
+ regulator-name = "8941_l9";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <10>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l10 {
+ regulator-name = "8941_l10";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa11 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <11>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l11 {
+ regulator-name = "8941_l11";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa12 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <12>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l12 {
+ regulator-name = "8941_l12";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa13 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <13>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l13 {
+ regulator-name = "8941_l13";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <14>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l14 {
+ regulator-name = "8941_l14";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <15>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l15 {
+ regulator-name = "8941_l15";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <16>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l16 {
+ regulator-name = "8941_l16";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <17>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l17 {
+ regulator-name = "8941_l17";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <18>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l18 {
+ regulator-name = "8941_l18";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa19 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <19>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l19 {
+ regulator-name = "8941_l19";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa20 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <20>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l20 {
+ regulator-name = "8941_l20";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa21 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <21>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l21 {
+ regulator-name = "8941_l21";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa22 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <22>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l22 {
+ regulator-name = "8941_l22";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa23 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <23>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l23 {
+ regulator-name = "8941_l23";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-ldoa24 {
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <24>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-l24 {
+ regulator-name = "8941_l24";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ /* TODO: find out correct resource names for LVS vs MVS */
+ rpm-regulator-vsa1 {
+ qcom,resource-name = "vsa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <2>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-lvs1 {
+ regulator-name = "8941_lvs1";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-vsa2 {
+ qcom,resource-name = "vsa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <2>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-lvs2 {
+ regulator-name = "8941_lvs2";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-vsa3 {
+ qcom,resource-name = "vsa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <2>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-lvs3 {
+ regulator-name = "8941_lvs3";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-vsa4 {
+ qcom,resource-name = "vsa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <2>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-mvs1 {
+ regulator-name = "8941_mvs1";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+
+ rpm-regulator-vsa5 {
+ qcom,resource-name = "vsa";
+ qcom,resource-id = <5>;
+ qcom,regulator-type = <2>;
+ compatible = "qcom,rpm-regulator-smd-resource";
+ status = "disabled";
+
+ regulator-mvs2 {
+ regulator-name = "8941_mvs2";
+ qcom,set = <3>;
+ status = "disabled";
+ compatible = "qcom,rpm-regulator-smd";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msmcopper-regulator.dtsi b/arch/arm/boot/dts/msmcopper-regulator.dtsi
index 393d48b..bb26e00 100644
--- a/arch/arm/boot/dts/msmcopper-regulator.dtsi
+++ b/arch/arm/boot/dts/msmcopper-regulator.dtsi
@@ -313,6 +313,7 @@
regulator-max-microvolt = <1150000>;
qcom,enable-time = <500>;
qcom,pull-down-enable = <1>;
+ regulator-always-on;
status = "okay";
};
@@ -358,4 +359,36 @@
};
};
};
+
+ krait0_vreg: regulator@f9088000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait0";
+ reg = <0xf9088000 0x1000>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ krait1_vreg: regulator@f9098000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait1";
+ reg = <0xf9098000 0x1000>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ krait2_vreg: regulator@f90a8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait2";
+ reg = <0xf90a8000 0x1000>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ krait3_vreg: regulator@f90b8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait3";
+ reg = <0xf90b8000 0x1000>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ };
};
diff --git a/arch/arm/boot/dts/msmcopper-rumi.dts b/arch/arm/boot/dts/msmcopper-rumi.dts
index 8c00535..d6e23ad 100644
--- a/arch/arm/boot/dts/msmcopper-rumi.dts
+++ b/arch/arm/boot/dts/msmcopper-rumi.dts
@@ -30,11 +30,19 @@
status = "disable";
};
- qcom,sdcc@f980b000 {
+ qcom,sdcc@f9824000 {
status = "disable";
};
- qcom,sdcc@f984b000 {
+ qcom,sdcc@f9864000 {
+ status = "disable";
+ };
+
+ qcom,sdcc@f98a4000 {
+ status = "disable";
+ };
+
+ qcom,sdcc@f98e4000 {
status = "disable";
};
diff --git a/arch/arm/boot/dts/msmcopper-sim.dts b/arch/arm/boot/dts/msmcopper-sim.dts
index ab6b8ba..ae3f2dd 100644
--- a/arch/arm/boot/dts/msmcopper-sim.dts
+++ b/arch/arm/boot/dts/msmcopper-sim.dts
@@ -17,4 +17,22 @@
/ {
model = "Qualcomm MSM Copper Simulator";
compatible = "qcom,msmcopper-sim", "qcom,msmcopper";
+
+ qcom,sdcc@f9824000 {
+ qcom,sdcc-disable_cmd23;
+ };
+
+ qcom,sdcc@f98a4000 {
+ status = "disable";
+ };
+
+ qcom,sdcc@f9864000 {
+ qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sdcc-sup-voltages = <2950 2950>;
+ qcom,sdcc-disable_cmd23;
+ };
+
+ qcom,sdcc@f98e4000 {
+ status = "disable";
+ };
};
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index db44d13..8e74aac 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -12,6 +12,7 @@
/include/ "skeleton.dtsi"
/include/ "msmcopper_pm.dtsi"
+/include/ "msm-pm8x41-rpm-regulator.dtsi"
/include/ "msm-pm8841.dtsi"
/include/ "msm-pm8941.dtsi"
/include/ "msmcopper-regulator.dtsi"
@@ -44,6 +45,12 @@
clock-frequency = <19200000>;
};
+ qcom,vidc@fdc00000 {
+ compatible = "qcom,msm-vidc";
+ reg = <0xfdc00000 0xff000>;
+ interrupts = <0 44 0>;
+ };
+
serial@f991f000 {
compatible = "qcom,msm-lsuart-v14";
reg = <0xf991f000 0x1000>;
@@ -69,30 +76,50 @@
qcom,hsusb-otg-otg-control = <1>;
};
- qcom,sdcc@f980b000 {
+ qcom,sdcc@f9824000 {
cell-index = <1>;
compatible = "qcom,msm-sdcc";
- reg = <0xf980b000 0x1000>;
+ reg = <0xf9824000 0x1000>;
interrupts = <0 123 0>;
- qcom,sdcc-clk-rates = <400000 25000000 50000000 96000000 192000000>;
- qcom,sdcc-sup-voltages = <3300 3300>;
+ qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sdcc-sup-voltages = <2950 2950>;
qcom,sdcc-bus-width = <8>;
qcom,sdcc-hs200;
qcom,sdcc-nonremovable;
- qcom,sdcc-disable_cmd23;
};
- qcom,sdcc@f984b000 {
+ qcom,sdcc@f98a4000 {
+ cell-index = <2>;
+ compatible = "qcom,msm-sdcc";
+ reg = <0xf98a4000 0x1000>;
+ interrupts = <0 125 0>;
+
+ qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sdcc-sup-voltages = <2950 2950>;
+ qcom,sdcc-bus-width = <4>;
+ };
+
+ qcom,sdcc@f9864000 {
cell-index = <3>;
compatible = "qcom,msm-sdcc";
- reg = <0xf984b000 0x1000>;
+ reg = <0xf9864000 0x1000>;
interrupts = <0 127 0>;
- qcom,sdcc-clk-rates = <400000 25000000 50000000>;
- qcom,sdcc-sup-voltages = <3300 3300>;
+ qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
+ qcom,sdcc-sup-voltages = <1800 1800>;
qcom,sdcc-bus-width = <4>;
- qcom,sdcc-disable_cmd23;
+ };
+
+ qcom,sdcc@f98e4000 {
+ cell-index = <4>;
+ compatible = "qcom,msm-sdcc";
+ reg = <0xf98e4000 0x1000>;
+ interrupts = <0 129 0>;
+
+ qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
+ qcom,sdcc-sup-voltages = <1800 1800>;
+ qcom,sdcc-bus-width = <4>;
};
qcom,sps@f9980000 {
@@ -108,7 +135,7 @@
compatible = "qcom,spi-qup-v2";
reg = <0xf9924000 0x1000>;
interrupts = <0 96 0>;
- spi-max-frequency = <24000000>;
+ spi-max-frequency = <25000000>;
};
slim@fe12f000 {
@@ -250,10 +277,9 @@
qcom,lpass@fe200000 {
compatible = "qcom,pil-q6v5-lpass";
reg = <0xfe200000 0x00100>,
- <0xfe000000 0x40000>,
<0xfd485100 0x00010>;
- qcom,firmware-name = "lpass";
+ qcom,firmware-name = "adsp";
};
qcom,pronto@fb21b000 {
@@ -265,4 +291,14 @@
qcom,firmware-name = "wcnss";
};
+
+ qcom,ocmem@fdd00000 {
+ compatible = "qcom,msm_ocmem";
+ };
+
+ qcom,rpm-smd {
+ compatible = "qcom,rpm-smd";
+ rpm-channel-name = "rpm_requests";
+ rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ };
};
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 875b479..99747ba 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -212,10 +212,13 @@
{
unsigned int i;
void __iomem *base = gic_data_dist_base(gic);
+#ifdef CONFIG_ARCH_MSM8625
+ unsigned long flags;
+#endif
for (i = 0; i * 32 < gic->max_irq; i++) {
#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
#endif
gic->enabled_irqs[i]
= readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
@@ -225,7 +228,7 @@
writel_relaxed(gic->wakeup_irqs[i],
base + GIC_DIST_ENABLE_SET + i * 4);
#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
#endif
}
mb();
@@ -248,17 +251,28 @@
u32 enabled;
unsigned long pending[32];
void __iomem *base = gic_data_dist_base(gic);
+#ifdef CONFIG_ARCH_MSM8625
+ unsigned long flags;
+#endif
if (!msm_show_resume_irq_mask)
return;
+#ifdef CONFIG_ARCH_MSM8625
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
+#else
raw_spin_lock(&irq_controller_lock);
+#endif
for (i = 0; i * 32 < gic->max_irq; i++) {
enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
pending[i] &= enabled;
}
- raw_spin_unlock(&irq_controller_lock);
+#ifdef CONFIG_ARCH_MSM8625
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+#else
+	raw_spin_unlock(&irq_controller_lock);
+#endif
for (i = find_first_bit(pending, gic->max_irq);
i < gic->max_irq;
@@ -272,11 +286,13 @@
{
unsigned int i;
void __iomem *base = gic_data_dist_base(gic);
-
+#ifdef CONFIG_ARCH_MSM8625
+ unsigned long flags;
+#endif
gic_show_resume_irq(gic);
for (i = 0; i * 32 < gic->max_irq; i++) {
#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
#endif
/* disable all of them */
writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4);
@@ -284,7 +300,7 @@
writel_relaxed(gic->enabled_irqs[i],
base + GIC_DIST_ENABLE_SET + i * 4);
#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
#endif
}
mb();
@@ -1128,8 +1144,9 @@
unsigned long value = 0;
struct gic_chip_data *gic = &gic_data[0];
void __iomem *base = gic_data_dist_base(gic);
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
/*
* PPI and SGI to be included.
* MSM8625_INT_A9_M2A_5 needs to be ignored, as A9_M2A_5
@@ -1146,13 +1163,14 @@
for (bit = 0; bit < 32; bit++) {
bit = find_next_bit(&value, 32, bit);
if ((bit + 32 * i) != MSM8625_INT_A9_M2A_5) {
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(
+ &irq_controller_lock, flags);
return 1;
}
}
}
}
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return 0;
}
@@ -1165,14 +1183,13 @@
gic_cpu_save(0);
gic_dist_save(0);
- /* Disable all the Interrupts, if we enter from idle pc */
- if (from_idle) {
- for (i = 0; (i * 32) < gic->max_irq; i++) {
- raw_spin_lock(&irq_controller_lock);
- writel_relaxed(0xffffffff, base
- + GIC_DIST_ENABLE_CLEAR + i * 4);
- raw_spin_unlock(&irq_controller_lock);
- }
+
+ /* Disable all the Interrupts, before we enter pc */
+ for (i = 0; (i * 32) < gic->max_irq; i++) {
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(0xffffffff, base
+ + GIC_DIST_ENABLE_CLEAR + i * 4);
+ raw_spin_unlock(&irq_controller_lock);
}
}
@@ -1193,8 +1210,9 @@
struct gic_chip_data *gic = &gic_data[0];
void __iomem *base = gic_data_dist_base(gic);
unsigned int value = 0;
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
value = __raw_readl(base + GIC_DIST_ACTIVE_BIT + 0x4);
value |= BIT(8);
@@ -1220,6 +1238,6 @@
value |= BIT(8);
__raw_writel(value, base + GIC_DIST_PENDING_SET + 0x4);
mb();
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index 64451eb..de469da 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -44,6 +44,7 @@
# CONFIG_MSM_HW3D is not set
CONFIG_MSM_PIL_LPASS_QDSP6V5=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_MSM_OCMEM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -125,6 +126,12 @@
CONFIG_REGULATOR=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_QPNP=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_RC_CORE is not set
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+# CONFIG_RADIO_ADAPTERS is not set
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_FB=y
@@ -195,3 +202,4 @@
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRC_CCITT=y
CONFIG_LIBCRC32C=y
+CONFIG_MSM_TZ_LOG=y
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index f195d68..9a2fa39 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -241,7 +241,7 @@
CONFIG_MSM_CAMERA_V4L2=y
CONFIG_OV5647=y
CONFIG_AD5046_ACT=y
-CONFIG_WEBCAM_OV7692_QRD=y
+CONFIG_OV7692=y
CONFIG_WEBCAM_OV9726=y
CONFIG_MT9E013=y
CONFIG_S5K4E1=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index bc350a7..869fb6d 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -242,7 +242,7 @@
CONFIG_MSM_CAMERA_V4L2=y
CONFIG_OV5647=y
CONFIG_AD5046_ACT=y
-CONFIG_WEBCAM_OV7692_QRD=y
+CONFIG_OV7692=y
CONFIG_WEBCAM_OV9726=y
CONFIG_MT9E013=y
CONFIG_S5K4E1=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index c39b301..4cbd1d1 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -61,7 +61,9 @@
# CONFIG_MSM_PROC_COMM is not set
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_PCIE=y
CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_RMNET_SMUX=y
CONFIG_MSM_DSPS=y
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
@@ -92,6 +94,8 @@
CONFIG_MSM_DCVS=y
CONFIG_MSM_HSIC_SYSMON=y
CONFIG_STRICT_MEMORY_RWX=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -227,7 +231,7 @@
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=y
CONFIG_BT_HCISMD=y
-CONFIG_CFG80211=y
+CONFIG_CFG80211=m
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
@@ -279,6 +283,9 @@
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_PMIC8XXX_PWRKEY=y
# CONFIG_LEGACY_PTYS is not set
+CONFIG_N_SMUX=y
+CONFIG_N_SMUX_LOOPBACK=y
+CONFIG_SMUX_CTL=y
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_HSL=y
CONFIG_SERIAL_MSM_HSL_CONSOLE=y
@@ -311,6 +318,7 @@
CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_PM8821_CORE=y
CONFIG_MFD_PM8038_CORE=y
+CONFIG_MFD_PM8XXX_SPK=y
CONFIG_MFD_PM8XXX_BATT_ALARM=y
CONFIG_WCD9304_CODEC=y
CONFIG_WCD9310_CODEC=y
@@ -320,6 +328,7 @@
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_USER_RC_INPUT=y
CONFIG_IR_GPIO_CIR=y
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
@@ -329,6 +338,7 @@
CONFIG_MT9M114=y
CONFIG_IMX074_ACT=y
CONFIG_MSM_CAMERA_FLASH_SC628A=y
+CONFIG_MSM_CAMERA_FLASH_TPS61310=y
CONFIG_OV2720=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index dedef45..a901684 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -60,7 +60,9 @@
# CONFIG_MSM_PROC_COMM is not set
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_PCIE=y
CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_RMNET_SMUX=y
CONFIG_MSM_DSPS=y
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
@@ -99,6 +101,8 @@
CONFIG_MSM_CACHE_DUMP_ON_PANIC=y
CONFIG_MSM_HSIC_SYSMON=y
CONFIG_STRICT_MEMORY_RWX=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -234,7 +238,7 @@
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=y
CONFIG_BT_HCISMD=y
-CONFIG_CFG80211=y
+CONFIG_CFG80211=m
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
@@ -288,6 +292,9 @@
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_PMIC8XXX_PWRKEY=y
# CONFIG_LEGACY_PTYS is not set
+CONFIG_N_SMUX=y
+CONFIG_N_SMUX_LOOPBACK=y
+CONFIG_SMUX_CTL=y
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_HSL=y
CONFIG_SERIAL_MSM_HSL_CONSOLE=y
@@ -320,6 +327,7 @@
CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_PM8821_CORE=y
CONFIG_MFD_PM8038_CORE=y
+CONFIG_MFD_PM8XXX_SPK=y
CONFIG_MFD_PM8XXX_BATT_ALARM=y
CONFIG_WCD9304_CODEC=y
CONFIG_WCD9310_CODEC=y
@@ -329,6 +337,7 @@
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_USER_RC_INPUT=y
CONFIG_IR_GPIO_CIR=y
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
@@ -338,15 +347,16 @@
CONFIG_MT9M114=y
CONFIG_IMX074_ACT=y
CONFIG_MSM_CAMERA_FLASH_SC628A=y
+CONFIG_MSM_CAMERA_FLASH_TPS61310=y
CONFIG_OV2720=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
-CONFIG_MSM_GEMINI=y
-CONFIG_S5K3L1YX=y
-CONFIG_IMX091=y
CONFIG_MSM_EEPROM=y
CONFIG_IMX074_EEPROM=y
CONFIG_IMX091_EEPROM=y
+CONFIG_MSM_GEMINI=y
+CONFIG_S5K3L1YX=y
+CONFIG_IMX091=y
CONFIG_RADIO_IRIS=y
CONFIG_RADIO_IRIS_TRANSPORT=m
CONFIG_ION=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index e6b7b79..ac9f465 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -240,6 +240,7 @@
CONFIG_MMC_EMBEDDED_SDIO=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_CLKGATE=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index dabd390..745a3a4 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -8,6 +8,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <mach/gpio.h>
+#include <mach/msm_bus.h>
#define SDC_DAT1_DISABLE 0
#define SDC_DAT1_ENABLE 1
@@ -112,6 +113,12 @@
struct msm_mmc_pad_data *pad_data;
};
+struct msm_mmc_bus_voting_data {
+ struct msm_bus_scale_pdata *use_cases;
+ unsigned int *bw_vecs;
+ unsigned int bw_vecs_size;
+};
+
struct mmc_platform_data {
unsigned int ocr_mask; /* available voltages */
int built_in; /* built-in device flag */
@@ -153,6 +160,7 @@
bool disable_runtime_pm;
bool disable_cmd23;
u32 cpu_dma_latency;
+ struct msm_mmc_bus_voting_data *msm_bus_voting_data;
};
#endif
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c294d59..eb28379 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -8,7 +8,6 @@
select MSM_VIC
select CPU_V6
select MSM_REMOTE_SPINLOCK_SWP
- select MSM_PM if PM
config ARCH_MSM7X25
bool "MSM7x25"
@@ -17,7 +16,6 @@
select CPU_V6
select MSM_REMOTE_SPINLOCK_SWP
select MULTI_IRQ_HANDLER
- select MSM_PM if PM
config ARCH_MSM7X27
bool "MSM7x27"
@@ -36,6 +34,7 @@
select QCACHE
select MSM_PM2 if PM
select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
+ select DONT_MAP_HOLE_AFTER_MEMBANK0
config ARCH_MSM7X30
bool "MSM7x30"
@@ -258,8 +257,9 @@
select MSM_PIL
select MSM_SPM_V2
select MSM_L2_SPM
- select MSM_RPM
select MSM_PM8X60 if PM
+ select MSM_RPM_SMD
+ select REGULATOR
config ARCH_FSM9XXX
bool "FSM9XXX"
@@ -302,6 +302,18 @@
select MULTI_IRQ_HANDLER
select ARM_TICKET_LOCKS
select MSM_RUN_QUEUE_STATS
+
+config ARCH_MSM9625
+ bool "MSM9625"
+ select ARM_GIC
+ select GIC_SECURE
+ select ARCH_MSM_CORTEX_A5
+ select SMP
+ select MSM_SMP
+ select CPU_V7
+ select MULTI_IRQ_HANDLER
+ select MSM_V2_TLMM
+
endmenu
choice
@@ -376,6 +388,10 @@
bool "Resource Power Manager"
select MSM_MPM
+config MSM_RPM_SMD
+ depends on MSM_SMD
+	bool "Support for using SMD as the transport layer for communications with RPM"
+
config MSM_MPM
bool "Modem Power Manager"
@@ -869,6 +885,7 @@
default "0x80200000" if ARCH_MSM8930
default "0x20200000" if ARCH_MSMCOPPER
default "0x10000000" if ARCH_FSM9XXX
+ default "0x20200000" if ARCH_MSM9625
default "0x00200000" if !MSM_STACKED_MEMORY
default "0x00000000" if ARCH_QSD8X50 && MSM_SOC_REV_A
default "0x20000000" if ARCH_QSD8X50
@@ -928,14 +945,6 @@
help
Say Y here if high speed MSM UART v1.4 is present.
-config DEBUG_MSM8930_UART
- bool "Kernel low-level debugging messages via MSM 8930 UART"
- depends on ARCH_MSM8930 && DEBUG_LL
- select MSM_HAS_DEBUG_UART_HS
- help
- Say Y here if you want the debug print routines to direct
- their output to the serial port on MSM 8930 devices.
-
config MSM_DEBUG_UART_PHYS
hex
default 0xA9A00000 if (ARCH_MSM7X27 || ARCH_QSD8X50) && DEBUG_MSM_UART1
@@ -982,13 +991,29 @@
config DEBUG_MSM8960_UART
bool "Kernel low-level debugging messages via MSM 8960 UART"
- depends on ARCH_MSM8960
+ depends on ARCH_MSM8960 && DEBUG_LL
select DEBUG_MSM8930_UART
select MSM_HAS_DEBUG_UART_HS
help
Say Y here if you want the debug print routines to direct
their output to the serial port on MSM 8960 devices.
+ config DEBUG_MSM8930_UART
+ bool "Kernel low-level debugging messages via MSM 8930 UART"
+ depends on ARCH_MSM8930 && DEBUG_LL
+ select MSM_HAS_DEBUG_UART_HS
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8930 devices.
+
+ config DEBUG_APQ8064_UART
+ bool "Kernel low-level debugging messages via APQ 8064 UART"
+ depends on ARCH_APQ8064 && DEBUG_LL
+ select MSM_HAS_DEBUG_UART_HS
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on APQ 8064 devices.
+
config DEBUG_MSMCOPPER_UART
bool "Kernel low-level debugging messages via MSM Copper UART"
depends on ARCH_MSMCOPPER
@@ -1784,6 +1809,18 @@
voltages and other parameters of the various power rails supplied
by some Qualcomm PMICs.
+config MSM_RPM_REGULATOR_SMD
+ bool "SMD RPM regulator driver"
+ depends on REGULATOR
+ depends on OF
+ depends on MSM_RPM_SMD
+ help
+ Compile in support for the SMD RPM regulator driver which is used for
+ setting voltages and other parameters of the various power rails
+ supplied by some Qualcomm PMICs. The SMD RPM regulator driver should
+ be used on systems which contain an RPM which communicates with the
+ application processor over SMD.
+
config MSM_PIL
bool "Peripheral image loading"
select FW_LOADER
@@ -2023,10 +2060,6 @@
config MSM_NATIVE_RESTART
bool
-config MSM_PM
- depends on PM
- bool
-
config MSM_PM2
depends on PM
bool
@@ -2224,6 +2257,12 @@
instead of pmem. Selecting this may also involve userspace
dependencies as well.
+config MSM_OCMEM
+ bool "MSM On-Chip memory driver (OCMEM)"
+ help
+ Enable support for On-Chip Memory available on certain MSM chipsets.
+ OCMEM is a low latency, high performance pool shared by subsystems.
+
config MSM_RTB
bool "Register tracing"
help
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 000cf43..865f6f6 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -152,8 +152,8 @@
obj-$(CONFIG_MSM_HW3D) += hw3d.o
obj-$(CONFIG_PM) += pm-boot.o
obj-$(CONFIG_MSM_PM8X60) += pm-8x60.o pm-data.o
+obj-$(CONFIG_MSM_IDLE_STATS) += pm-stats.o
obj-$(CONFIG_MSM_PM2) += pm2.o
-obj-$(CONFIG_MSM_PM) += pm.o
obj-$(CONFIG_MSM_NOPM) += no-pm.o
obj-$(CONFIG_MSM_PCIE) += pcie.o pcie_irq.o
@@ -186,6 +186,8 @@
obj-$(CONFIG_ARCH_APQ8064) += rpm-regulator-8960.o
endif
+obj-$(CONFIG_MSM_RPM_REGULATOR_SMD) += rpm-regulator-smd.o
+
ifdef CONFIG_MSM_SUBSYSTEM_RESTART
obj-y += subsystem_notif.o
obj-y += subsystem_restart.o
@@ -318,6 +320,7 @@
ifdef CONFIG_VCM
obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
endif
+obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o
obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
@@ -363,3 +366,5 @@
obj-$(CONFIG_MSM_HSIC_SYSMON) += hsic_sysmon.o
obj-$(CONFIG_MSM_HSIC_SYSMON_TEST) += hsic_sysmon_test.o
+
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index 99311d4..7c2c556 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -249,6 +249,35 @@
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
};
+/* 8625 PLL4 @ 1152MHz with GSM capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1152[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+ { 0, 61440, ACPU_PLL_1, 1, 3, 7680, 3, 1, 61440 },
+ { 1, 122880, ACPU_PLL_1, 1, 1, 15360, 3, 2, 61440 },
+ { 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 3, 61440 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
+ { 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
+ { 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+};
+
+/* 8625 PLL4 @ 1115MHz with CDMA capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1152[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
+ { 0, 65536, ACPU_PLL_1, 1, 3, 8192, 3, 1, 49152 },
+ { 1, 98304, ACPU_PLL_1, 1, 1, 12288, 3, 2, 49152 },
+ { 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
+ { 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
+ { 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+};
+
+
/* 7625a PLL2 @ 1200MHz with GSM capable modem */
static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_25a[] = {
{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
@@ -365,6 +394,8 @@
PLL_CONFIG(960, 589, 1200, 1008),
PLL_CONFIG(960, 245, 1200, 1209),
PLL_CONFIG(960, 196, 1200, 1209),
+ PLL_CONFIG(960, 245, 1200, 1152),
+ PLL_CONFIG(960, 196, 1200, 1152),
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 041e755..6986a29 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -370,6 +370,7 @@
},
[L2] = {
.hfpll_base = MSM_HFPLL_BASE + 0x400,
+ .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
.aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
.l2cpmr_iaddr = L2CPMR_IADDR,
.vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
@@ -653,23 +654,23 @@
/* TODO: Update core voltages when data is available. */
static struct acpu_level acpu_freq_tbl_8930[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 937500 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 962500 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 987500 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1062500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1062500 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1087500 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(16), 1100000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(16), 1100000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
+ { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 925000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 937500 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 962500 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 987500 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1037500 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1062500 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1087500 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1125000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(16), 1137500 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(16), 1162500 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1187500 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1200000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1225000 },
{ 0, { 0 } }
};
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index befa5bb..d53e471 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -1595,6 +1595,7 @@
static void ul_wakeup(void)
{
int ret;
+ int do_vote_dfab = 0;
mutex_lock(&wakeup_lock);
if (bam_is_connected) { /* bam got connected before lock grabbed */
@@ -1621,16 +1622,19 @@
* don't grab the wakelock the first time because it is
* already grabbed when a2 powers on
*/
- if (likely(a2_pc_disabled_wakelock_skipped))
+ if (likely(a2_pc_disabled_wakelock_skipped)) {
grab_wakelock();
- else
+ do_vote_dfab = 1; /* vote must occur after wait */
+ } else {
a2_pc_disabled_wakelock_skipped = 1;
+ }
if (wait_for_dfab) {
ret = wait_for_completion_timeout(
&dfab_unvote_completion, HZ);
BUG_ON(ret == 0);
}
- vote_dfab();
+ if (likely(do_vote_dfab))
+ vote_dfab();
schedule_delayed_work(&ul_timeout_work,
msecs_to_jiffies(UL_TIMEOUT_DELAY));
bam_is_connected = 1;
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index 36953ef..114b271 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -188,6 +188,7 @@
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A,
};
static struct msm_gpiomux_config apq8064_cam_2d_configs[] = {
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 3431cd0..48d1129 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -342,6 +342,12 @@
.pull = GPIOMUX_PULL_KEEPER,
};
+static struct gpiomux_setting mbhc_hs_detect = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
static struct gpiomux_setting cdc_mclk = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_8MA,
@@ -730,6 +736,12 @@
static struct msm_gpiomux_config apq8064_audio_codec_configs[] __initdata = {
{
+ .gpio = 38,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &mbhc_hs_detect,
+ },
+ },
+ {
.gpio = 39,
.settings = {
[GPIOMUX_SUSPENDED] = &cdc_mclk,
@@ -765,7 +777,7 @@
.pull = GPIOMUX_PULL_DOWN,
};
-static struct gpiomux_setting ap2mdm_pon_reset_n_cfg = {
+static struct gpiomux_setting ap2mdm_soft_reset_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_8MA,
.pull = GPIOMUX_PULL_DOWN,
@@ -806,11 +818,11 @@
[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
}
},
- /* AP2MDM_PON_RESET_N */
+ /* AP2MDM_SOFT_RESET, aka AP2MDM_PON_RESET_N */
{
.gpio = 27,
.settings = {
- [GPIOMUX_SUSPENDED] = &ap2mdm_pon_reset_n_cfg,
+ [GPIOMUX_SUSPENDED] = &ap2mdm_soft_reset_cfg,
}
},
/* AP2MDM_WAKEUP */
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index 41dccac..e24cac6 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -67,6 +67,21 @@
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
+ .ib = KGSL_CONVERT_TO_MBPS(1000),
+ },
+ {
+ .src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = KGSL_CONVERT_TO_MBPS(1000),
+ },
+};
+
+static struct msm_bus_vectors grp3d_nominal_low_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_GRAPHICS_3D,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2000),
},
{
@@ -117,6 +132,10 @@
grp3d_low_vectors,
},
{
+ ARRAY_SIZE(grp3d_nominal_low_vectors),
+ grp3d_nominal_low_vectors,
+ },
+ {
ARRAY_SIZE(grp3d_nominal_high_vectors),
grp3d_nominal_high_vectors,
},
@@ -177,16 +196,21 @@
.pwrlevel = {
{
.gpu_freq = 400000000,
- .bus_freq = 3,
+ .bus_freq = 4,
.io_fraction = 0,
},
{
.gpu_freq = 325000000,
- .bus_freq = 2,
+ .bus_freq = 3,
.io_fraction = 33,
},
{
.gpu_freq = 200000000,
+ .bus_freq = 2,
+ .io_fraction = 100,
+ },
+ {
+ .gpu_freq = 128000000,
.bus_freq = 1,
.io_fraction = 100,
},
@@ -196,7 +220,7 @@
},
},
.init_level = 1,
- .num_levels = 4,
+ .num_levels = 5,
.set_grp_async = NULL,
.idle_timeout = HZ/10,
.nap_allowed = true,
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 91fd400..1a0a287 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -127,6 +127,7 @@
/* TABLA CODEC RESET */
PM8921_GPIO_OUTPUT(34, 1, MED),
PM8921_GPIO_INPUT(31, PM_GPIO_PULL_NO),
+ PM8921_GPIO_OUTPUT(13, 0, HIGH), /* PCIE_CLK_PWR_EN */
};
static struct pm8xxx_gpio_init pm8921_mtp_kp_gpios[] __initdata = {
@@ -146,6 +147,8 @@
PM8921_MPP_INIT(8, D_OUTPUT, PM8921_MPP_DIG_LEVEL_S4, DOUT_CTRL_LOW),
/*MPP9 is used to detect docking station connection/removal on Liquid*/
PM8921_MPP_INIT(9, D_INPUT, PM8921_MPP_DIG_LEVEL_S4, DIN_TO_INT),
+ /* PCIE_RESET_N */
+ PM8921_MPP_INIT(1, D_OUTPUT, PM8921_MPP_DIG_LEVEL_VPH, DOUT_CTRL_HIGH),
};
void __init apq8064_pm8xxx_gpio_mpp_init(void)
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index 58e83a0..40222b8 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -157,6 +157,8 @@
REGULATOR_SUPPLY("HSUSB_VDDCX", "msm_ehci_host.1"),
REGULATOR_SUPPLY("HSIC_VDDCX", "msm_hsic_host"),
REGULATOR_SUPPLY("riva_vddcx", "wcnss_wlan.0"),
+ REGULATOR_SUPPLY("vp_pcie", "msm_pcie"),
+ REGULATOR_SUPPLY("vptx_pcie", "msm_pcie"),
};
VREG_CONSUMERS(S4) = {
REGULATOR_SUPPLY("8921_s4", NULL),
@@ -213,6 +215,7 @@
};
VREG_CONSUMERS(LVS6) = {
REGULATOR_SUPPLY("8921_lvs6", NULL),
+ REGULATOR_SUPPLY("vdd_pcie_vph", "msm_pcie"),
};
VREG_CONSUMERS(LVS7) = {
REGULATOR_SUPPLY("8921_lvs7", NULL),
@@ -256,6 +259,7 @@
REGULATOR_SUPPLY("lvds_vccs_3p3v", "lvds.0"),
REGULATOR_SUPPLY("dsi1_vccs_3p3v", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_mux_vdd", "hdmi_msm.0"),
+ REGULATOR_SUPPLY("pcie_ext_3p3v", "msm_pcie"),
};
VREG_CONSUMERS(EXT_TS_SW) = {
REGULATOR_SUPPLY("ext_ts_sw", NULL),
@@ -545,7 +549,7 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 1),
/* ID name always_on pd en_t supply reg_ID */
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index b4e7d35..72126c8 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -23,6 +23,7 @@
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8064.h"
+#include "board-storage-common-a.h"
/* APQ8064 has 4 SDCC controllers */
@@ -219,6 +220,7 @@
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.uhs_caps = MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
static struct mmc_platform_data *apq8064_sdc1_pdata = &sdc1_data;
#else
@@ -249,6 +251,7 @@
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
MMC_CAP_UHS_SDR104 | MMC_CAP_MAX_CURRENT_800),
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
static struct mmc_platform_data *apq8064_sdc3_pdata = &sdc3_data;
#else
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index be539d0..bc87d21 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -69,6 +69,7 @@
#include <sound/cs8427.h>
#include <media/gpio-ir-recv.h>
#include <linux/fmem.h>
+#include <mach/msm_pcie.h>
#include "msm_watchdog.h"
#include "board-8064.h"
@@ -94,14 +95,15 @@
#ifdef CONFIG_MSM_IOMMU
#define MSM_ION_MM_SIZE 0x3800000
#define MSM_ION_SF_SIZE 0
+#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
#define MSM_ION_HEAP_NUM 7
#else
#define MSM_ION_MM_SIZE MSM_PMEM_ADSP_SIZE
#define MSM_ION_SF_SIZE MSM_PMEM_SIZE
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_HEAP_NUM 8
#endif
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
-#define MSM_ION_QSECOM_SIZE 0x300000 /* (3MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
#else
@@ -114,6 +116,10 @@
#define MSM_MM_FW_SIZE 0x200000
#define APQ8064_FW_START (APQ8064_FIXED_AREA_START - MSM_MM_FW_SIZE)
+/* PCIe power enable pmic gpio */
+#define PCIE_PWR_EN_PMIC_GPIO 13
+#define PCIE_RST_N_PMIC_MPP 1
+
#ifdef CONFIG_KERNEL_PMEM_EBI_REGION
static unsigned pmem_kernel_ebi1_size = MSM_PMEM_KERNEL_EBI1_SIZE;
static int __init pmem_kernel_ebi1_size_setup(char *p)
@@ -427,6 +433,7 @@
apq8064_fmem_pdata.size = 0;
apq8064_fmem_pdata.reserved_size_low = 0;
apq8064_fmem_pdata.reserved_size_high = 0;
+ apq8064_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
@@ -858,9 +865,11 @@
static void __init apq8064_ehci_host_init(void)
{
- if (machine_is_apq8064_liquid()) {
- msm_ehci_host_pdata3.dock_connect_irq =
- PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+ if (machine_is_apq8064_liquid() || machine_is_mpq8064_cdp() ||
+ machine_is_mpq8064_hrd() || machine_is_mpq8064_dtv()) {
+ if (machine_is_apq8064_liquid())
+ msm_ehci_host_pdata3.dock_connect_irq =
+ PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
apq8064_device_ehci_host3.dev.platform_data =
&msm_ehci_host_pdata3;
@@ -1123,23 +1132,31 @@
#define ISA1200_HAP_LEN_GPIO PM8921_GPIO_PM_TO_SYS(20)
#define ISA1200_HAP_CLK PM8921_GPIO_PM_TO_SYS(44)
-static int isa1200_power(int on)
+static int isa1200_clk_enable(bool on)
{
int rc = 0;
- gpio_set_value_cansleep(ISA1200_HAP_CLK, !!on);
+ gpio_set_value_cansleep(ISA1200_HAP_CLK, on);
- if (on)
+ if (on) {
rc = pm8xxx_aux_clk_control(CLK_MP3_2, XO_DIV_1, true);
- else
+ if (rc) {
+ pr_err("%s: unable to write aux clock register(%d)\n",
+ __func__, rc);
+ goto err_gpio_dis;
+ }
+ } else {
rc = pm8xxx_aux_clk_control(CLK_MP3_2, XO_DIV_NONE, true);
-
- if (rc) {
- pr_err("%s: unable to write aux clock register(%d)\n",
- __func__, rc);
+ if (rc)
+ pr_err("%s: unable to write aux clock register(%d)\n",
+ __func__, rc);
}
return rc;
+
+err_gpio_dis:
+ gpio_set_value_cansleep(ISA1200_HAP_CLK, !on);
+ return rc;
}
static int isa1200_dev_setup(bool enable)
@@ -1181,7 +1198,7 @@
static struct isa1200_platform_data isa1200_1_pdata = {
.name = "vibrator",
.dev_setup = isa1200_dev_setup,
- .power_on = isa1200_power,
+ .clk_enable = isa1200_clk_enable,
.hap_en_gpio = ISA1200_HAP_EN_GPIO,
.hap_len_gpio = ISA1200_HAP_LEN_GPIO,
.max_timeout = 15000,
@@ -1661,6 +1678,8 @@
static struct mdm_platform_data mdm_platform_data = {
.mdm_version = "3.0",
.ramdump_delay_ms = 2000,
+ .early_power_on = 1,
+ .sfr_query = 1,
.peripheral_platform_device = &apq8064_device_hsic_host,
};
@@ -1997,6 +2016,22 @@
msm_bus_8064_cpss_fpb.dev.platform_data = &msm_bus_8064_cpss_fpb_pdata;
}
+/* PCIe gpios */
+static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
+ {"rst_n", PM8921_MPP_PM_TO_SYS(PCIE_RST_N_PMIC_MPP), 0},
+ {"pwr_en", PM8921_GPIO_PM_TO_SYS(PCIE_PWR_EN_PMIC_GPIO), 1},
+};
+
+static struct msm_pcie_platform msm_pcie_platform_data = {
+ .gpio = msm_pcie_gpio_info,
+};
+
+static void __init mpq8064_pcie_init(void)
+{
+ msm_device_pcie.dev.platform_data = &msm_pcie_platform_data;
+ platform_device_register(&msm_device_pcie);
+}
+
static struct platform_device apq8064_device_ext_5v_vreg __devinitdata = {
.name = GPIO_REGULATOR_DEV_NAME,
.id = PM8921_MPP_PM_TO_SYS(7),
@@ -2252,6 +2287,38 @@
},
};
+static struct platform_device rc_input_loopback_pdev = {
+ .name = "rc-user-input",
+ .id = -1,
+};
+
+static int rf4ce_gpio_init(void)
+{
+ if (!machine_is_mpq8064_cdp())
+ return -EINVAL;
+
+ /* CC2533 SRDY Input */
+ if (!gpio_request(SX150X_GPIO(4, 6), "rf4ce_srdy")) {
+ gpio_direction_input(SX150X_GPIO(4, 6));
+ gpio_export(SX150X_GPIO(4, 6), true);
+ }
+
+ /* CC2533 MRDY Output */
+ if (!gpio_request(SX150X_GPIO(4, 5), "rf4ce_mrdy")) {
+ gpio_direction_output(SX150X_GPIO(4, 5), 1);
+ gpio_export(SX150X_GPIO(4, 5), true);
+ }
+
+ /* CC2533 Reset Output */
+ if (!gpio_request(SX150X_GPIO(4, 7), "rf4ce_reset")) {
+ gpio_direction_output(SX150X_GPIO(4, 7), 0);
+ gpio_export(SX150X_GPIO(4, 7), true);
+ }
+
+ return 0;
+}
+late_initcall(rf4ce_gpio_init);
+
static struct platform_device *mpq_devices[] __initdata = {
&msm_device_sps_apq8064,
&mpq8064_device_qup_i2c_gsbi5,
@@ -2268,6 +2335,7 @@
#ifdef CONFIG_MSM_VCAP
&msm8064_device_vcap,
#endif
+ &rc_input_loopback_pdev,
};
static struct msm_spi_platform_data apq8064_qup_spi_gsbi5_pdata = {
@@ -2870,6 +2938,7 @@
machine_is_mpq8064_dtv()) {
enable_avc_i2c_bus();
platform_add_devices(mpq_devices, ARRAY_SIZE(mpq_devices));
+ mpq8064_pcie_init();
} else {
ethernet_init();
platform_add_devices(cdp_devices, ARRAY_SIZE(cdp_devices));
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index cc5b13c..c9d720c 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -198,12 +198,9 @@
#ifdef CONFIG_MSM_CAMERA_FLASH
static struct msm_camera_sensor_flash_src msm_flash_src = {
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
- ._fsrc.ext_driver_src.led_en = GPIO_CAM_GP_LED_EN1,
- ._fsrc.ext_driver_src.led_flash_en = GPIO_CAM_GP_LED_EN2,
-#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
- defined(CONFIG_GPIO_SX150X_MODULE))
- ._fsrc.ext_driver_src.expander_info = cam_expander_info,
-#endif
+ ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
+ ._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_TPS61310,
};
#endif
@@ -536,7 +533,8 @@
};
static struct msm_camera_sensor_flash_data flash_s5k3l1yx = {
- .flash_type = MSM_CAMERA_FLASH_NONE,
+ .flash_type = MSM_CAMERA_FLASH_LED,
+ .flash_src = &msm_flash_src
};
static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
@@ -585,6 +583,15 @@
struct msm_camera_sensor_info *s_info;
s_info = &msm_camera_sensor_s5k3l1yx_data;
s_info->sensor_platform_info->mount_angle = 0;
+ msm_flash_src._fsrc.ext_driver_src.led_en =
+ GPIO_CAM_GP_LED_EN1;
+ msm_flash_src._fsrc.ext_driver_src.led_flash_en =
+ GPIO_CAM_GP_LED_EN2;
+#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
+ defined(CONFIG_GPIO_SX150X_MODULE))
+ msm_flash_src._fsrc.ext_driver_src.expander_info =
+ cam_expander_info;
+#endif
}
platform_device_register(&msm_camera_server);
@@ -615,11 +622,9 @@
I2C_BOARD_INFO("s5k3l1yx", 0x20),
.platform_data = &msm_camera_sensor_s5k3l1yx_data,
},
-#ifdef CONFIG_MSM_CAMERA_FLASH_SC628A
{
- I2C_BOARD_INFO("sc628a", 0x6E),
+ I2C_BOARD_INFO("tps61310", 0x66),
},
-#endif
};
struct msm_camera_board_info msm8930_camera_board_info = {
diff --git a/arch/arm/mach-msm/board-8930-gpiomux.c b/arch/arm/mach-msm/board-8930-gpiomux.c
index cd4aff8..936a798 100644
--- a/arch/arm/mach-msm/board-8930-gpiomux.c
+++ b/arch/arm/mach-msm/board-8930-gpiomux.c
@@ -94,6 +94,12 @@
.pull = GPIOMUX_PULL_NONE,
};
+static struct gpiomux_setting audio_spkr_boost = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
#if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
static struct gpiomux_setting gpio_eth_config = {
.pull = GPIOMUX_PULL_NONE,
@@ -389,6 +395,16 @@
},
};
+static struct msm_gpiomux_config msm8960_audio_spkr_configs[] __initdata = {
+ {
+ .gpio = 15,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &audio_spkr_boost,
+ },
+ },
+};
+
+
static struct msm_gpiomux_config msm8960_audio_auxpcm_configs[] __initdata = {
{
.gpio = 63,
@@ -636,6 +652,9 @@
msm_gpiomux_install(msm8960_audio_mbhc_configs,
ARRAY_SIZE(msm8960_audio_mbhc_configs));
+ msm_gpiomux_install(msm8960_audio_spkr_configs,
+ ARRAY_SIZE(msm8960_audio_spkr_configs));
+
msm_gpiomux_install(msm8960_audio_auxpcm_configs,
ARRAY_SIZE(msm8960_audio_auxpcm_configs));
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index e23b76c..3c3843a 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -115,7 +115,7 @@
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
- .gpu_freq = 400000000,
+ .gpu_freq = 450000000,
.bus_freq = 3,
.io_fraction = 0,
},
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index 86c0438..cf7a829 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -329,6 +329,10 @@
.priority = 0,
};
+static struct pm8xxx_spk_platform_data pm8xxx_spk_pdata = {
+ .spk_add_enable = false,
+};
+
static struct pm8921_bms_platform_data pm8921_bms_pdata __devinitdata = {
.battery_type = BATT_UNKNOWN,
.r_sense = 10,
@@ -351,6 +355,7 @@
.adc_pdata = &pm8xxx_adc_pdata,
.leds_pdata = &pm8xxx_leds_pdata,
.ccadc_pdata = &pm8xxx_ccadc_pdata,
+ .spk_pdata = &pm8xxx_spk_pdata,
};
static struct msm_ssbi_platform_data msm8930_ssbi_pm8038_pdata __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index fc89a11..2f24c95 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -84,6 +84,9 @@
REGULATOR_SUPPLY("VDDIO_CDC", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar-slim"),
+ REGULATOR_SUPPLY("VDDIO_CDC", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar1p1-slim"),
REGULATOR_SUPPLY("vddp", "0-0048"),
};
VREG_CONSUMERS(L12) = {
@@ -118,6 +121,8 @@
REGULATOR_SUPPLY("8038_l20", NULL),
REGULATOR_SUPPLY("VDDD_CDC_D", "sitar-slim"),
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar-slim"),
+ REGULATOR_SUPPLY("VDDD_CDC_D", "sitar1p1-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar1p1-slim"),
};
VREG_CONSUMERS(L21) = {
REGULATOR_SUPPLY("8038_l21", NULL),
@@ -159,6 +164,7 @@
VREG_CONSUMERS(S4) = {
REGULATOR_SUPPLY("8038_s4", NULL),
REGULATOR_SUPPLY("CDC_VDD_CP", "sitar-slim"),
+ REGULATOR_SUPPLY("CDC_VDD_CP", "sitar1p1-slim"),
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8038_s5", NULL),
@@ -446,11 +452,11 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L16, "8038_l16", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L16, "8038_l16", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 0),
- PM8XXX_NLDO1200(L19, "8038_l19", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L19, "8038_l19", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 1),
- PM8XXX_NLDO1200(L27, "8038_l27", 0, 1, 1050000, 1050000, 200, "8038_s3",
+ PM8XXX_NLDO1200(L27, "8038_l27", 0, 1, 375000, 1050000, 200, "8038_s3",
0, 2),
};
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index ecebfa9..6dd7add 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -24,6 +24,7 @@
#include "devices.h"
#include "board-8930.h"
+#include "board-storage-common-a.h"
/* MSM8960 has 5 SDCC controllers */
enum sdcc_controllers {
@@ -235,6 +236,7 @@
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.pin_data = &mmc_slot_pin_data[SDCC1],
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -273,6 +275,7 @@
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
MMC_CAP_UHS_SDR104 | MMC_CAP_MAX_CURRENT_800),
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -284,6 +287,10 @@
#endif
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
/* SDC3: External card slot */
+ if (!machine_is_msm8930_cdp()) {
+ msm8960_sdc3_data.wpswitch_gpio = 0;
+ msm8960_sdc3_data.wpswitch_polarity = 0;
+ }
msm_add_sdcc(3, &msm8960_sdc3_data);
#endif
}
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index f52d312..4e2cefc 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -128,12 +128,17 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#define MSM_PMEM_KERNEL_EBI1_SIZE 0x65000
+#ifdef CONFIG_MSM_IOMMU
+#define MSM_ION_MM_SIZE 0x3800000 /* Need to be multiple of 64K */
+#define MSM_ION_SF_SIZE 0x0
+#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
+#define MSM_ION_HEAP_NUM 7
+#else
#define MSM_ION_SF_SIZE MSM_PMEM_SIZE
#define MSM_ION_MM_SIZE MSM_PMEM_ADSP_SIZE
-#define MSM_ION_QSECOM_SIZE 0x300000 /* (3MB) */
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_HEAP_NUM 8
-
-
+#endif
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
@@ -388,6 +393,7 @@
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &cp_mfc_msm8930_ion_pdata,
},
+#ifndef CONFIG_MSM_IOMMU
{
.id = ION_SF_HEAP_ID,
.type = ION_HEAP_TYPE_CARVEOUT,
@@ -396,6 +402,7 @@
.memory_type = ION_EBI_TYPE,
.extra_data = (void *) &co_msm8930_ion_pdata,
},
+#endif
{
.id = ION_IOMMU_HEAP_ID,
.type = ION_HEAP_TYPE_IOMMU,
@@ -482,6 +489,7 @@
msm8930_fmem_pdata.size = 0;
msm8930_fmem_pdata.reserved_size_low = 0;
msm8930_fmem_pdata.reserved_size_high = 0;
+ msm8930_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
@@ -790,6 +798,70 @@
.platform_data = &sitar_platform_data,
},
};
+
+static struct wcd9xxx_pdata sitar1p1_platform_data = {
+ .slimbus_slave_device = {
+ .name = "sitar-slave",
+ .e_addr = {0, 0, 0x70, 0, 0x17, 2},
+ },
+ .irq = MSM_GPIO_TO_INT(62),
+ .irq_base = SITAR_INTERRUPT_BASE,
+ .num_irqs = NR_WCD9XXX_IRQS,
+ .reset_gpio = 42,
+ .micbias = {
+ .ldoh_v = SITAR_LDOH_2P85_V,
+ .cfilt1_mv = 1800,
+ .cfilt2_mv = 1800,
+ .bias1_cfilt_sel = SITAR_CFILT1_SEL,
+ .bias2_cfilt_sel = SITAR_CFILT2_SEL,
+ },
+ .regulator = {
+ {
+ .name = "CDC_VDD_CP",
+ .min_uV = 1950000,
+ .max_uV = 2200000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_CP_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_RX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_RX_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_TX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_TX_CUR_MAX,
+ },
+ {
+ .name = "VDDIO_CDC",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_VDDIO_CDC_CUR_MAX,
+ },
+ {
+ .name = "VDDD_CDC_D",
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_A_1P2V",
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX,
+ },
+ },
+};
+
+static struct slim_device msm_slim_sitar1p1 = {
+ .name = "sitar1p1-slim",
+ .e_addr = {0, 1, 0x70, 0, 0x17, 2},
+ .dev = {
+ .platform_data = &sitar1p1_platform_data,
+ },
+};
#endif
@@ -799,6 +871,10 @@
.bus_num = 1,
.slim_slave = &msm_slim_sitar,
},
+ {
+ .bus_num = 1,
+ .slim_slave = &msm_slim_sitar1p1,
+ },
#endif
/* add more slimbus slaves as needed */
};
@@ -1574,16 +1650,16 @@
/* T6 Object */
0, 0, 0, 0, 0, 0,
/* T38 Object */
- 15, 2, 0, 15, 12, 11, 0, 0,
+ 15, 3, 0, 15, 12, 11, 0, 0,
/* T7 Object */
- 48, 255, 25,
+ 32, 16, 50,
/* T8 Object */
- 27, 0, 5, 1, 0, 0, 8, 8, 0, 0,
+ 30, 0, 5, 1, 0, 0, 8, 8, 0, 0,
/* T9 Object */
- 131, 0, 0, 19, 11, 0, 16, 35, 1, 3,
- 10, 15, 1, 11, 4, 5, 40, 10, 43, 4,
- 54, 2, 0, 0, 0, 0, 143, 40, 143, 80,
- 18, 15, 50, 50, 2,
+ 131, 0, 0, 19, 11, 0, 16, 43, 2, 3,
+ 10, 7, 2, 0, 4, 5, 35, 10, 43, 4,
+ 54, 2, 15, 32, 38, 38, 143, 40, 143, 80,
+ 7, 9, 50, 50, 2,
/* T15 Object */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
@@ -1603,13 +1679,13 @@
/* T42 Object */
0, 0, 0, 0, 0, 0, 0, 0,
/* T46 Object */
- 0, 3, 16, 48, 0, 0, 1, 0, 0,
+ 0, 3, 8, 16, 0, 0, 1, 0, 0,
/* T47 Object */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* T48 Object */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 8, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 100, 4, 64,
+ 0, 0, 5, 42, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
@@ -2260,6 +2336,8 @@
platform_device_register(&msm8930_device_rpm_regulator);
msm_clock_init(&msm8930_clock_init_data);
msm8960_device_otg.dev.platform_data = &msm_otg_pdata;
+ android_usb_pdata.swfi_latency =
+ msm_rpmrs_levels[0].latency_us;
msm8930_init_gpiomux();
msm8960_device_qup_spi_gsbi1.dev.platform_data =
&msm8960_qup_spi_gsbi1_pdata;
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 371bb53..ad9b03d 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -197,6 +197,7 @@
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
+ ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A,
};
#endif
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index 978eb09..1c6c600 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -55,6 +55,21 @@
.pull = GPIOMUX_PULL_NONE,
};
+static struct gpiomux_setting external_vfr[] = {
+ /* Suspended state */
+ {
+ .func = GPIOMUX_FUNC_3,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_KEEPER,
+ },
+ /* Active state */
+ {
+ .func = GPIOMUX_FUNC_3,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_KEEPER,
+ },
+};
+
static struct gpiomux_setting gsbi_uart = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_8MA,
@@ -424,6 +439,16 @@
},
};
+static struct msm_gpiomux_config msm8960_external_vfr_configs[] __initdata = {
+ {
+ .gpio = 23, /* EXTERNAL VFR */
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &external_vfr[0],
+ [GPIOMUX_ACTIVE] = &external_vfr[1],
+ },
+ },
+};
+
static struct msm_gpiomux_config msm8960_gsbi8_uart_configs[] __initdata = {
{
.gpio = 34, /* GSBI8 UART3 */
@@ -953,15 +978,19 @@
else
msm_gpiomux_install(msm8960_gsbi5_uart_configs,
ARRAY_SIZE(msm8960_gsbi5_uart_configs));
- /* For 8960 Fusion 2.2 Primary IPC */
- if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
+ /* For 8960 Fusion 2.2 Primary IPC */
msm_gpiomux_install(msm8960_fusion_gsbi_configs,
ARRAY_SIZE(msm8960_fusion_gsbi_configs));
+ /* For SGLTE 8960 Fusion External VFR */
+ msm_gpiomux_install(msm8960_external_vfr_configs,
+ ARRAY_SIZE(msm8960_external_vfr_configs));
+ }
#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
msm_gpiomux_install(msm8960_sdcc2_configs,
ARRAY_SIZE(msm8960_sdcc2_configs));
#endif
-
return 0;
}
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index c9a5f77..ea1ab58 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -471,7 +471,7 @@
{
.name = "led:blue",
.flags = PM8XXX_ID_LED_2,
- .default_trigger = "dc-online",
+ .default_trigger = "notification",
},
};
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 3923ecf..edb6f03 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -497,11 +497,11 @@
* ID name always_on pd min_uV max_uV en_t supply
* system_uA reg_ID
*/
- PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 1),
- PM8XXX_NLDO1200(L27, "8921_l27", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L27, "8921_l27", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 2),
- PM8XXX_NLDO1200(L28, "8921_l28", 0, 1, 1050000, 1050000, 200, "8921_s7",
+ PM8XXX_NLDO1200(L28, "8921_l28", 0, 1, 375000, 1050000, 200, "8921_s7",
0, 3),
PM8XXX_LDO(L29, "8921_l29", 0, 1, 2050000, 2100000, 200, "8921_s8",
0, 4),
diff --git a/arch/arm/mach-msm/board-8960-storage.c b/arch/arm/mach-msm/board-8960-storage.c
index df1d846..10a6903 100644
--- a/arch/arm/mach-msm/board-8960-storage.c
+++ b/arch/arm/mach-msm/board-8960-storage.c
@@ -17,12 +17,12 @@
#include <linux/bootmem.h>
#include <asm/mach-types.h>
#include <asm/mach/mmc.h>
-#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/gpiomux.h>
#include "devices.h"
#include "board-8960.h"
+#include "board-storage-common-a.h"
/* MSM8960 has 5 SDCC controllers */
enum sdcc_controllers {
@@ -299,6 +299,7 @@
.vreg_data = &mmc_slot_vreg_data[SDCC1],
.pin_data = &mmc_slot_pin_data[SDCC1],
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -316,6 +317,7 @@
.vreg_data = &mmc_slot_vreg_data[SDCC2],
.pin_data = &mmc_slot_pin_data[SDCC2],
.sdiowakeup_irq = MSM_GPIO_TO_INT(90),
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -342,6 +344,7 @@
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
MMC_CAP_UHS_SDR104 | MMC_CAP_MAX_CURRENT_600),
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -359,6 +362,7 @@
.vreg_data = &mmc_slot_vreg_data[SDCC4],
.pin_data = &mmc_slot_pin_data[SDCC4],
.sdiowakeup_irq = MSM_GPIO_TO_INT(85),
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
diff --git a/arch/arm/mach-msm/board-9615-gpiomux.c b/arch/arm/mach-msm/board-9615-gpiomux.c
index 0e18918..624cf5e 100644
--- a/arch/arm/mach-msm/board-9615-gpiomux.c
+++ b/arch/arm/mach-msm/board-9615-gpiomux.c
@@ -105,6 +105,20 @@
};
#endif
+static struct gpiomux_setting wlan_active_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+ .dir = GPIOMUX_OUT_LOW,
+};
+
+static struct gpiomux_setting wlan_suspend_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+ .dir = GPIOMUX_IN,
+};
+
static struct msm_gpiomux_config msm9615_audio_codec_configs[] __initdata = {
{
.gpio = 24,
@@ -251,6 +265,7 @@
.gpio = 16, /* GSBI5 I2C QUP SCL */
.settings = {
[GPIOMUX_SUSPENDED] = &gsbi5,
+ [GPIOMUX_ACTIVE] = &gsbi5,
},
},
{
@@ -306,6 +321,17 @@
};
#endif
+static struct msm_gpiomux_config msm9615_wlan_configs[] __initdata = {
+ {
+ .gpio = 21,/* WLAN_RESET_N */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wlan_active_config,
+ [GPIOMUX_SUSPENDED] = &wlan_suspend_config,
+ },
+ },
+};
+
+
int __init msm9615_init_gpiomux(void)
{
int rc;
@@ -332,6 +358,9 @@
msm_gpiomux_install(msm9615_audio_codec_configs,
ARRAY_SIZE(msm9615_audio_codec_configs));
+ msm_gpiomux_install(msm9615_wlan_configs,
+ ARRAY_SIZE(msm9615_wlan_configs));
+
#ifdef CONFIG_FB_MSM_EBI2
msm_gpiomux_install(msm9615_ebi2_lcdc_configs,
ARRAY_SIZE(msm9615_ebi2_lcdc_configs));
diff --git a/arch/arm/mach-msm/board-9615-regulator.c b/arch/arm/mach-msm/board-9615-regulator.c
index 8328501..0ece37c 100644
--- a/arch/arm/mach-msm/board-9615-regulator.c
+++ b/arch/arm/mach-msm/board-9615-regulator.c
@@ -79,6 +79,10 @@
REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla2x-slim"),
REGULATOR_SUPPLY("VDDD_CDC_D", "tabla-slim"),
REGULATOR_SUPPLY("VDDD_CDC_D", "tabla2x-slim"),
+ REGULATOR_SUPPLY("VDDD_CDC_D", "0-000d"),
+ REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "0-000d"),
+ REGULATOR_SUPPLY("VDDD_CDC_D", "tabla top level"),
+ REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla top level"),
};
VREG_CONSUMERS(S3) = {
REGULATOR_SUPPLY("8018_s3", NULL),
@@ -91,6 +95,14 @@
REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla2x-slim"),
REGULATOR_SUPPLY("VDDIO_CDC", "tabla-slim"),
REGULATOR_SUPPLY("VDDIO_CDC", "tabla2x-slim"),
+ REGULATOR_SUPPLY("VDDIO_CDC", "tabla top level"),
+ REGULATOR_SUPPLY("CDC_VDD_CP", "tabla top level"),
+ REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla top level"),
+ REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla top level"),
+ REGULATOR_SUPPLY("VDDIO_CDC", "0-000d"),
+ REGULATOR_SUPPLY("CDC_VDD_CP", "0-000d"),
+ REGULATOR_SUPPLY("CDC_VDDA_TX", "0-000d"),
+ REGULATOR_SUPPLY("CDC_VDDA_RX", "0-000d"),
};
VREG_CONSUMERS(S4) = {
REGULATOR_SUPPLY("8018_s4", NULL),
diff --git a/arch/arm/mach-msm/board-9615-storage.c b/arch/arm/mach-msm/board-9615-storage.c
index 7580cc3..5bdeb94 100644
--- a/arch/arm/mach-msm/board-9615-storage.c
+++ b/arch/arm/mach-msm/board-9615-storage.c
@@ -22,6 +22,7 @@
#include "devices.h"
#include "board-9615.h"
+#include "board-storage-common-a.h"
#if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT) \
|| defined(CONFIG_MMC_MSM_SDC2_SUPPORT))
@@ -187,6 +188,7 @@
.uhs_caps = (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_MAX_CURRENT_400),
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
static struct mmc_platform_data *msm9615_sdc1_pdata = &sdc1_data;
#else
@@ -206,6 +208,7 @@
.pclk_src_dfab = 1,
.pin_data = &mmc_slot_pin_data[SDCC2],
.sdiowakeup_irq = MSM_GPIO_TO_INT(GPIO_SDC2_DAT1_WAKEUP),
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
static struct mmc_platform_data *msm9615_sdc2_pdata = &sdc2_data;
#else
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index a8602d3..67697d2 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -51,6 +51,7 @@
#include "pm.h"
#include "acpuclock.h"
#include "pm-boot.h"
+#include <mach/gpiomux.h>
#ifdef CONFIG_ION_MSM
#define MSM_ION_AUDIO_SIZE 0xAF000
@@ -126,6 +127,103 @@
};
#endif
+struct pm8xxx_gpio_init {
+ unsigned gpio;
+ struct pm_gpio config;
+};
+
+struct pm8xxx_mpp_init {
+ unsigned mpp;
+ struct pm8xxx_mpp_config_data config;
+};
+
+#define PM8018_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
+ _func, _inv, _disable) \
+{ \
+ .gpio = PM8018_GPIO_PM_TO_SYS(_gpio), \
+ .config = { \
+ .direction = _dir, \
+ .output_buffer = _buf, \
+ .output_value = _val, \
+ .pull = _pull, \
+ .vin_sel = _vin, \
+ .out_strength = _out_strength, \
+ .function = _func, \
+ .inv_int_pol = _inv, \
+ .disable_pin = _disable, \
+ } \
+}
+
+#define PM8018_MPP_INIT(_mpp, _type, _level, _control) \
+{ \
+ .mpp = PM8018_MPP_PM_TO_SYS(_mpp), \
+ .config = { \
+ .type = PM8XXX_MPP_TYPE_##_type, \
+ .level = _level, \
+ .control = PM8XXX_MPP_##_control, \
+ } \
+}
+
+#define PM8018_GPIO_DISABLE(_gpio) \
+ PM8018_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, 0, 0, 0, PM8018_GPIO_VIN_S3, \
+ 0, 0, 0, 1)
+
+#define PM8018_GPIO_OUTPUT(_gpio, _val, _strength) \
+ PM8018_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
+ PM_GPIO_PULL_NO, PM8018_GPIO_VIN_S3, \
+ PM_GPIO_STRENGTH_##_strength, \
+ PM_GPIO_FUNC_NORMAL, 0, 0)
+
+#define PM8018_GPIO_INPUT(_gpio, _pull) \
+ PM8018_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0, \
+ _pull, PM8018_GPIO_VIN_S3, \
+ PM_GPIO_STRENGTH_NO, \
+ PM_GPIO_FUNC_NORMAL, 0, 0)
+
+#define PM8018_GPIO_OUTPUT_FUNC(_gpio, _val, _func) \
+ PM8018_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
+ PM_GPIO_PULL_NO, PM8018_GPIO_VIN_S3, \
+ PM_GPIO_STRENGTH_HIGH, \
+ _func, 0, 0)
+
+#define PM8018_GPIO_OUTPUT_VIN(_gpio, _val, _vin) \
+ PM8018_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
+ PM_GPIO_PULL_NO, _vin, \
+ PM_GPIO_STRENGTH_HIGH, \
+ PM_GPIO_FUNC_NORMAL, 0, 0)
+
+/* Initial PM8018 GPIO configurations */
+static struct pm8xxx_gpio_init pm8018_gpios[] __initdata = {
+ PM8018_GPIO_OUTPUT(2, 0, HIGH) /* EXT_LDO_EN_WLAN */
+};
+
+/* Initial PM8018 MPP configurations */
+static struct pm8xxx_mpp_init pm8018_mpps[] __initdata = {
+};
+
+void __init msm9615_pm8xxx_gpio_mpp_init(void)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(pm8018_gpios); i++) {
+ rc = pm8xxx_gpio_config(pm8018_gpios[i].gpio,
+ &pm8018_gpios[i].config);
+ if (rc) {
+ pr_err("%s: pm8018_gpio_config: rc=%d\n", __func__, rc);
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pm8018_mpps); i++) {
+ rc = pm8xxx_mpp_config(pm8018_mpps[i].mpp,
+ &pm8018_mpps[i].config);
+ if (rc) {
+ pr_err("%s: pm8018_mpp_config: rc=%d\n", __func__, rc);
+ break;
+ }
+ }
+}
+
static struct pm8xxx_adc_amux pm8018_adc_channels_data[] = {
{"vcoin", CHANNEL_VCOIN, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
@@ -295,6 +393,95 @@
#define TABLA_INTERRUPT_BASE (NR_MSM_IRQS + NR_GPIO_IRQS)
+/*
+ * MDM9x15 I2S.
+ */
+static struct wcd9xxx_pdata wcd9xxx_i2c_platform_data = {
+ .irq = MSM_GPIO_TO_INT(85),
+ .irq_base = TABLA_INTERRUPT_BASE,
+ .num_irqs = NR_TABLA_IRQS,
+ .reset_gpio = 84,
+ .micbias = {
+ .ldoh_v = TABLA_LDOH_2P85_V,
+ .cfilt1_mv = 1800,
+ .cfilt2_mv = 1800,
+ .cfilt3_mv = 1800,
+ .bias1_cfilt_sel = TABLA_CFILT1_SEL,
+ .bias2_cfilt_sel = TABLA_CFILT2_SEL,
+ .bias3_cfilt_sel = TABLA_CFILT3_SEL,
+ .bias4_cfilt_sel = TABLA_CFILT3_SEL,
+ },
+ .regulator = {
+ {
+ .name = "CDC_VDD_CP",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_CP_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_RX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_RX_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_TX",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_CDC_VDDA_TX_CUR_MAX,
+ },
+ {
+ .name = "VDDIO_CDC",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .optimum_uA = WCD9XXX_VDDIO_CDC_CUR_MAX,
+ },
+ {
+ .name = "VDDD_CDC_D",
+ .min_uV = 1225000,
+ .max_uV = 1225000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX,
+ },
+ {
+ .name = "CDC_VDDA_A_1P2V",
+ .min_uV = 1225000,
+ .max_uV = 1225000,
+ .optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX,
+ }
+ },
+};
+
+static struct i2c_board_info wcd9xxx_device_info[] __initdata = {
+ {
+ I2C_BOARD_INFO("tabla top level", TABLA_I2C_SLAVE_ADDR),
+ .platform_data = &wcd9xxx_i2c_platform_data,
+ },
+ {
+ I2C_BOARD_INFO("tabla analog", TABLA_ANALOG_I2C_SLAVE_ADDR),
+ .platform_data = &wcd9xxx_i2c_platform_data,
+ },
+ {
+ I2C_BOARD_INFO("tabla digital1", TABLA_DIGITAL1_I2C_SLAVE_ADDR),
+ .platform_data = &wcd9xxx_i2c_platform_data,
+ },
+ {
+ I2C_BOARD_INFO("tabla digital2", TABLA_DIGITAL2_I2C_SLAVE_ADDR),
+ .platform_data = &wcd9xxx_i2c_platform_data,
+ },
+};
+
+static struct i2c_registry msm9615_i2c_devices[] __initdata = {
+ {
+ I2C_SURF | I2C_FFA | I2C_FLUID,
+ MSM_9615_GSBI5_QUP_I2C_BUS_ID,
+ wcd9xxx_device_info,
+ ARRAY_SIZE(wcd9xxx_device_info),
+ },
+};
+/*
+ * MDM9x15 I2S.
+ */
+
/* Micbias setting is based on 8660 CDP/MTP/FLUID requirement
* 4 micbiases are used to power various analog and digital
* microphones operating at 1800 mV. Technically, all micbiases
@@ -310,7 +497,7 @@
.name = "tabla-slave",
.e_addr = {0, 0, 0x60, 0, 0x17, 2},
},
- .irq = 85,
+ .irq = MSM_GPIO_TO_INT(85),
.irq_base = TABLA_INTERRUPT_BASE,
.num_irqs = NR_WCD9XXX_IRQS,
.reset_gpio = 84,
@@ -567,6 +754,7 @@
.phy_type = SNPS_28NM_INTEGRATED_PHY,
.vbus_power = msm_hsusb_vbus_power,
.disable_reset_on_disconnect = true,
+ .enable_lpm_on_dev_suspend = true,
};
static struct msm_hsic_peripheral_platform_data msm_hsic_peripheral_pdata = {
@@ -700,6 +888,8 @@
&msm_stub_codec,
&msm_voice,
&msm_voip,
+ &msm_i2s_cpudai0,
+ &msm_i2s_cpudai1,
&msm_pcm_hostless,
&msm_cpudai_afe_01_rx,
&msm_cpudai_afe_01_tx,
@@ -728,8 +918,27 @@
static void __init msm9615_i2c_init(void)
{
+ u8 mach_mask = 0;
+ int i;
+ /* Mask is hardcoded to SURF (CDP).
+ * works on MTP with same configuration.
+ */
+ mach_mask = I2C_SURF;
+ if (machine_is_msm9615_cdp())
+ mach_mask = I2C_SURF;
+ else if (machine_is_msm9615_mtp())
+ mach_mask = I2C_FFA;
+ else
+ pr_err("unmatched machine ID in register_i2c_devices\n");
msm9615_device_qup_i2c_gsbi5.dev.platform_data =
&msm9615_i2c_qup_gsbi5_pdata;
+ for (i = 0; i < ARRAY_SIZE(msm9615_i2c_devices); ++i) {
+ if (msm9615_i2c_devices[i].machs & mach_mask) {
+ i2c_register_board_info(msm9615_i2c_devices[i].bus,
+ msm9615_i2c_devices[i].info,
+ msm9615_i2c_devices[i].len);
+ }
+ }
}
static void __init msm9615_reserve(void)
@@ -765,7 +974,7 @@
&msm_hsic_peripheral_pdata;
msm_device_usb_bam.dev.platform_data = &msm_usb_bam_pdata;
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
-
+ msm9615_pm8xxx_gpio_mpp_init();
acpuclk_init(&acpuclk_9615_soc_data);
/* Ensure ar6000pm device is registered before MMC/SDC */
diff --git a/arch/arm/mach-msm/board-9615.h b/arch/arm/mach-msm/board-9615.h
index 7dd003f..80656b3 100644
--- a/arch/arm/mach-msm/board-9615.h
+++ b/arch/arm/mach-msm/board-9615.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,34 @@
#include <linux/mfd/pm8xxx/pm8018.h>
#include <linux/regulator/gpio-regulator.h>
+/*
+ * MDM9x15 I2S.
+ */
+#ifdef CONFIG_I2C
+#define I2C_SURF 1
+#define I2C_FFA (1 << 1)
+#define I2C_RUMI (1 << 2)
+#define I2C_SIM (1 << 3)
+#define I2C_FLUID (1 << 4)
+#define I2C_LIQUID (1 << 5)
+
+struct i2c_registry {
+ u8 machs;
+ int bus;
+ struct i2c_board_info *info;
+ int len;
+};
+#endif
+/* Tabla slave address for I2C */
+#define TABLA_I2C_SLAVE_ADDR 0x0d
+#define TABLA_ANALOG_I2C_SLAVE_ADDR 0x77
+#define TABLA_DIGITAL1_I2C_SLAVE_ADDR 0x66
+#define TABLA_DIGITAL2_I2C_SLAVE_ADDR 0x55
+#define MSM_9615_GSBI5_QUP_I2C_BUS_ID 0
+/*
+ * MDM9x15 I2S.
+ */
+
/* Macros assume PMIC GPIOs and MPPs start at 1 */
#define PM8018_GPIO_BASE NR_GPIO_IRQS
#define PM8018_GPIO_PM_TO_SYS(pm_gpio) (pm_gpio - 1 + PM8018_GPIO_BASE)
@@ -36,7 +64,7 @@
#define GPIO_VREG_ID_EXT_2P95V 0
extern struct gpio_regulator_platform_data msm_gpio_regulator_pdata[];
-
+uint32_t msm9615_rpm_get_swfi_latency(void);
int msm9615_init_gpiomux(void);
void msm9615_init_mmc(void);
void mdm9615_allocate_fb_region(void);
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index eff5490..843d603 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -39,6 +39,8 @@
#endif
#include <mach/msm_memtypes.h>
#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
#include <mach/qpnp-int.h>
#include <mach/socinfo.h>
#include "clock.h"
@@ -51,7 +53,7 @@
#else
#define MSM_ION_SF_SIZE 0x2800000 /* 40 Mbytes */
#endif
-#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
+#define MSM_ION_MM_FW_SIZE 0xa00000 /* (10MB) */
#define MSM_ION_MM_SIZE 0x7800000 /* (120MB) */
#define MSM_ION_QSECOM_SIZE 0x100000 /* (1MB) */
#define MSM_ION_MFC_SIZE SZ_8K
@@ -390,6 +392,40 @@
.id = -1,
};
+#define SHARED_IMEM_TZ_BASE 0xFE805720
+static struct resource tzlog_resources[] = {
+ {
+ .start = SHARED_IMEM_TZ_BASE,
+ .end = SHARED_IMEM_TZ_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device apq_device_tz_log = {
+ .name = "tz_log",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(tzlog_resources),
+ .resource = tzlog_resources,
+};
+
+#ifdef CONFIG_HW_RANDOM_MSM
+/* PRNG device */
+#define MSM_PRNG_PHYS 0xF9BFF000
+static struct resource rng_resources = {
+ .flags = IORESOURCE_MEM,
+ .start = MSM_PRNG_PHYS,
+ .end = MSM_PRNG_PHYS + SZ_512 - 1,
+};
+
+struct platform_device msm8974_device_rng = {
+ .name = "msm_rng",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &rng_resources,
+};
+#endif
+
+
void __init msm_copper_add_devices(void)
{
#ifdef CONFIG_ION_MSM
@@ -399,6 +435,10 @@
platform_device_register(&android_usb_device);
platform_add_devices(msm_copper_stub_regulator_devices,
msm_copper_stub_regulator_devices_len);
+ platform_device_register(&apq_device_tz_log);
+#ifdef CONFIG_HW_RANDOM_MSM
+ platform_device_register(&msm8974_device_rng);
+#endif
}
/*
@@ -409,6 +449,9 @@
*/
void __init msm_copper_add_drivers(void)
{
+ msm_smd_init();
+ msm_rpm_driver_init();
+ rpm_regulator_smd_driver_init();
msm_spm_device_init();
regulator_stub_init();
}
@@ -462,10 +505,14 @@
"spi_qsd.1", NULL),
OF_DEV_AUXDATA("qcom,spmi-pmic-arb", 0xFC4C0000, \
"spmi-pmic-arb.0", NULL),
- OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF980B000, \
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
"msm_sdcc.1", NULL),
- OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF984B000, \
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
+ "msm_sdcc.2", NULL),
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9864000, \
"msm_sdcc.3", NULL),
+ OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98E4000, \
+ "msm_sdcc.4", NULL),
OF_DEV_AUXDATA("qcom,pil-q6v5-lpass", 0xFE200000, \
"pil-q6v5-lpass", NULL),
OF_DEV_AUXDATA("qcom,pil-pronto", 0xFB21B000, \
diff --git a/arch/arm/mach-msm/board-msm7627a-camera.c b/arch/arm/mach-msm/board-msm7627a-camera.c
index bd89e34..251c97a 100644
--- a/arch/arm/mach-msm/board-msm7627a-camera.c
+++ b/arch/arm/mach-msm/board-msm7627a-camera.c
@@ -29,6 +29,9 @@
#define GPIO_SKU3_CAM_5MP_SHDN_N 5 /* PWDN */
#define GPIO_SKU3_CAM_5MP_CAMIF_RESET 6 /* (board_is(EVT))?123:121 RESET */
#define GPIO_SKU3_CAM_5MP_CAM_DRIVER_PWDN 30
+#define GPIO_SKU7_CAM_VGA_SHDN 91
+#define GPIO_SKU7_CAM_5MP_SHDN_N 93 /* PWDN */
+#define GPIO_SKU7_CAM_5MP_CAMIF_RESET 23 /* (board_is(EVT))?123:121 RESET */
#ifdef CONFIG_MSM_CAMERA_V4L2
static uint32_t camera_off_gpio_table[] = {
@@ -72,7 +75,7 @@
.gpio_no_mux = 1,
};
-#ifdef CONFIG_WEBCAM_OV7692_QRD
+#ifdef CONFIG_OV7692
static struct gpio ov7692_cam_req_gpio[] = {
{GPIO_SKU1_CAM_VGA_SHDN, GPIOF_DIR_OUT, "CAM_VGA_SHDN"},
{GPIO_SKU1_CAM_VGA_RESET_N, GPIOF_DIR_OUT, "CAM_VGA_RESET"},
@@ -83,8 +86,6 @@
{GPIO_SKU1_CAM_VGA_SHDN, GPIOF_OUT_INIT_LOW, 5000},
{GPIO_SKU1_CAM_VGA_RESET_N, GPIOF_OUT_INIT_HIGH, 5000},
{GPIO_SKU1_CAM_VGA_RESET_N, GPIOF_OUT_INIT_LOW, 5000},
- {40, GPIOF_OUT_INIT_HIGH, 5000},
- {35, GPIOF_OUT_INIT_HIGH, 5000},
};
static struct msm_camera_gpio_conf gpio_conf_ov7692 = {
@@ -193,7 +194,7 @@
};
#endif
-#ifdef CONFIG_WEBCAM_OV7692_QRD
+#ifdef CONFIG_OV7692
static struct msm_camera_sensor_platform_info sensor_board_info_ov7692 = {
.mount_angle = 90,
.cam_vreg = msm_cam_vreg,
@@ -209,6 +210,7 @@
.sensor_name = "ov7692",
.sensor_reset_enable = 0,
.pmic_gpio_enable = 1,
+ .sensor_lcd_gpio_onoff = lcd_camera_power_onoff,
.sensor_reset = GPIO_SKU1_CAM_VGA_RESET_N,
.sensor_pwd = GPIO_SKU1_CAM_VGA_SHDN,
.pdata = &msm_camera_device_data_csi0[0],
@@ -252,6 +254,7 @@
.sensor_name = "ov5647",
.sensor_reset_enable = 1,
.pmic_gpio_enable = 1,
+ .sensor_lcd_gpio_onoff = lcd_camera_power_onoff,
.sensor_reset = GPIO_SKU3_CAM_5MP_CAMIF_RESET,
.sensor_pwd = GPIO_SKU3_CAM_5MP_SHDN_N,
.pdata = &msm_camera_device_data_csi1[0],
@@ -339,14 +342,17 @@
}
platform_device_register(&msm_camera_server);
if (machine_is_msm8625_surf() || machine_is_msm8625_evb()
- || machine_is_msm8625_evt()) {
+ || machine_is_msm8625_evt()
+ || machine_is_msm8625_qrd7()) {
platform_device_register(&msm8625_device_csic0);
platform_device_register(&msm8625_device_csic1);
} else {
platform_device_register(&msm7x27a_device_csic0);
platform_device_register(&msm7x27a_device_csic1);
}
- if (machine_is_msm8625_evb() || machine_is_msm8625_evt())
+ if (machine_is_msm8625_evb()
+ || machine_is_msm8625_evt()
+ || machine_is_msm8625_qrd7())
*(int *) msm7x27a_device_clkctl.dev.platform_data = 1;
platform_device_register(&msm7x27a_device_clkctl);
platform_device_register(&msm7x27a_device_vfe);
@@ -462,39 +468,45 @@
{
int rc = 0;
- rc = gpio_request(GPIO_SKU3_CAM_5MP_SHDN_N, "ov5647");
+ rc = gpio_request(msm_camera_sensor_ov5647_data.sensor_pwd, "ov5647");
if (rc < 0)
- pr_err("%s: gpio_request GPIO_SKU3_CAM_5MP_SHDN_N failed!",
- __func__);
+ pr_err("%s: gpio_request OV5647 sensor_pwd: %d failed!",
+ __func__, msm_camera_sensor_ov5647_data.sensor_pwd);
- pr_debug("gpio_tlmm_config %d\r\n", GPIO_SKU3_CAM_5MP_SHDN_N);
- rc = gpio_tlmm_config(GPIO_CFG(GPIO_SKU3_CAM_5MP_SHDN_N, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN,
- GPIO_CFG_2MA), GPIO_CFG_ENABLE);
+ rc = gpio_tlmm_config(GPIO_CFG(msm_camera_sensor_ov5647_data.sensor_pwd,
+ 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN,
+ GPIO_CFG_2MA), GPIO_CFG_ENABLE);
if (rc < 0) {
pr_err("%s:unable to enable Powr Dwn gpio for main camera!\n",
__func__);
- gpio_free(GPIO_SKU3_CAM_5MP_SHDN_N);
+ gpio_free(msm_camera_sensor_ov5647_data.sensor_pwd);
}
- gpio_direction_output(GPIO_SKU3_CAM_5MP_SHDN_N, 1);
-
- rc = gpio_request(GPIO_SKU3_CAM_5MP_CAMIF_RESET, "ov5647");
+ rc = gpio_direction_output(msm_camera_sensor_ov5647_data.sensor_pwd, 1);
if (rc < 0)
- pr_err("%s: gpio_request GPIO_SKU3_CAM_5MP_CAMIF_RESET failed!",
- __func__);
+ pr_err("%s: unable to set gpio: %d direction for ov5647 camera\n",
+ __func__, msm_camera_sensor_ov5647_data.sensor_pwd);
- pr_debug("gpio_tlmm_config %d\r\n", GPIO_SKU3_CAM_5MP_CAMIF_RESET);
- rc = gpio_tlmm_config(GPIO_CFG(GPIO_SKU3_CAM_5MP_CAMIF_RESET, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN,
- GPIO_CFG_2MA), GPIO_CFG_ENABLE);
+ rc = gpio_request(msm_camera_sensor_ov5647_data.sensor_reset, "ov5647");
+ if (rc < 0)
+ pr_err("%s: gpio_request OV5647 sensor_reset: %d failed!",
+ __func__, msm_camera_sensor_ov5647_data.sensor_reset);
+
+ rc = gpio_tlmm_config(GPIO_CFG(
+ msm_camera_sensor_ov5647_data.sensor_reset,
+ 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN,
+ GPIO_CFG_2MA), GPIO_CFG_ENABLE);
if (rc < 0) {
pr_err("%s: unable to enable reset gpio for main camera!\n",
__func__);
- gpio_free(GPIO_SKU3_CAM_5MP_CAMIF_RESET);
+ gpio_free(msm_camera_sensor_ov5647_data.sensor_reset);
}
- gpio_direction_output(GPIO_SKU3_CAM_5MP_CAMIF_RESET, 1);
+ rc = gpio_direction_output(
+ msm_camera_sensor_ov5647_data.sensor_reset, 1);
+ if (rc < 0)
+ pr_err("%s: unable to set gpio: %d direction for ov5647 camera\n",
+ __func__, msm_camera_sensor_ov5647_data.sensor_reset);
}
@@ -976,6 +988,7 @@
#define LCD_CAMERA_LDO_2V8 35 /* SKU1&SKU3 2.8V LDO */
#define SKU3_LCD_CAMERA_LDO_1V8 40 /* SKU3 1.8V LDO */
+#define SKU7_LCD_CAMERA_LDO_1V8 58 /* SKU7 1.8V LDO */
static int lcd_camera_ldo_1v8 = SKU3_LCD_CAMERA_LDO_1V8;
@@ -985,7 +998,10 @@
pr_debug("lcd_camera_power_init\n");
- lcd_camera_ldo_1v8 = SKU3_LCD_CAMERA_LDO_1V8; /* SKU3 PVT */
+ if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7())
+ lcd_camera_ldo_1v8 = SKU7_LCD_CAMERA_LDO_1V8;
+ else
+ lcd_camera_ldo_1v8 = SKU3_LCD_CAMERA_LDO_1V8;
/* LDO_EXT2V8 */
if (gpio_request(LCD_CAMERA_LDO_2V8, "lcd_camera_ldo_2v8")) {
@@ -1077,9 +1093,26 @@
#endif
pr_debug("msm7627a_camera_init Entered\n");
+
+ if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7()) {
+ ov7692_cam_req_gpio[0].gpio =
+ GPIO_SKU7_CAM_VGA_SHDN;
+ ov7692_cam_gpio_set_tbl[0].gpio = GPIO_SKU7_CAM_VGA_SHDN;
+ ov7692_cam_gpio_set_tbl[1].gpio = GPIO_SKU7_CAM_VGA_SHDN;
+
+ msm_camera_sensor_ov5647_data.sensor_pwd =
+ GPIO_SKU7_CAM_5MP_SHDN_N;
+ msm_camera_sensor_ov5647_data.sensor_reset =
+ GPIO_SKU7_CAM_5MP_CAMIF_RESET;
+
+ }
+
/* LCD and camera power (VREG & LDO) init */
if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()
- || machine_is_msm8625_evt()) {
+ || machine_is_msm8625_evt()
+ || machine_is_msm7627a_qrd3()
+ || machine_is_msm8625_qrd7()) {
+
lcd_camera_power_init();
evb_camera_gpio_cfg();
}
@@ -1089,8 +1122,11 @@
qrd1_camera_gpio_cfg();
platform_add_devices(camera_devices_qrd,
ARRAY_SIZE(camera_devices_qrd));
- } else if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()
- || machine_is_msm8625_evt()) {
+ } else if (machine_is_msm7627a_evb()
+ || machine_is_msm8625_evb()
+ || machine_is_msm8625_evt()
+ || machine_is_msm7627a_qrd3()
+ || machine_is_msm8625_qrd7()) {
platform_add_devices(camera_devices_evb,
ARRAY_SIZE(camera_devices_evb));
} else if (machine_is_msm7627a_qrd3())
@@ -1101,7 +1137,9 @@
#endif
if (!machine_is_msm7627a_qrd1() || !machine_is_msm7627a_evb()
|| !machine_is_msm8625_evb()
- || !machine_is_msm8625_evt())
+ || !machine_is_msm8625_evt()
+ || !machine_is_msm7627a_qrd3()
+ || !machine_is_msm8625_qrd7())
register_i2c_devices();
#ifndef CONFIG_MSM_CAMERA_V4L2
rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs_camera), regs_camera);
@@ -1127,8 +1165,11 @@
i2c_register_board_info(MSM_GSBI0_QUP_I2C_BUS_ID,
i2c_camera_devices_qrd,
ARRAY_SIZE(i2c_camera_devices_qrd));
- } else if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()
- || machine_is_msm8625_evt()) {
+ } else if (machine_is_msm7627a_evb()
+ || machine_is_msm8625_evb()
+ || machine_is_msm8625_evt()
+ || machine_is_msm7627a_qrd3()
+ || machine_is_msm8625_qrd7()) {
pr_debug("machine_is_msm7627a_evb i2c_register_board_info\n");
i2c_register_board_info(MSM_GSBI0_QUP_I2C_BUS_ID,
i2c_camera_devices_evb,
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 89b9378..f44cc9e 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -665,6 +665,14 @@
return 0;
}
+static int mipi_NT35510_rotate_panel(void)
+{
+ int rotate = 0;
+ if (machine_is_msm8625_evt())
+ rotate = 1;
+
+ return rotate;
+}
static struct msm_panel_common_pdata mipi_truly_pdata = {
.pmic_backlight = mipi_truly_set_bl,
@@ -680,6 +688,7 @@
static struct msm_panel_common_pdata mipi_NT35510_pdata = {
.pmic_backlight = evb_backlight_control,
+ .rotate_panel = mipi_NT35510_rotate_panel,
};
static struct platform_device mipi_dsi_NT35510_panel_device = {
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index 49945d0..23df1cf 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -125,6 +125,11 @@
KEY_VOLUMEDOWN,
};
+static const unsigned short keymap_8625_evt[] = {
+ KEY_VOLUMEDOWN,
+ KEY_VOLUMEUP,
+};
+
static struct gpio_event_matrix_info kp_matrix_info_8625 = {
.info.func = gpio_event_matrix_func,
.keymap = keymap_8625,
@@ -241,7 +246,7 @@
/* T6 Object */
0, 0, 0, 0, 0, 0,
/* T38 Object */
- 16, 0, 0, 0, 0, 0, 0, 0,
+ 16, 1, 0, 0, 0, 0, 0, 0,
/* T7 Object */
32, 16, 50,
/* T8 Object */
@@ -853,7 +858,11 @@
#endif
/* keypad */
- if (machine_is_msm7627a_evb() || machine_is_msm8625_evb())
+ if (machine_is_msm8625_evt())
+ kp_matrix_info_8625.keymap = keymap_8625_evt;
+
+ if (machine_is_msm7627a_evb() || machine_is_msm8625_evb() ||
+ machine_is_msm8625_evt())
platform_device_register(&kp_pdev_8625);
else if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7())
platform_device_register(&kp_pdev_sku3);
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index 93a430b..43937b8 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -152,7 +152,6 @@
{
if (machine_is_msm7627a_qrd1() || machine_is_msm7627a_evb()
|| machine_is_msm8625_evb()
- || machine_is_msm8625_evt()
|| machine_is_msm7627a_qrd3()
|| machine_is_msm8625_qrd7())
gpio_sdc1_hw_det = 42;
@@ -257,7 +256,6 @@
if (machine_is_msm7627a_qrd1() ||
machine_is_msm7627a_evb() ||
machine_is_msm8625_evb() ||
- machine_is_msm8625_evt() ||
machine_is_msm7627a_qrd3() ||
machine_is_msm8625_qrd7())
status = !gpio_get_value(gpio_sdc1_hw_det);
@@ -384,7 +382,14 @@
gpio_sdc1_config();
if (mmc_regulator_init(1, "mmc", 2850000))
return;
- sdc1_plat_data.status_irq = MSM_GPIO_TO_INT(gpio_sdc1_hw_det);
+#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
+ /* 8x25 EVT do not use hw detector */
+ if (!(machine_is_msm8625_evt()))
+ sdc1_plat_data.status_irq = MSM_GPIO_TO_INT(gpio_sdc1_hw_det);
+ if (machine_is_msm8625_evt())
+ sdc1_plat_data.status = NULL;
+#endif
+
msm_add_sdcc(1, &sdc1_plat_data);
#endif
/* SDIO WLAN slot */
diff --git a/arch/arm/mach-msm/board-msm7627a.h b/arch/arm/mach-msm/board-msm7627a.h
index 413a28c..4357e01 100644
--- a/arch/arm/mach-msm/board-msm7627a.h
+++ b/arch/arm/mach-msm/board-msm7627a.h
@@ -103,6 +103,7 @@
#endif
void __init msm7627a_camera_init(void);
+int lcd_camera_power_onoff(int on);
void __init msm7627a_add_io_devices(void);
void __init qrd7627a_add_io_devices(void);
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index df4ca83..9ec618d 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -760,9 +760,9 @@
static void msm7x27a_cfg_uart2dm_serial(void) { }
#endif
-struct fmem_platform_data fmem_pdata;
+static struct fmem_platform_data fmem_pdata;
-struct platform_device fmem_device = {
+static struct platform_device fmem_device = {
.name = "fmem",
.id = 1,
.dev = { .platform_data = &fmem_pdata },
@@ -883,6 +883,7 @@
android_pmem_audio_pdata.size = pmem_audio_size;
fmem_pdata.size = 0;
+ fmem_pdata.align = PAGE_SIZE;
/* Find pmem devices that should use FMEM (reusable) memory.
*/
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index e5a31f2a..e172481 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -102,6 +102,7 @@
#include "rpm_resources.h"
#include "acpuclock.h"
#include "pm-boot.h"
+#include "board-storage-common-a.h"
#include <linux/ion.h>
#include <mach/ion.h>
@@ -2638,7 +2639,11 @@
#define MSM_SMI_SIZE 0x4000000
#define KERNEL_SMI_BASE (MSM_SMI_BASE)
+#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
+#define KERNEL_SMI_SIZE 0x000000
+#else
#define KERNEL_SMI_SIZE 0x600000
+#endif
#define USER_SMI_BASE (KERNEL_SMI_BASE + KERNEL_SMI_SIZE)
#define USER_SMI_SIZE (MSM_SMI_SIZE - KERNEL_SMI_SIZE)
@@ -2647,7 +2652,7 @@
#define MSM_ION_SF_SIZE 0x4000000 /* 64MB */
#define MSM_ION_CAMERA_SIZE MSM_PMEM_ADSP_SIZE
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
-#define MSM_ION_MM_SIZE 0x3600000 /* (54MB) Must be a multiple of 64K */
+#define MSM_ION_MM_SIZE 0x3c00000 /* (60MB) Must be a multiple of 64K */
#define MSM_ION_MFC_SIZE SZ_8K
#ifdef CONFIG_FB_MSM_OVERLAY1_WRITEBACK
#define MSM_ION_WB_SIZE 0xC00000 /* 12MB */
@@ -8356,6 +8361,7 @@
.msmsdcc_fmax = 48000000,
.nonremovable = 1,
.pclk_src_dfab = 1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -8374,6 +8380,7 @@
#ifdef CONFIG_MSM_SDIO_AL
.is_sdio_al_client = 1,
#endif
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -8395,6 +8402,7 @@
.nonremovable = 0,
.pclk_src_dfab = 1,
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC3_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -8409,6 +8417,7 @@
.nonremovable = 0,
.pclk_src_dfab = 1,
.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC4_DAT1,
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
@@ -8427,6 +8436,7 @@
#ifdef CONFIG_MSM_SDIO_AL
.is_sdio_al_client = 1,
#endif
+ .msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
};
#endif
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index f565075..b74b285 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -31,6 +31,7 @@
#include <linux/memblock.h>
#include <linux/input/ft5x06_ts.h>
#include <linux/msm_adc.h>
+#include <linux/fmem.h>
#include <asm/mach/mmc.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -382,6 +383,9 @@
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
.cached = 1,
.memory_type = MEMTYPE_EBI1,
+ .request_region = request_fmem_c_region,
+ .release_region = release_fmem_c_region,
+ .reusable = 1,
};
static struct platform_device android_pmem_adsp_device = {
@@ -618,6 +622,14 @@
},
};
+static struct fmem_platform_data fmem_pdata;
+
+static struct platform_device fmem_device = {
+ .name = "fmem",
+ .id = 1,
+ .dev = { .platform_data = &fmem_pdata },
+};
+
static struct platform_device *common_devices[] __initdata = {
&android_usb_device,
&android_pmem_device,
@@ -630,6 +642,7 @@
&asoc_msm_dai0,
&asoc_msm_dai1,
&msm_adc_device,
+ &fmem_device,
};
static struct platform_device *qrd7627a_devices[] __initdata = {
@@ -687,12 +700,44 @@
},
};
+#ifdef CONFIG_ANDROID_PMEM
+static struct android_pmem_platform_data *pmem_pdata_array[] __initdata = {
+ &android_pmem_adsp_pdata,
+ &android_pmem_audio_pdata,
+ &android_pmem_pdata,
+};
+#endif
+
static void __init size_pmem_devices(void)
{
#ifdef CONFIG_ANDROID_PMEM
+ unsigned int i;
+ unsigned int reusable_count = 0;
+
android_pmem_adsp_pdata.size = pmem_adsp_size;
android_pmem_pdata.size = pmem_mdp_size;
android_pmem_audio_pdata.size = pmem_audio_size;
+
+ fmem_pdata.size = 0;
+ fmem_pdata.align = PAGE_SIZE;
+
+ /* Find pmem devices that should use FMEM (reusable) memory.
+ */
+ for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i) {
+ struct android_pmem_platform_data *pdata = pmem_pdata_array[i];
+
+ if (!reusable_count && pdata->reusable)
+ fmem_pdata.size += pdata->size;
+
+ reusable_count += (pdata->reusable) ? 1 : 0;
+
+ if (pdata->reusable && reusable_count > 1) {
+ pr_err("%s: Too many PMEM devices specified as reusable. PMEM device %s was not configured as reusable.\n",
+ __func__, pdata->name);
+ pdata->reusable = 0;
+ }
+ }
+
#endif
}
@@ -704,9 +749,10 @@
static void __init reserve_pmem_memory(void)
{
#ifdef CONFIG_ANDROID_PMEM
- reserve_memory_for(&android_pmem_adsp_pdata);
- reserve_memory_for(&android_pmem_pdata);
- reserve_memory_for(&android_pmem_audio_pdata);
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i)
+ reserve_memory_for(pmem_pdata_array[i]);
+
msm7627a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size;
#endif
}
diff --git a/arch/arm/mach-msm/board-storage-common-a.h b/arch/arm/mach-msm/board-storage-common-a.h
new file mode 100644
index 0000000..7737819
--- /dev/null
+++ b/arch/arm/mach-msm/board-storage-common-a.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _BOARD_STORAGE_A_H
+#define _BOARD_STORAGE_A_H
+
+#include <asm/mach/mmc.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#define MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(num, _ib) \
+static struct msm_bus_vectors sps_to_ddr_perf_vectors_##num[] = { \
+ { \
+ .src = MSM_BUS_MASTER_SPS, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ib = (_ib), \
+ .ab = ((_ib) / 2), \
+ } \
+}
+
+#define MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(num) \
+ { \
+ ARRAY_SIZE(sps_to_ddr_perf_vectors_##num), \
+ sps_to_ddr_perf_vectors_##num, \
+ }
+
+/* no bandwidth required */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(0, 0);
+/*
+ * 13 MB/s bandwidth
+ * 4-bit MMC_TIMING_LEGACY
+ * 4-bit MMC_TIMING_UHS_SDR12
+ */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(1, 13 * 1024 * 1024);
+/*
+ * 26 MB/s bandwidth
+ * 8-bit MMC_TIMING_LEGACY
+ * 4-bit MMC_TIMING_MMC_HS / MMC_TIMING_SD_HS /
+ * MMC_TIMING_UHS_SDR25
+ */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(2, 26 * 1024 * 1024);
+/*
+ * 52 MB/s bandwidth
+ * 8-bit MMC_TIMING_MMC_HS
+ * 4-bit MMC_TIMING_UHS_SDR50 / MMC_TIMING_UHS_DDR50
+ */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(3, 52 * 1024 * 1024);
+/*
+ * 104 MB/s bandwidth
+ * 8-bit MMC_TIMING_UHS_DDR50
+ * 4-bit MMC_TIMING_UHS_SDR104 / MMC_TIMING_MMC_HS200
+ */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(4, 104 * 1024 * 1024);
+/*
+ * 200 MB/s bandwidth
+ * 8-bit MMC_TIMING_MMC_HS200
+ */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(5, 200 * 1024 * 1024);
+/* max. possible bandwidth */
+MSM_BUS_SPS_TO_DDR_VOTE_VECTOR(6, UINT_MAX);
+
+static unsigned int sdcc_bw_vectors[] = {0, (13 * 1024 * 1024),
+ (26 * 1024 * 1024), (52 * 1024 * 1024),
+ (104 * 1024 * 1024), (200 * 1024 * 1024),
+ UINT_MAX};
+
+static struct msm_bus_paths sps_to_ddr_bus_scale_usecases[] = {
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(0),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(1),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(2),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(3),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(4),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(5),
+ MSM_BUS_SPS_TO_DDR_VOTE_VECTOR_USECASE(6),
+};
+
+static struct msm_bus_scale_pdata sps_to_ddr_bus_scale_data = {
+ sps_to_ddr_bus_scale_usecases,
+ ARRAY_SIZE(sps_to_ddr_bus_scale_usecases),
+ .name = "msm_sdcc",
+};
+
+static struct msm_mmc_bus_voting_data sps_to_ddr_bus_voting_data = {
+ .use_cases = &sps_to_ddr_bus_scale_data,
+ .bw_vecs = sdcc_bw_vectors,
+ .bw_vecs_size = sizeof(sdcc_bw_vectors),
+};
+
+#endif /* _BOARD_STORAGE_A_H */
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
index f51eb5b..fcd6386 100644
--- a/arch/arm/mach-msm/clock-7x30.c
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -2651,14 +2651,14 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
static struct clk measure_clk = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk),
};
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 4860d42..3ee59b1 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -42,6 +42,7 @@
/* Peripheral clock registers. */
#define ADM0_PBUS_CLK_CTL_REG REG(0x2208)
+#define SFAB_SATA_S_HCLK_CTL_REG REG(0x2480)
#define CE1_HCLK_CTL_REG REG(0x2720)
#define CE1_CORE_CLK_CTL_REG REG(0x2724)
#define PRNG_CLK_NS_REG REG(0x2E80)
@@ -49,6 +50,7 @@
#define CE3_CORE_CLK_CTL_REG REG(0x36CC)
#define CE3_CLK_SRC_NS_REG REG(0x36C0)
#define DMA_BAM_HCLK_CTL REG(0x25C0)
+#define CLK_HALT_AFAB_SFAB_STATEA_REG REG(0x2FC0)
#define CLK_HALT_AFAB_SFAB_STATEB_REG REG(0x2FC4)
#define CLK_HALT_CFPB_STATEA_REG REG(0x2FCC)
#define CLK_HALT_CFPB_STATEB_REG REG(0x2FD0)
@@ -107,10 +109,12 @@
#define TSIF_REF_CLK_MD_REG REG(0x270C)
#define TSIF_REF_CLK_NS_REG REG(0x2710)
#define TSSC_CLK_CTL_REG REG(0x2CA0)
+#define SATA_HCLK_CTL_REG REG(0x2C00)
#define SATA_CLK_SRC_NS_REG REG(0x2C08)
#define SATA_RXOOB_CLK_CTL_REG REG(0x2C0C)
#define SATA_PMALIVE_CLK_CTL_REG REG(0x2C10)
#define SATA_PHY_REF_CLK_CTL_REG REG(0x2C14)
+#define SATA_ACLK_CTL_REG REG(0x2C20)
#define SATA_PHY_CFG_CLK_CTL_REG REG(0x2C40)
#define USB_FSn_HCLK_CTL_REG(n) REG(0x2960+(0x20*((n)-1)))
#define USB_FSn_RESET_REG(n) REG(0x2974+(0x20*((n)-1)))
@@ -142,7 +146,9 @@
#define USB_HSIC_XCVR_FS_CLK_NS_REG REG(0x2928)
#define USB_PHY0_RESET_REG REG(0x2E20)
#define PCIE_ALT_REF_CLK_NS_REG REG(0x3860)
+#define PCIE_ACLK_CTL_REG REG(0x22C0)
#define PCIE_HCLK_CTL_REG REG(0x22CC)
+#define PCIE_PCLK_CTL_REG REG(0x22D0)
#define GPLL1_MODE_REG REG(0x3160)
#define GPLL1_L_VAL_REG REG(0x3164)
#define GPLL1_M_VAL_REG REG(0x3168)
@@ -1923,6 +1929,69 @@
}
};
+#define F_SATA(f, s, d) \
+ { \
+ .freq_hz = f, \
+ .src_clk = &s##_clk.c, \
+ .ns_val = NS_DIVSRC(6, 3, d, 2, 0, s##_to_bb_mux), \
+ }
+
+static struct clk_freq_tbl clk_tbl_sata[] = {
+ F_SATA( 0, gnd, 1),
+ F_SATA( 48000000, pll8, 8),
+ F_SATA(100000000, pll3, 12),
+ F_END
+};
+
+static struct rcg_clk sata_src_clk = {
+ .b = {
+ .ctl_reg = SATA_CLK_SRC_NS_REG,
+ .halt_check = NOCHECK,
+ },
+ .ns_reg = SATA_CLK_SRC_NS_REG,
+ .root_en_mask = BIT(7),
+ .ns_mask = BM(6, 0),
+ .set_rate = set_rate_nop,
+ .freq_tbl = clk_tbl_sata,
+ .current_freq = &rcg_dummy_freq,
+ .c = {
+ .dbg_name = "sata_src_clk",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 50000000, NOMINAL, 100000000),
+ CLK_INIT(sata_src_clk.c),
+ },
+};
+
+static struct branch_clk sata_rxoob_clk = {
+ .b = {
+ .ctl_reg = SATA_RXOOB_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 26,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_rxoob_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_rxoob_clk.c),
+ },
+};
+
+static struct branch_clk sata_pmalive_clk = {
+ .b = {
+ .ctl_reg = SATA_PMALIVE_CLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 25,
+ },
+ .parent = &sata_src_clk.c,
+ .c = {
+ .dbg_name = "sata_pmalive_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_pmalive_clk.c),
+ },
+};
+
static struct branch_clk sata_phy_ref_clk = {
.b = {
.ctl_reg = SATA_PHY_REF_CLK_CTL_REG,
@@ -1938,6 +2007,47 @@
},
};
+static struct branch_clk sata_a_clk = {
+ .b = {
+ .ctl_reg = SATA_ACLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEA_REG,
+ .halt_bit = 12,
+ },
+ .c = {
+ .dbg_name = "sata_a_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_a_clk.c),
+ },
+};
+
+static struct branch_clk sata_p_clk = {
+ .b = {
+ .ctl_reg = SATA_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 27,
+ },
+ .c = {
+ .dbg_name = "sata_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sata_p_clk.c),
+ },
+};
+
+static struct branch_clk sfab_sata_s_p_clk = {
+ .b = {
+ .ctl_reg = SFAB_SATA_S_HCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEB_REG,
+ .halt_bit = 14,
+ },
+ .c = {
+ .dbg_name = "sfab_sata_s_p_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(sfab_sata_s_p_clk.c),
+ },
+};
static struct branch_clk pcie_p_clk = {
.b = {
.ctl_reg = PCIE_HCLK_CTL_REG,
@@ -1952,6 +2062,34 @@
},
};
+static struct branch_clk pcie_phy_ref_clk = {
+ .b = {
+ .ctl_reg = PCIE_PCLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_MSS_SMPSS_MISC_STATE_REG,
+ .halt_bit = 29,
+ },
+ .c = {
+ .dbg_name = "pcie_phy_ref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(pcie_phy_ref_clk.c),
+ },
+};
+
+static struct branch_clk pcie_a_clk = {
+ .b = {
+ .ctl_reg = PCIE_ACLK_CTL_REG,
+ .en_mask = BIT(4),
+ .halt_reg = CLK_HALT_AFAB_SFAB_STATEA_REG,
+ .halt_bit = 13,
+ },
+ .c = {
+ .dbg_name = "pcie_a_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(pcie_a_clk.c),
+ },
+};
+
static struct branch_clk dma_bam_p_clk = {
.b = {
.ctl_reg = DMA_BAM_HCLK_CTL,
@@ -4519,7 +4657,11 @@
{ TEST_PER_LS(0x56), &gsbi7_uart_clk.c },
{ TEST_PER_LS(0x58), &gsbi7_qup_clk.c },
{ TEST_PER_LS(0x59), &gsbi8_p_clk.c },
+ { TEST_PER_LS(0x59), &sfab_sata_s_p_clk.c },
{ TEST_PER_LS(0x5A), &gsbi8_uart_clk.c },
+ { TEST_PER_LS(0x5A), &sata_p_clk.c },
+ { TEST_PER_LS(0x5B), &sata_rxoob_clk.c },
+ { TEST_PER_LS(0x5C), &sata_pmalive_clk.c },
{ TEST_PER_LS(0x5C), &gsbi8_qup_clk.c },
{ TEST_PER_LS(0x5D), &gsbi9_p_clk.c },
{ TEST_PER_LS(0x5E), &gsbi9_uart_clk.c },
@@ -4575,6 +4717,9 @@
{ TEST_PER_HS(0x26), &q6sw_clk },
{ TEST_PER_HS(0x27), &q6fw_clk },
{ TEST_PER_HS(0x2A), &adm0_clk.c },
+ { TEST_PER_HS(0x31), &sata_a_clk.c },
+ { TEST_PER_HS(0x2D), &pcie_phy_ref_clk.c },
+ { TEST_PER_HS(0x32), &pcie_a_clk.c },
{ TEST_PER_HS(0x34), &ebi1_clk.c },
{ TEST_PER_HS(0x34), &ebi1_a_clk.c },
{ TEST_PER_HS(0x50), &usb_hsic_hsic_clk.c },
@@ -4849,7 +4994,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -4857,7 +5002,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
@@ -4934,6 +5079,12 @@
CLK_LOOKUP("sys_clk", usb_fs1_sys_clk.c, ""),
CLK_LOOKUP("ref_clk", sata_phy_ref_clk.c, ""),
CLK_LOOKUP("cfg_clk", sata_phy_cfg_clk.c, ""),
+ CLK_LOOKUP("src_clk", sata_src_clk.c, ""),
+ CLK_LOOKUP("core_rxoob_clk", sata_rxoob_clk.c, ""),
+ CLK_LOOKUP("core_pmalive_clk", sata_pmalive_clk.c, ""),
+ CLK_LOOKUP("bus_clk", sata_a_clk.c, ""),
+ CLK_LOOKUP("iface_clk", sata_p_clk.c, ""),
+ CLK_LOOKUP("slave_iface_clk", sfab_sata_s_p_clk.c, ""),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qce.0"),
CLK_LOOKUP("iface_clk", ce3_p_clk.c, "qcrypto.0"),
CLK_LOOKUP("core_clk", ce3_core_clk.c, "qce.0"),
@@ -4959,7 +5110,9 @@
CLK_LOOKUP("iface_clk", sdc2_p_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("iface_clk", sdc3_p_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("iface_clk", sdc4_p_clk.c, "msm_sdcc.4"),
- CLK_LOOKUP("iface_clk", pcie_p_clk.c, ""),
+ CLK_LOOKUP("iface_clk", pcie_p_clk.c, "msm_pcie"),
+ CLK_LOOKUP("ref_clk", pcie_phy_ref_clk.c, "msm_pcie"),
+ CLK_LOOKUP("bus_clk", pcie_a_clk.c, "msm_pcie"),
CLK_LOOKUP("core_clk", adm0_clk.c, "msm_dmov"),
CLK_LOOKUP("iface_clk", adm0_p_clk.c, "msm_dmov"),
CLK_LOOKUP("iface_clk", pmic_arb0_p_clk.c, ""),
@@ -5947,9 +6100,14 @@
if (cpu_is_msm8960() || cpu_is_apq8064())
rmwreg(0x2, DSI2_BYTE_NS_REG, 0x7);
- /* Source the sata_phy_ref_clk from PXO */
- if (cpu_is_apq8064())
+ /*
+ * Source the sata_phy_ref_clk from PXO and set predivider of
+ * sata_pmalive_clk to 1.
+ */
+ if (cpu_is_apq8064()) {
rmwreg(0, SATA_PHY_REF_CLK_CTL_REG, 0x1);
+ rmwreg(0, SATA_PMALIVE_CLK_CTL_REG, 0x3);
+ }
/*
* TODO: Programming below PLLs and prng_clk is temporary and
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 8d2b37a..6972302 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3445,7 +3445,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -3453,7 +3453,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
diff --git a/arch/arm/mach-msm/clock-9615.c b/arch/arm/mach-msm/clock-9615.c
index 1fd9b4d..66d849a 100644
--- a/arch/arm/mach-msm/clock-9615.c
+++ b/arch/arm/mach-msm/clock-9615.c
@@ -1560,7 +1560,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -1568,7 +1568,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
@@ -1655,6 +1655,10 @@
"msm-dai-q6.1"),
CLK_LOOKUP("osr_clk", codec_i2s_mic_osr_clk.c,
"msm-dai-q6.1"),
+ CLK_LOOKUP("bit_clk", codec_i2s_spkr_bit_clk.c,
+ "msm-dai-q6.0"),
+ CLK_LOOKUP("osr_clk", codec_i2s_spkr_osr_clk.c,
+ "msm-dai-q6.0"),
CLK_LOOKUP("bit_clk", spare_i2s_mic_bit_clk.c,
"msm-dai-q6.5"),
CLK_LOOKUP("osr_clk", spare_i2s_mic_osr_clk.c,
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index 7123ffa..c0245a3 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -102,6 +102,7 @@
#define MMSS_DEBUG_CLK_CTL_REG 0x0900
#define LPASS_DEBUG_CLK_CTL_REG 0x29000
#define LPASS_LPA_PLL_VOTE_APPS_REG 0x2000
+#define MSS_DEBUG_CLK_CTL_REG 0x0078
#define USB30_MASTER_CMD_RCGR 0x03D4
#define USB30_MOCK_UTMI_CMD_RCGR 0x03E8
@@ -230,6 +231,7 @@
#define BLSP2_UART5_BCR 0x0BC0
#define BLSP2_QUP6_BCR 0x0C00
#define BLSP2_UART6_BCR 0x0C40
+#define BOOT_ROM_BCR 0x0E00
#define PDM_BCR 0x0CC0
#define PRNG_BCR 0x0D00
#define BAM_DMA_BCR 0x0D40
@@ -280,6 +282,8 @@
#define OXILICX_AXI_CBCR 0x4038
#define OXILI_BCR 0x4020
#define OXILICX_BCR 0x4030
+#define LPASS_Q6SS_BCR 0x6000
+#define MSS_Q6SS_BCR 0x1068
#define OCMEM_SYS_NOC_AXI_CBCR 0x0244
#define OCMEM_NOC_CFG_AHB_CBCR 0x0248
@@ -327,6 +331,7 @@
#define BLSP1_UART6_APPS_CBCR 0x0904
#define BLSP1_UART6_SIM_CBCR 0x0908
#define BLSP2_AHB_CBCR 0x0944
+#define BOOT_ROM_AHB_CBCR 0x0E04
#define BLSP2_QUP1_SPI_APPS_CBCR 0x0984
#define BLSP2_QUP1_I2C_APPS_CBCR 0x0988
#define BLSP2_UART1_APPS_CBCR 0x09C4
@@ -469,6 +474,11 @@
#define MMSS_MISC_AHB_CBCR 0x502C
#define MMSS_S0_AXI_CBCR 0x5064
#define OCMEMNOC_CBCR 0x50B4
+#define LPASS_Q6SS_AHB_LFABIF_CBCR 0x22000
+#define LPASS_Q6SS_XO_CBCR 0x26000
+#define MSS_XO_Q6_CBCR 0x108C
+#define MSS_BUS_Q6_CBCR 0x10A4
+#define MSS_CFG_AHB_CBCR 0x0280
#define APCS_CLOCK_BRANCH_ENA_VOTE 0x1484
#define APCS_CLOCK_SLEEP_ENA_VOTE 0x1488
@@ -1625,6 +1635,19 @@
},
};
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+ .cbcr_reg = BOOT_ROM_AHB_CBCR,
+ .vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(10),
+ .bcr_reg = BOOT_ROM_BCR,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_boot_rom_ahb_clk.c),
+ },
+};
+
static struct local_vote_clk gcc_blsp2_ahb_clk = {
.cbcr_reg = BLSP2_AHB_CBCR,
.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
@@ -2225,6 +2248,17 @@
},
};
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+ .cbcr_reg = MSS_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+ },
+};
+
static struct clk_freq_tbl ftbl_mmss_ahb_clk[] = {
F_MM(19200000, cxo, 1, 0, 0),
F_MM(40000000, gpll0, 15, 0, 0),
@@ -4291,6 +4325,55 @@
},
};
+static struct branch_clk q6ss_ahb_lfabif_clk = {
+ .cbcr_reg = LPASS_Q6SS_AHB_LFABIF_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "q6ss_ahb_lfabif_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(q6ss_ahb_lfabif_clk.c),
+ },
+};
+
+static struct branch_clk q6ss_xo_clk = {
+ .cbcr_reg = LPASS_Q6SS_XO_CBCR,
+ .bcr_reg = LPASS_Q6SS_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "q6ss_xo_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(q6ss_xo_clk.c),
+ },
+};
+
+static struct branch_clk mss_xo_q6_clk = {
+ .cbcr_reg = MSS_XO_Q6_CBCR,
+ .bcr_reg = MSS_Q6SS_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[MSS_BASE],
+ .c = {
+ .dbg_name = "mss_xo_q6_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mss_xo_q6_clk.c),
+ .depends = &gcc_mss_cfg_ahb_clk.c,
+ },
+};
+
+static struct branch_clk mss_bus_q6_clk = {
+ .cbcr_reg = MSS_BUS_Q6_CBCR,
+ .bcr_reg = MSS_Q6SS_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[MSS_BASE],
+ .c = {
+ .dbg_name = "mss_bus_q6_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mss_bus_q6_clk.c),
+ .depends = &gcc_mss_cfg_ahb_clk.c,
+ },
+};
+
#ifdef CONFIG_DEBUG_FS
struct measure_mux_entry {
@@ -4339,6 +4422,8 @@
{&gcc_blsp2_uart4_apps_clk.c, GCC_BASE, 0x00c2},
{&gcc_blsp2_uart5_apps_clk.c, GCC_BASE, 0x00c6},
{&gcc_blsp2_uart6_apps_clk.c, GCC_BASE, 0x00cb},
+ {&gcc_boot_rom_ahb_clk.c, GCC_BASE, 0x0100},
+ {&gcc_mss_cfg_ahb_clk.c, GCC_BASE, 0x0030},
{&gcc_ce1_clk.c, GCC_BASE, 0x0140},
{&gcc_ce2_clk.c, GCC_BASE, 0x0148},
{&gcc_pdm2_clk.c, GCC_BASE, 0x00da},
@@ -4438,6 +4523,11 @@
{&audio_core_lpaif_pcm1_clk_src.c, LPASS_BASE, 0x0012},
{&audio_core_slimbus_core_clk.c, LPASS_BASE, 0x003d},
{&audio_core_slimbus_lfabif_clk.c, LPASS_BASE, 0x003e},
+ {&q6ss_xo_clk.c, LPASS_BASE, 0x002b},
+ {&q6ss_ahb_lfabif_clk.c, LPASS_BASE, 0x001e},
+ {&mss_bus_q6_clk.c, MSS_BASE, 0x003c},
+ {&mss_xo_q6_clk.c, MSS_BASE, 0x0007},
+
{&dummy_clk, N_BASES, 0x0000},
};
@@ -4465,6 +4555,7 @@
clk->sample_ticks = 0x10000;
clk->multiplier = 1;
+ writel_relaxed(0, MSS_REG_BASE(MSS_DEBUG_CLK_CTL_REG));
writel_relaxed(0, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
writel_relaxed(0, MMSS_REG_BASE(MMSS_DEBUG_CLK_CTL_REG));
writel_relaxed(0, GCC_REG_BASE(GCC_DEBUG_CLK_CTL_REG));
@@ -4495,6 +4586,12 @@
writel_relaxed(regval, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
break;
+ case MSS_BASE:
+ clk_sel = 0x32;
+ regval = BVAL(5, 0, measure_mux[i].debug_mux);
+ writel_relaxed(regval, MSS_REG_BASE(MSS_DEBUG_CLK_CTL_REG));
+ break;
+
default:
return -EINVAL;
}
@@ -4602,7 +4699,7 @@
}
#endif /* CONFIG_DEBUG_FS */
-static struct clk_ops measure_clk_ops = {
+static struct clk_ops clk_ops_measure = {
.set_parent = measure_clk_set_parent,
.get_rate = measure_clk_get_rate,
};
@@ -4610,7 +4707,7 @@
static struct measure_clk measure_clk = {
.c = {
.dbg_name = "measure_clk",
- .ops = &measure_clk_ops,
+ .ops = &clk_ops_measure,
CLK_INIT(measure_clk.c),
},
.multiplier = 1,
@@ -4807,6 +4904,14 @@
CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
+ CLK_LOOKUP("core_clk", mss_xo_q6_clk.c, ""),
+ CLK_LOOKUP("bus_clk", mss_bus_q6_clk.c, ""),
+ CLK_LOOKUP("core_clk", q6ss_xo_clk.c, "pil-q6v5-lpass"),
+ CLK_LOOKUP("bus_clk", q6ss_ahb_lfabif_clk.c, "pil-q6v5-lpass"),
+ CLK_LOOKUP("mem_clk", gcc_boot_rom_ahb_clk.c, ""),
+ CLK_LOOKUP("bus_clk", gcc_mss_cfg_ahb_clk.c, ""),
+ CLK_DUMMY("core_clk", PRNG_CLK , "msm_rng.0", OFF),
+
/* TODO: Remove dummy clocks as soon as they become unnecessary */
CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
@@ -5048,6 +5153,9 @@
#define LPASS_CC_PHYS 0xFE000000
#define LPASS_CC_SIZE SZ_256K
+#define MSS_CC_PHYS 0xFC980000
+#define MSS_CC_SIZE SZ_16K
+
static void __init msmcopper_clock_pre_init(void)
{
virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
@@ -5062,6 +5170,10 @@
if (!virt_bases[LPASS_BASE])
panic("clock-copper: Unable to ioremap LPASS_CC memory!");
+ virt_bases[MSS_BASE] = ioremap(MSS_CC_PHYS, MSS_CC_SIZE);
+ if (!virt_bases[MSS_BASE])
+ panic("clock-copper: Unable to ioremap MSS_CC memory!");
+
clk_ops_local_pll.enable = copper_pll_clk_enable;
reg_init();
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index e1b3381..4f365fa 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -627,20 +627,16 @@
return HANDOFF_ENABLED_CLK;
}
-struct clk_ops clk_ops_gnd = {
-};
+struct clk_ops clk_ops_empty;
struct fixed_clk gnd_clk = {
.c = {
.dbg_name = "ground_clk",
- .ops = &clk_ops_gnd,
+ .ops = &clk_ops_empty,
CLK_INIT(gnd_clk.c),
},
};
-struct clk_ops clk_ops_measure = {
-};
-
static int branch_clk_enable(struct clk *clk)
{
unsigned long flags;
diff --git a/arch/arm/mach-msm/clock-local.h b/arch/arm/mach-msm/clock-local.h
index a419d69..ffc7057 100644
--- a/arch/arm/mach-msm/clock-local.h
+++ b/arch/arm/mach-msm/clock-local.h
@@ -77,7 +77,7 @@
*/
#define DEFINE_CLK_MEASURE(name) \
struct clk name = { \
- .ops = &clk_ops_measure, \
+ .ops = &clk_ops_empty, \
.dbg_name = #name, \
CLK_INIT(name), \
}; \
@@ -264,7 +264,7 @@
struct clk c;
};
-extern struct clk_ops clk_ops_measure;
+extern struct clk_ops clk_ops_empty;
static inline struct measure_clk *to_measure_clk(struct clk *clk)
{
diff --git a/arch/arm/mach-msm/clock-local2.h b/arch/arm/mach-msm/clock-local2.h
index c8d53cb..547e633 100644
--- a/arch/arm/mach-msm/clock-local2.h
+++ b/arch/arm/mach-msm/clock-local2.h
@@ -153,8 +153,6 @@
struct clk c;
};
-extern struct clk_ops clk_ops_measure;
-
static inline struct measure_clk *to_measure_clk(struct clk *clk)
{
return container_of(clk, struct measure_clk, c);
diff --git a/arch/arm/mach-msm/clock-pcom-lookup.c b/arch/arm/mach-msm/clock-pcom-lookup.c
index d842d45..f71d6d5 100644
--- a/arch/arm/mach-msm/clock-pcom-lookup.c
+++ b/arch/arm/mach-msm/clock-pcom-lookup.c
@@ -39,7 +39,7 @@
.id = PLL_0,
.mode_reg = PLLn_MODE(0),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll0_clk",
CLK_INIT(pll0_clk.c),
},
@@ -49,7 +49,7 @@
.id = PLL_1,
.mode_reg = PLLn_MODE(1),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll1_clk",
CLK_INIT(pll1_clk.c),
},
@@ -59,7 +59,7 @@
.id = PLL_2,
.mode_reg = PLLn_MODE(2),
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll2_clk",
CLK_INIT(pll2_clk.c),
},
@@ -69,7 +69,7 @@
.id = PLL_4,
.mode_reg = PLL4_MODE,
.c = {
- .ops = &clk_pll_ops,
+ .ops = &clk_ops_pll,
.dbg_name = "pll4_clk",
CLK_INIT(pll4_clk.c),
},
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index a4750bc..48a3409 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -238,7 +238,7 @@
spin_lock_irqsave(&pll_reg_lock, flags);
mode = readl_relaxed(PLL_MODE_REG(pll));
/* Disable PLL bypass mode. */
- mode |= BIT(1);
+ mode |= PLL_BYPASSNL;
writel_relaxed(mode, PLL_MODE_REG(pll));
/*
@@ -249,7 +249,7 @@
udelay(10);
/* De-assert active-low PLL reset. */
- mode |= BIT(2);
+ mode |= PLL_RESET_N;
writel_relaxed(mode, PLL_MODE_REG(pll));
/* Wait for pll to enable. */
@@ -266,7 +266,7 @@
}
/* Enable PLL output. */
- mode |= BIT(0);
+ mode |= PLL_OUTCTRL;
writel_relaxed(mode, PLL_MODE_REG(pll));
/* Ensure the write above goes through before returning. */
@@ -297,6 +297,7 @@
{41, 800000000},
{50, 960000000},
{52, 1008000000},
+ {60, 1152000000},
{62, 1200000000},
{63, 1209600000},
{0, 0},
@@ -427,7 +428,7 @@
return HANDOFF_ENABLED_CLK;
}
-struct clk_ops clk_pll_ops = {
+struct clk_ops clk_ops_pll = {
.enable = pll_clk_enable,
.disable = pll_clk_disable,
.handoff = pll_clk_handoff,
diff --git a/arch/arm/mach-msm/clock-pll.h b/arch/arm/mach-msm/clock-pll.h
index 231668f..a8c642f 100644
--- a/arch/arm/mach-msm/clock-pll.h
+++ b/arch/arm/mach-msm/clock-pll.h
@@ -43,7 +43,7 @@
void *const __iomem *base;
};
-extern struct clk_ops clk_pll_ops;
+extern struct clk_ops clk_ops_pll;
static inline struct pll_shared_clk *to_pll_shared_clk(struct clk *clk)
{
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 2c533d9..6131590 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -82,6 +82,25 @@
#define MSM_HSUSB4_PHYS 0x12530000
#define MSM_HSUSB4_SIZE SZ_4K
+/* Address of PCIE20 PARF */
+#define PCIE20_PARF_PHYS 0x1b600000
+#define PCIE20_PARF_SIZE SZ_128
+
+/* Address of PCIE20 ELBI */
+#define PCIE20_ELBI_PHYS 0x1b502000
+#define PCIE20_ELBI_SIZE SZ_256
+
+/* Address of PCIE20 */
+#define PCIE20_PHYS 0x1b500000
+#define PCIE20_SIZE SZ_4K
+
+/* AXI address for PCIE device BAR resources */
+#define PCIE_AXI_BAR_PHYS 0x08000000
+#define PCIE_AXI_BAR_SIZE SZ_8M
+
+/* AXI address for PCIE device config space */
+#define PCIE_AXI_CONF_PHYS 0x08c00000
+#define PCIE_AXI_CONF_SIZE SZ_4K
static struct msm_watchdog_pdata msm_watchdog_pdata = {
.pet_time = 10000,
@@ -458,13 +477,24 @@
*/
struct msm_dai_auxpcm_pdata apq_auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device apq_cpudai_auxpcm_rx = {
@@ -1066,6 +1096,7 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.memtype = ION_CP_MM_HEAP_ID,
.enable_ion = 1,
+ .cp_enabled = 1,
#else
.memtype = MEMTYPE_EBI1,
.enable_ion = 0,
@@ -1517,6 +1548,46 @@
},
};
+static struct resource resources_msm_pcie[] = {
+ {
+ .name = "parf",
+ .start = PCIE20_PARF_PHYS,
+ .end = PCIE20_PARF_PHYS + PCIE20_PARF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "elbi",
+ .start = PCIE20_ELBI_PHYS,
+ .end = PCIE20_ELBI_PHYS + PCIE20_ELBI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "pcie20",
+ .start = PCIE20_PHYS,
+ .end = PCIE20_PHYS + PCIE20_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "axi_bar",
+ .start = PCIE_AXI_BAR_PHYS,
+ .end = PCIE_AXI_BAR_PHYS + PCIE_AXI_BAR_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "axi_conf",
+ .start = PCIE_AXI_CONF_PHYS,
+ .end = PCIE_AXI_CONF_PHYS + PCIE_AXI_CONF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm_device_pcie = {
+ .name = "msm_pcie",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(resources_msm_pcie),
+ .resource = resources_msm_pcie,
+};
+
#ifdef CONFIG_HW_RANDOM_MSM
/* PRNG device */
#define MSM_PRNG_PHYS 0x1A500000
@@ -2069,11 +2140,12 @@
};
#endif
+/* AP2MDM_SOFT_RESET is implemented by the PON_RESET_N gpio */
#define MDM2AP_ERRFATAL 19
#define AP2MDM_ERRFATAL 18
#define MDM2AP_STATUS 49
#define AP2MDM_STATUS 48
-#define AP2MDM_PMIC_RESET_N 27
+#define AP2MDM_SOFT_RESET 27
#define AP2MDM_WAKEUP 35
static struct resource mdm_resources[] = {
@@ -2102,9 +2174,9 @@
.flags = IORESOURCE_IO,
},
{
- .start = AP2MDM_PMIC_RESET_N,
- .end = AP2MDM_PMIC_RESET_N,
- .name = "AP2MDM_PMIC_RESET_N",
+ .start = AP2MDM_SOFT_RESET,
+ .end = AP2MDM_SOFT_RESET,
+ .name = "AP2MDM_SOFT_RESET",
.flags = IORESOURCE_IO,
},
{
@@ -2241,11 +2313,17 @@
.flags = IORESOURCE_MEM,
},
{
- .name = "vcap",
+ .name = "vc_irq",
.start = VCAP_VC,
.end = VCAP_VC,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "vp_irq",
+ .start = VCAP_VP,
+ .end = VCAP_VP,
+ .flags = IORESOURCE_IRQ,
+ },
};
static unsigned vcap_gpios[] = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index f8cb345..4ad73f9 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -686,6 +686,7 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
.memtype = ION_CP_MM_HEAP_ID,
.enable_ion = 1,
+ .cp_enabled = 1,
#else
.memtype = MEMTYPE_EBI1,
.enable_ion = 0,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index fd15c98..e474e36 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1892,13 +1892,24 @@
*/
struct msm_dai_auxpcm_pdata auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device msm_cpudai_auxpcm_rx = {
@@ -3622,6 +3633,7 @@
struct msm_cache_dump_platform_data msm8960_cache_dump_pdata = {
.l2_size = L2_BUFFER_SIZE,
+ .l1_size = L1_BUFFER_SIZE,
};
struct platform_device msm8960_cache_dump_device = {
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index c084d29..76d79a6 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -62,6 +62,9 @@
#define MSM_PMIC1_SSBI_CMD_PHYS 0x00500000
#define MSM_PMIC_SSBI_SIZE SZ_4K
+#define MSM_GPIO_I2C_CLK 16
+#define MSM_GPIO_I2C_SDA 17
+
static struct msm_watchdog_pdata msm_watchdog_pdata = {
.pet_time = 10000,
.bark_time = 11000,
@@ -132,6 +135,8 @@
},
};
+#define MSM_HSUSB_RESUME_GPIO 79
+
static struct resource resources_hsusb[] = {
{
.start = MSM9615_HSUSB_PHYS,
@@ -143,6 +148,12 @@
.end = USB1_HS_IRQ,
.flags = IORESOURCE_IRQ,
},
+ {
+ .start = MSM_HSUSB_RESUME_GPIO,
+ .end = MSM_HSUSB_RESUME_GPIO,
+ .name = "USB_RESUME",
+ .flags = IORESOURCE_IO,
+ },
};
static struct resource resources_usb_bam[] = {
@@ -307,6 +318,19 @@
.end = GSBI5_QUP_IRQ,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "i2c_clk",
+ .start = MSM_GPIO_I2C_CLK,
+ .end = MSM_GPIO_I2C_CLK,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "i2c_sda",
+ .start = MSM_GPIO_I2C_SDA,
+ .end = MSM_GPIO_I2C_SDA,
+ .flags = IORESOURCE_IO,
+
+ },
};
struct platform_device msm9615_device_qup_i2c_gsbi5 = {
@@ -432,13 +456,24 @@
*/
struct msm_dai_auxpcm_pdata auxpcm_pdata = {
.clk = "pcm_clk",
- .mode = AFE_PCM_CFG_MODE_PCM,
- .sync = AFE_PCM_CFG_SYNC_INT,
- .frame = AFE_PCM_CFG_FRM_256BPF,
- .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
- .slot = 0,
- .data = AFE_PCM_CFG_CDATAOE_MASTER,
- .pcm_clk_rate = 2048000,
+ .mode_8k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 2048000,
+ },
+ .mode_16k = {
+ .mode = AFE_PCM_CFG_MODE_PCM,
+ .sync = AFE_PCM_CFG_SYNC_INT,
+ .frame = AFE_PCM_CFG_FRM_256BPF,
+ .quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD,
+ .slot = 0,
+ .data = AFE_PCM_CFG_CDATAOE_MASTER,
+ .pcm_clk_rate = 4096000,
+ }
};
struct platform_device msm_cpudai_auxpcm_rx = {
@@ -472,6 +507,15 @@
.id = -1,
};
+struct platform_device msm_i2s_cpudai0 = {
+ .name = "msm-dai-q6",
+ .id = PRIMARY_I2S_RX,
+};
+
+struct platform_device msm_i2s_cpudai1 = {
+ .name = "msm-dai-q6",
+ .id = PRIMARY_I2S_TX,
+};
struct platform_device msm_voip = {
.name = "msm-voip-dsp",
.id = -1,
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 9a03afd..2e0253b 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -568,26 +568,26 @@
.mids = {1, -1}
};
-static struct msm_iommu_ctx_dev mdp_vg1_ctx = {
- .name = "mdp_vg1",
+static struct msm_iommu_ctx_dev mdp_port0_cb0_ctx = {
+ .name = "mdp_port0_cb0",
.num = 0,
.mids = {0, 2, -1}
};
-static struct msm_iommu_ctx_dev mdp_rgb1_ctx = {
- .name = "mdp_rgb1",
+static struct msm_iommu_ctx_dev mdp_port0_cb1_ctx = {
+ .name = "mdp_port0_cb1",
.num = 1,
.mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1}
};
-static struct msm_iommu_ctx_dev mdp_vg2_ctx = {
- .name = "mdp_vg2",
+static struct msm_iommu_ctx_dev mdp_port1_cb0_ctx = {
+ .name = "mdp_port1_cb0",
.num = 0,
.mids = {0, 2, -1}
};
-static struct msm_iommu_ctx_dev mdp_rgb2_ctx = {
- .name = "mdp_rgb2",
+static struct msm_iommu_ctx_dev mdp_port1_cb1_ctx = {
+ .name = "mdp_port1_cb1",
.num = 1,
.mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1}
};
@@ -732,39 +732,39 @@
},
};
-static struct platform_device msm_device_mdp_vg1_ctx = {
+static struct platform_device msm_device_mdp_port0_cb0_ctx = {
.name = "msm_iommu_ctx",
.id = 4,
.dev = {
.parent = &msm_device_iommu_mdp0.dev,
- .platform_data = &mdp_vg1_ctx,
+ .platform_data = &mdp_port0_cb0_ctx,
},
};
-static struct platform_device msm_device_mdp_rgb1_ctx = {
+static struct platform_device msm_device_mdp_port0_cb1_ctx = {
.name = "msm_iommu_ctx",
.id = 5,
.dev = {
.parent = &msm_device_iommu_mdp0.dev,
- .platform_data = &mdp_rgb1_ctx,
+ .platform_data = &mdp_port0_cb1_ctx,
},
};
-static struct platform_device msm_device_mdp_vg2_ctx = {
+static struct platform_device msm_device_mdp_port1_cb0_ctx = {
.name = "msm_iommu_ctx",
.id = 6,
.dev = {
.parent = &msm_device_iommu_mdp1.dev,
- .platform_data = &mdp_vg2_ctx,
+ .platform_data = &mdp_port1_cb0_ctx,
},
};
-static struct platform_device msm_device_mdp_rgb2_ctx = {
+static struct platform_device msm_device_mdp_port1_cb1_ctx = {
.name = "msm_iommu_ctx",
.id = 7,
.dev = {
.parent = &msm_device_iommu_mdp1.dev,
- .platform_data = &mdp_rgb2_ctx,
+ .platform_data = &mdp_port1_cb1_ctx,
},
};
@@ -950,10 +950,10 @@
static struct platform_device *msm_iommu_common_ctx_devs[] = {
&msm_device_vpe_src_ctx,
&msm_device_vpe_dst_ctx,
- &msm_device_mdp_vg1_ctx,
- &msm_device_mdp_rgb1_ctx,
- &msm_device_mdp_vg2_ctx,
- &msm_device_mdp_rgb2_ctx,
+ &msm_device_mdp_port0_cb0_ctx,
+ &msm_device_mdp_port0_cb1_ctx,
+ &msm_device_mdp_port1_cb0_ctx,
+ &msm_device_mdp_port1_cb1_ctx,
&msm_device_rot_src_ctx,
&msm_device_rot_dst_ctx,
&msm_device_ijpeg_src_ctx,
diff --git a/arch/arm/mach-msm/devices-msm7x2xa.h b/arch/arm/mach-msm/devices-msm7x2xa.h
index 407554c..4184a86 100644
--- a/arch/arm/mach-msm/devices-msm7x2xa.h
+++ b/arch/arm/mach-msm/devices-msm7x2xa.h
@@ -32,4 +32,5 @@
int ar600x_wlan_power(bool on);
void __init msm8x25_spm_device_init(void);
void __init msm8x25_kgsl_3d0_init(void);
+void __iomem *core1_reset_base(void);
#endif
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 31142c1..100d99a 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -167,6 +167,8 @@
extern struct platform_device msm_device_dmov_adm0;
extern struct platform_device msm_device_dmov_adm1;
+extern struct platform_device msm_device_pcie;
+
extern struct platform_device msm_device_nand;
extern struct platform_device msm_device_tssc;
@@ -223,6 +225,8 @@
extern struct platform_device msm_cpudai_incall_music_rx;
extern struct platform_device msm_cpudai_incall_record_rx;
extern struct platform_device msm_cpudai_incall_record_tx;
+extern struct platform_device msm_i2s_cpudai0;
+extern struct platform_device msm_i2s_cpudai1;
extern struct platform_device msm_pil_q6v3;
extern struct platform_device msm_pil_modem;
@@ -392,3 +396,7 @@
extern struct platform_device apq8064_rtb_device;
extern struct platform_device msm8960_cache_dump_device;
+
+extern struct platform_device apq_device_tz_log;
+
+extern struct platform_device msm8974_device_rng;
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index 69c91f1..d3b2274 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -269,15 +269,6 @@
spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
-void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
-{
- int adm = DMOV_ID_TO_ADM(id);
- int ch = DMOV_ID_TO_CHAN(id);
- writel_relaxed((graceful << 31), DMOV_REG(DMOV_FLUSH0(ch), adm));
- wmb();
-}
-EXPORT_SYMBOL(msm_dmov_stop_cmd);
-
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
unsigned long irq_flags;
@@ -502,7 +493,8 @@
cmd->exec_func(cmd);
list_add_tail(&cmd->list,
&dmov_conf[adm].active_commands[ch]);
- PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
+ PRINT_FLOW("msm_datamover_irq_handler id %d,"
+ "start command\n", id);
writel_relaxed(cmd->cmdptr,
DMOV_REG(DMOV_CMD_PTR(ch), adm));
}
diff --git a/arch/arm/mach-msm/idle-macros.S b/arch/arm/mach-msm/idle-macros.S
new file mode 100644
index 0000000..1622e13
--- /dev/null
+++ b/arch/arm/mach-msm/idle-macros.S
@@ -0,0 +1,153 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/hardware/cache-l2x0.h>
+
+/* Add 300 NOPs after 'wfi' for 8x25 target */
+.macro DELAY_8x25, rept
+#ifdef CONFIG_ARCH_MSM8625
+ .rept \rept
+ nop
+ .endr
+#endif
+.endm
+
+/* Switch between smp_to_amp/amp_to_smp configuration */
+.macro SET_SMP_COHERENCY, on = 0
+ ldr r0, =target_type
+ ldr r0, [r0]
+ mov r1, #TARGET_IS_8625
+ cmp r0, r1
+ bne skip\@
+ mrc p15, 0, r0, c1, c0, 1 /* read ACTLR register */
+ .if \on
+ orr r0, r0, #(1 << 6) /* Set the SMP bit in ACTLR */
+ .else
+ bic r0, r0, #(1 << 6) /* Clear the SMP bit */
+ .endif
+ mcr p15, 0, r0, c1, c0, 1 /* write ACTLR register */
+ isb
+skip\@:
+.endm
+
+/*
+ * Enable the "L2" cache, not require to restore the controller registers
+ */
+.macro ENABLE_8x25_L2
+ ldr r0, =target_type
+ ldr r0, [r0]
+ mov r1, #TARGET_IS_8625
+ cmp r0, r1
+ bne skip_enable\@
+ ldr r0, =apps_power_collapse
+ ldr r0, [r0]
+ cmp r0, #POWER_COLLAPSED
+ bne skip_enable\@
+ ldr r0, =l2x0_base_addr
+ ldr r0, [r0]
+ mov r1, #0x1
+ str r1, [r0, #L2X0_CTRL]
+ dmb
+skip_enable\@:
+.endm
+
+/*
+ * Perform the required operation
+ * operation: type of operation on l2 cache (e.g: clean&inv or inv)
+ * l2_enable: enable or disable
+ */
+.macro DO_CACHE_OPERATION, operation, l2_enable
+ ldr r2, =l2x0_base_addr
+ ldr r2, [r2]
+ ldr r0, =0xffff
+ str r0, [r2, #\operation]
+wait\@:
+ ldr r0, [r2, #\operation]
+ ldr r1, =0xffff
+ ands r0, r0, r1
+ bne wait\@
+l2x_sync\@:
+ mov r0, #0x0
+ str r0, [r2, #L2X0_CACHE_SYNC]
+sync\@:
+ ldr r0, [r2, #L2X0_CACHE_SYNC]
+ ands r0, r0, #0x1
+ bne sync\@
+ mov r1, #\l2_enable
+ str r1, [r2, #L2X0_CTRL]
+.endm
+
+/*
+ * Clean and invalidate the L2 cache.
+ * 1. Check the target type
+ * 2. Check whether we are coming from PC or not
+ * 3. Save 'aux', 'data latency', & 'prefetch ctlr' registers
+ * 4. Start L2 clean & invalidation operation
+ * 5. Disable the L2 cache
+ */
+.macro SUSPEND_8x25_L2
+ ldr r0, =target_type
+ ldr r0, [r0]
+ mov r1, #TARGET_IS_8625
+ cmp r0, r1
+ bne skip_suspend\@
+ ldr r0, =apps_power_collapse
+ ldr r0, [r0]
+ cmp r0, #POWER_COLLAPSED
+ bne skip_suspend\@
+ ldr r0, =l2x0_saved_ctrl_reg_val
+ ldr r1, =l2x0_base_addr
+ ldr r1, [r1]
+ ldr r2, [r1, #L2X0_AUX_CTRL]
+ str r2, [r0, #0x0] /* store aux_ctlr reg value */
+ ldr r2, [r1, #L2X0_DATA_LATENCY_CTRL]
+ str r2, [r0, #0x4] /* store data latency reg value */
+ ldr r2, [r1, #L2X0_PREFETCH_CTRL]
+ str r2, [r0, #0x8] /* store prefetch_ctlr reg value */
+ DO_CACHE_OPERATION L2X0_CLEAN_INV_WAY OFF
+ dmb
+skip_suspend\@:
+.endm
+
+/*
+ * Coming back from a successful PC
+ * 1. Check the target type
+ * 2. Check whether we are going to PC or not
+ * 3. Disable the L2 cache
+ * 4. Restore 'aux', 'data latency', & 'prefetch ctlr' reg
+ * 5. Invalidate the cache
+ * 6. Enable the L2 cache
+ */
+.macro RESUME_8x25_L2
+ ldr r0, =target_type
+ ldr r0, [r0]
+ mov r1, #TARGET_IS_8625
+ cmp r0, r1
+ bne skip_resume\@
+ ldr r0, =apps_power_collapse
+ ldr r0, [r0]
+ cmp r0, #POWER_COLLAPSED
+ bne skip_resume\@
+ ldr r1, =l2x0_base_addr
+ ldr r1, [r1]
+ mov r0, #0x0
+ str r0, [r1, #L2X0_CTRL]
+ ldr r0, =l2x0_saved_ctrl_reg_val
+ ldr r2, [r0, #0x0]
+ str r2, [r1, #L2X0_AUX_CTRL] /* restore aux_ctlr reg value */
+ ldr r2, [r0, #0x4]
+ str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
+ ldr r2, [r0, #0x8]
+ str r2, [r1, #L2X0_PREFETCH_CTRL]
+ DO_CACHE_OPERATION L2X0_INV_WAY ON
+skip_resume\@:
+.endm
diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S
index b73ddc8..b75f76f 100644
--- a/arch/arm/mach-msm/idle-v7.S
+++ b/arch/arm/mach-msm/idle-v7.S
@@ -20,39 +20,13 @@
#include <asm/assembler.h>
#include "idle.h"
+#include "idle-macros.S"
#ifdef CONFIG_ARCH_MSM_KRAIT
#define SCM_SVC_BOOT 0x1
#define SCM_CMD_TERMINATE_PC 0x2
#endif
-/* Switch between smp_to_amp/amp_to_smp configuration */
-.macro SET_SMP_COHERENCY, on = 0
-ldr r0, =target_type
-ldr r0, [r0]
-mov r1, #TARGET_IS_8625
-cmp r0, r1
-bne skip\@
-mrc p15, 0, r0, c1, c0, 1 /* read ACTLR register */
-.if \on
-orr r0, r0, #(1 << 6) /* Set the SMP bit in ACTLR */
-.else
-bic r0, r0, #(1 << 6) /* Clear the SMP bit */
-.endif
-mcr p15, 0, r0, c1, c0, 1 /* write ACTLR register */
-isb
-skip\@:
-.endm
-
-/* Add NOPs for 8x25 target */
-.macro DELAY_8x25, rept
-#ifdef CONFIG_ARCH_MSM8625
- .rept \rept
- nop
- .endr
-#endif
-.endm
-
ENTRY(msm_arch_idle)
wfi
#ifdef CONFIG_ARCH_MSM8X60
@@ -135,16 +109,19 @@
bic r0, r4, #(1 << 2) /* clear dcache bit */
bic r0, r0, #(1 << 12) /* clear icache bit */
mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
- dsb
+ isb
+ SUSPEND_8x25_L2
SET_SMP_COHERENCY OFF
wfi
DELAY_8x25 300
mcr p15, 0, r4, c1, c0, 0 /* restore d/i cache */
isb
-#endif
+ ENABLE_8x25_L2 /* enable only l2, no need to restore the reg back */
SET_SMP_COHERENCY ON
+#endif
+
#if defined(CONFIG_MSM_FIQ_SUPPORT)
cpsie f
#endif
@@ -237,7 +214,6 @@
dsb
isb
- SET_SMP_COHERENCY ON
#ifdef CONFIG_ARCH_MSM_KRAIT
mrc p15, 0, r1, c0, c0, 0
ldr r3, =0xff00fc00
@@ -247,7 +223,11 @@
mrceq p15, 7, r3, c15, c0, 2
biceq r3, r3, #0x400
mcreq p15, 7, r3, c15, c0, 2
+#else
+ RESUME_8x25_L2
+ SET_SMP_COHERENCY ON
#endif
+
#ifdef CONFIG_MSM_JTAG
stmfd sp!, {lr}
bl msm_jtag_restore_state
@@ -302,6 +282,14 @@
target_type:
.long 0x0
+ .globl apps_power_collapse
+apps_power_collapse:
+ .long 0x0
+
+ .globl l2x0_base_addr
+l2x0_base_addr:
+ .long 0x0
+
/*
* Default the l2 flush flag to 1 so that caches are flushed during power
* collapse unless the L2 driver decides to flush them only during L2
@@ -309,3 +297,13 @@
*/
msm_pm_flush_l2_flag:
.long 0x1
+
+/*
+ * Save & restore l2x0 registers while system is entering and resuming
+ * from Power Collapse.
+ * 1. aux_ctrl_save (0x0)
+ * 2. data_latency_ctrl (0x4)
+ * 3. prefetch control (0x8)
+ */
+l2x0_saved_ctrl_reg_val:
+ .space 4 * 3
diff --git a/arch/arm/mach-msm/idle.h b/arch/arm/mach-msm/idle.h
index bfd632f..4abdd04 100644
--- a/arch/arm/mach-msm/idle.h
+++ b/arch/arm/mach-msm/idle.h
@@ -25,6 +25,7 @@
#define ON 1
#define OFF 0
#define TARGET_IS_8625 1
+#define POWER_COLLAPSED 1
#ifndef __ASSEMBLY__
@@ -40,6 +41,8 @@
extern unsigned long msm_pm_pc_pgd;
extern unsigned long msm_pm_boot_vector[NR_CPUS];
extern uint32_t target_type;
+extern uint32_t apps_power_collapse;
+extern uint32_t *l2x0_base_addr;
#else
static inline void msm_pm_set_l2_flush_flag(unsigned int flag)
{
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 8a1474a..17ac3ac 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -125,9 +125,15 @@
const struct pmic8058_leds_platform_data *driver_channel;
};
+enum msm_camera_ext_led_flash_id {
+ MAM_CAMERA_EXT_LED_FLASH_SC628A,
+ MAM_CAMERA_EXT_LED_FLASH_TPS61310,
+};
+
struct msm_camera_sensor_flash_external {
uint32_t led_en;
uint32_t led_flash_en;
+ enum msm_camera_ext_led_flash_id flash_id;
struct msm_cam_expander_info *expander_info;
};
@@ -288,6 +294,7 @@
enum msm_sensor_type sensor_type;
struct msm_actuator_info *actuator_info;
int pmic_gpio_enable;
+ int (*sensor_lcd_gpio_onoff)(int on);
struct msm_eeprom_info *eeprom_info;
};
@@ -381,6 +388,7 @@
spinlock_t bl_spinlock;
int (*backlight_level)(int level, int max, int min);
int (*pmic_backlight)(int level);
+ int (*rotate_panel)(void);
int (*panel_num)(void);
void (*panel_config_gpio)(int);
int (*vga_switch)(int select_vga);
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 90d236b..ba5b8ac 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -653,7 +653,8 @@
void msm_camio_camif_pad_reg_reset_2(void);
void msm_camio_vfe_blk_reset(void);
-void msm_camio_vfe_blk_reset_2(int flag);
+void msm_camio_vfe_blk_reset_2(void);
+void msm_camio_vfe_blk_reset_3(void);
int32_t msm_camio_3d_enable(const struct msm_camera_sensor_info *sinfo);
void msm_camio_3d_disable(void);
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index b3e536e..70519ff 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -45,7 +45,6 @@
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd);
-void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful);
void msm_dmov_flush(unsigned int id, int graceful);
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index dfb100c..1a3a022 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -56,14 +56,28 @@
unsigned int domain_alloc_flags;
};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+};
+
#if defined(CONFIG_MSM_IOMMU)
extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
-extern unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align);
+ unsigned long align,
+ unsigned long *iova);
extern void msm_free_iova_address(unsigned long iova,
unsigned int iommu_domain,
@@ -97,16 +111,19 @@
unsigned int partition_no,
unsigned long size);
+extern int msm_register_domain(struct msm_iova_layout *layout);
+
#else
static inline struct iommu_domain
*msm_get_iommu_domain(int subsys_id) { return NULL; }
-static inline unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align) { return 0; }
+ unsigned long align,
+ unsigned long *iova) { return -ENOMEM; }
static inline void msm_free_iova_address(unsigned long iova,
unsigned int iommu_domain,
@@ -153,6 +170,11 @@
{
return;
}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-9625.h b/arch/arm/mach-msm/include/mach/irqs-9625.h
new file mode 100644
index 0000000..91b4d07
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-9625.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_9625_H
+#define __ASM_ARCH_MSM_IRQS_9625_H
+
+/* MSM ACPU Interrupt Numbers */
+
+/*
+ * 0-15: STI/SGI (software triggered/generated interrupts)
+ * 16-31: PPI (private peripheral interrupts)
+ * 32+: SPI (shared peripheral interrupts)
+ */
+
+
+#define APCC_QGICL2PERFMONIRPTREQ (GIC_SPI_START + 1)
+#define SC_SICL2PERFMONIRPTREQ APCC_QGICL2PERFMONIRPTREQ
+#define TLMM_MSM_SUMMARY_IRQ (GIC_SPI_START + 16)
+#define SPS_BAM_DMA_IRQ (GIC_SPI_START + 208)
+
+#define NR_MSM_IRQS 288
+#define NR_GPIO_IRQS 88
+#define NR_BOARD_IRQS 0
+#define NR_TLMM_MSM_DIR_CONN_IRQ 8 /*Need to Verify this Count*/
+#define NR_MSM_GPIOS NR_GPIO_IRQS
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index e3904b4..bf766f4 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -62,6 +62,8 @@
#include "irqs-copper.h"
#elif defined(CONFIG_ARCH_MSM9615)
#include "irqs-9615.h"
+#elif defined(CONFIG_ARCH_MSM9625)
+#include "irqs-9625.h"
#elif defined(CONFIG_ARCH_MSM7X30)
#include "irqs-7x30.h"
#elif defined(CONFIG_ARCH_QSD8X50)
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 78ca88f..997b3be 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,9 @@
struct mdm_platform_data {
char *mdm_version;
int ramdump_delay_ms;
+ int soft_reset_inverted;
+ int early_power_on;
+ int sfr_query;
struct platform_device *peripheral_platform_device;
};
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8064.h b/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
index 96bc35e..10e2b74 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8064.h
@@ -106,4 +106,9 @@
#define APQ8064_HDMI_PHYS 0x04A00000
#define APQ8064_HDMI_SIZE SZ_4K
+#ifdef CONFIG_DEBUG_APQ8064_UART
+#define MSM_DEBUG_UART_BASE IOMEM(0xFA740000)
+#define MSM_DEBUG_UART_PHYS 0x16640000
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-9625.h b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
new file mode 100644
index 0000000..493cf36
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-9625.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_MSM9625_H
+#define __ASM_ARCH_MSM_IOMAP_MSM9625_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * io desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#define MSM9625_SHARED_RAM_PHYS 0x18D00000
+
+#define MSM9625_APCS_GCC_PHYS 0xF9011000
+#define MSM9625_APCS_GCC_SIZE SZ_4K
+
+#define MSM9625_TMR_PHYS 0xF9021000
+#define MSM9625_TMR_SIZE SZ_4K
+
+#define MSM9625_TLMM_PHYS 0xFD510000
+#define MSM9625_TLMM_SIZE SZ_16K
+
+#ifdef CONFIG_DEBUG_MSM9625_UART
+#define MSM_DEBUG_UART_BASE IOMEM(0xFA71E000)
+#define MSM_DEBUG_UART_PHYS 0xF991E000
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 34af610..2676297 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -58,7 +58,8 @@
defined(CONFIG_ARCH_MSM8930) || defined(CONFIG_ARCH_MSM9615) || \
defined(CONFIG_ARCH_MSMCOPPER) || defined(CONFIG_ARCH_MSM7X27) || \
defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X01A) || \
- defined(CONFIG_ARCH_MSM8625) || defined(CONFIG_ARCH_MSM7X30)
+ defined(CONFIG_ARCH_MSM8625) || defined(CONFIG_ARCH_MSM7X30) || \
+ defined(CONFIG_ARCH_MSM9625)
/* Unified iomap */
@@ -121,6 +122,7 @@
#include "msm_iomap-8064.h"
#include "msm_iomap-9615.h"
#include "msm_iomap-copper.h"
+#include "msm_iomap-9625.h"
#else
/* Legacy single-target iomap */
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index 2966509..dc633fb 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -295,6 +295,14 @@
* -EINVAL - NULL parameter or non-packet based channel provided
*/
int smd_is_pkt_avail(smd_channel_t *ch);
+
+/*
+ * SMD initialization function that registers for a SMD platform driver.
+ *
+ * returns success on successful driver registration.
+ */
+int __init msm_smd_init(void);
+
#else
static inline int smd_open(const char *name, smd_channel_t **ch, void *priv,
@@ -411,6 +419,11 @@
{
return -ENODEV;
}
+
+static inline int __init msm_smd_init(void)
+{
+ return 0;
+}
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
new file mode 100644
index 0000000..bf7c338
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -0,0 +1,109 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_OCMEM_H
+#define _ARCH_ARM_MACH_MSM_OCMEM_H
+
+#include <asm/page.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#define OCMEM_MIN_ALLOC SZ_64K
+#define OCMEM_MIN_ALIGN SZ_64K
+
+/* Maximum number of slots in DM */
+#define OCMEM_MAX_CHUNKS 32
+#define MIN_CHUNK_SIZE (SZ_1K/8)
+
+struct ocmem_buf {
+ unsigned long addr;
+ unsigned long len;
+};
+
+struct ocmem_buf_attr {
+ unsigned long paddr;
+ unsigned long len;
+};
+
+struct ocmem_chunk {
+ bool ro;
+ unsigned long ddr_paddr;
+ unsigned long size;
+};
+
+struct ocmem_map_list {
+ int num_chunks;
+ struct ocmem_chunk chunks[OCMEM_MAX_CHUNKS];
+};
+
+/* List of clients that allocate/interact with OCMEM */
+/* Must be in sync with client_names */
+enum ocmem_client {
+ /* GMEM clients */
+ OCMEM_GRAPHICS = 0x0,
+ /* TCMEM clients */
+ OCMEM_VIDEO,
+ OCMEM_CAMERA,
+ /* Dummy Clients */
+ OCMEM_HP_AUDIO,
+ OCMEM_VOICE,
+ /* IMEM Clients */
+ OCMEM_LP_AUDIO,
+ OCMEM_SENSORS,
+ OCMEM_BLAST,
+ OCMEM_CLIENT_MAX,
+};
+
+/**
+ * List of OCMEM notification events which will be broadcasted
+ * to clients that optionally register for these notifications
+ * on a per allocation basis.
+ **/
+enum ocmem_notif_type {
+ OCMEM_MAP_DONE = 1,
+ OCMEM_MAP_FAIL,
+ OCMEM_UNMAP_DONE,
+ OCMEM_UNMAP_FAIL,
+ OCMEM_ALLOC_GROW,
+ OCMEM_ALLOC_SHRINK,
+ OCMEM_NOTIF_TYPE_COUNT,
+};
+
+/* APIS */
+/* Notification APIs */
+void *ocmem_notifier_register(int client_id, struct notifier_block *nb);
+
+int ocmem_notifier_unregister(void *notif_hndl, struct notifier_block *nb);
+
+/* Allocation APIs */
+struct ocmem_buf *ocmem_allocate(int client_id, unsigned long size);
+
+struct ocmem_buf *ocmem_allocate_nb(int client_id, unsigned long size);
+
+struct ocmem_buf *ocmem_allocate_range(int client_id, unsigned long min,
+ unsigned long goal, unsigned long step);
+
+/* Free APIs */
+int ocmem_free(int client_id, struct ocmem_buf *buf);
+
+/* Dynamic Resize APIs */
+int ocmem_shrink(int client_id, struct ocmem_buf *buf,
+ unsigned long new_size);
+
+int ocmem_expand(int client_id, struct ocmem_buf *buf,
+ unsigned long new_size);
+
+/* Priority Enforcement APIs */
+int ocmem_evict(int client_id);
+
+int ocmem_restore(int client_id);
+#endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
new file mode 100644
index 0000000..daf32a5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_OCMEM_CORE_H
+#define _ARCH_ARM_MACH_MSM_OCMEM_CORE_H
+
+/** All interfaces in this header should only be used by OCMEM driver
+ * Client drivers should use wrappers available in ocmem.h
+ **/
+
+#include "ocmem.h"
+#include <mach/msm_iomap.h>
+#include <asm/io.h>
+
+#define OCMEM_PHYS_BASE 0xFEC00000
+#define OCMEM_PHYS_SIZE 0x180000
+
+struct ocmem_zone;
+
+struct ocmem_zone_ops {
+ unsigned long (*allocate) (struct ocmem_zone *, unsigned long);
+ int (*free) (struct ocmem_zone *, unsigned long, unsigned long);
+};
+
+struct ocmem_zone {
+ int owner;
+ int active_regions;
+ int max_regions;
+ struct list_head region_list;
+ unsigned long z_start;
+ unsigned long z_end;
+ unsigned long z_head;
+ unsigned long z_tail;
+ unsigned long z_free;
+ struct gen_pool *z_pool;
+ struct ocmem_zone_ops *z_ops;
+};
+
+struct ocmem_req {
+ struct rw_semaphore rw_sem;
+ /* Chain in sched queue */
+ struct list_head sched_list;
+ /* Chain in zone list */
+ struct list_head zone_list;
+ int owner;
+ int prio;
+ uint32_t req_id;
+ unsigned long req_min;
+ unsigned long req_max;
+ unsigned long req_step;
+ /* reverse pointers */
+ struct ocmem_zone *zone;
+ struct ocmem_buf *buffer;
+ unsigned long state;
+ /* Request assignments */
+ unsigned long req_start;
+ unsigned long req_end;
+ unsigned long req_sz;
+};
+
+struct ocmem_handle {
+ struct ocmem_buf buffer;
+ struct mutex handle_mutex;
+ struct ocmem_req *req;
+};
+
+struct ocmem_zone *get_zone(unsigned);
+unsigned long allocate_head(struct ocmem_zone *, unsigned long);
+int free_head(struct ocmem_zone *, unsigned long, unsigned long);
+unsigned long allocate_tail(struct ocmem_zone *, unsigned long);
+int free_tail(struct ocmem_zone *, unsigned long, unsigned long);
+#endif
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
new file mode 100644
index 0000000..333f5af
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_SMD_H
+#define __ARCH_ARM_MACH_MSM_INCLUDE_MACH_RPM_REGULATOR_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+#ifdef CONFIG_MSM_RPM_REGULATOR_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV);
+
+int __init rpm_regulator_smd_driver_init(void);
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+ const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+ int min_uV, int max_uV) { return 0; }
+
+static inline int __init rpm_regulator_smd_driver_init(void) { return 0; }
+
+#endif /* CONFIG_MSM_RPM_REGULATOR_SMD */
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/rpm-smd.h b/arch/arm/mach-msm/include/mach/rpm-smd.h
new file mode 100644
index 0000000..ff58fed
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpm-smd.h
@@ -0,0 +1,254 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a Key value pair to a existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a Key value pair to a existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/** msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_requests
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() -Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelems: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+
+}
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int count)
+{
+ return 0;
+}
+static inline int msm_rpm_add_kvp_data_noirq(
+ struct msm_rpm_request *handle, uint32_t key,
+ const uint8_t *data, int count)
+{
+ return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ return ;
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 09494ea..c0ad65b 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -40,11 +40,17 @@
of_machine_is_compatible("qcom,msmcopper-sim")
#define machine_is_copper_rumi() \
of_machine_is_compatible("qcom,msmcopper-rumi")
+#define early_machine_is_msm9625() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm9625")
+#define machine_is_msm9625() \
+ of_machine_is_compatible("qcom,msm9625")
#else
#define early_machine_is_copper() 0
#define machine_is_copper() 0
#define machine_is_copper_sim() 0
#define machine_is_copper_rumi() 0
+#define early_machine_is_msm9625() 0
+#define machine_is_msm9625() 0
#endif
#define PLATFORM_SUBTYPE_SGLTE 6
@@ -72,6 +78,7 @@
MSM_CPU_COPPER,
MSM_CPU_8627,
MSM_CPU_8625,
+ MSM_CPU_9625
};
enum msm_cpu socinfo_get_msm_cpu(void);
diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
index d8a3e60..be11989 100644
--- a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
+++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
USB_GADGET_XPORT_BAM,
USB_GADGET_XPORT_BAM2BAM,
USB_GADGET_XPORT_HSIC,
+ USB_GADGET_XPORT_HSUART,
USB_GADGET_XPORT_NONE,
};
@@ -42,6 +43,8 @@
return "BAM2BAM";
case USB_GADGET_XPORT_HSIC:
return "HSIC";
+ case USB_GADGET_XPORT_HSUART:
+ return "HSUART";
case USB_GADGET_XPORT_NONE:
return "NONE";
default:
@@ -63,6 +66,8 @@
return USB_GADGET_XPORT_BAM2BAM;
if (!strncasecmp("HSIC", name, XPORT_STR_LEN))
return USB_GADGET_XPORT_HSIC;
+ if (!strncasecmp("HSUART", name, XPORT_STR_LEN))
+ return USB_GADGET_XPORT_HSUART;
if (!strncasecmp("", name, XPORT_STR_LEN))
return USB_GADGET_XPORT_NONE;
@@ -79,6 +84,11 @@
#define NUM_PORTS (NUM_RMNET_HSIC_PORTS \
+ NUM_DUN_HSIC_PORTS)
+#define NUM_RMNET_HSUART_PORTS 1
+#define NUM_DUN_HSUART_PORTS 1
+#define NUM_HSUART_PORTS (NUM_RMNET_HSUART_PORTS \
+ + NUM_DUN_HSUART_PORTS)
+
int ghsic_ctrl_connect(void *, int);
void ghsic_ctrl_disconnect(void *, int);
int ghsic_ctrl_setup(unsigned int, enum gadget_type);
@@ -86,4 +96,10 @@
void ghsic_data_disconnect(void *, int);
int ghsic_data_setup(unsigned int, enum gadget_type);
+int ghsuart_ctrl_connect(void *, int);
+void ghsuart_ctrl_disconnect(void *, int);
+int ghsuart_ctrl_setup(unsigned int, enum gadget_type);
+int ghsuart_data_connect(void *, int);
+void ghsuart_data_disconnect(void *, int);
+int ghsuart_data_setup(unsigned int, enum gadget_type);
#endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 7c0de57..2a0d34a 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -281,6 +281,9 @@
},
MSM_CHIP_DEVICE(QFPROM, APQ8064),
MSM_CHIP_DEVICE(SIC_NON_SECURE, APQ8064),
+#ifdef CONFIG_DEBUG_APQ8064_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
};
void __init msm_map_apq8064_io(void)
@@ -449,6 +452,29 @@
void __init msm_map_msm8625_io(void) { return; }
#endif /* CONFIG_ARCH_MSM8625 */
+#ifdef CONFIG_ARCH_MSM9625
+static struct map_desc msm9625_io_desc[] __initdata = {
+ MSM_CHIP_DEVICE(APCS_GCC, MSM9625),
+ MSM_CHIP_DEVICE(TLMM, MSM9625),
+ MSM_CHIP_DEVICE(TMR, MSM9625),
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+#ifdef CONFIG_DEBUG_MSM9625_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+};
+
+void __init msm_map_msm9625_io(void)
+{
+ msm_shared_ram_phys = MSM9625_SHARED_RAM_PHYS;
+ msm_map_io(msm9625_io_desc, ARRAY_SIZE(msm9625_io_desc));
+}
+#endif /* CONFIG_ARCH_MSM9625 */
+
+
void __iomem *
__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
index 0652f3b..49a3e6f 100644
--- a/arch/arm/mach-msm/iommu.c
+++ b/arch/arm/mach-msm/iommu.c
@@ -49,9 +49,11 @@
#define MSM_IOMMU_ATTR_CACHED_WT 0x3
-static inline void clean_pte(unsigned long *start, unsigned long *end)
+static inline void clean_pte(unsigned long *start, unsigned long *end,
+ int redirect)
{
- dmac_flush_range(start, end);
+ if (!redirect)
+ dmac_flush_range(start, end);
}
static int msm_iommu_tex_class[4];
@@ -292,6 +294,9 @@
memset(priv->pgtable, 0, SZ_16K);
domain->priv = priv;
+
+ clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);
+
return 0;
fail_nomem:
@@ -518,8 +523,7 @@
for (i = 0; i < 16; i++)
*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 16);
+ clean_pte(fl_pte, fl_pte + 16, priv->redirect);
}
if (len == SZ_1M) {
@@ -530,8 +534,7 @@
*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
| pgprot;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
}
/* Need a 2nd level table */
@@ -548,12 +551,12 @@
goto fail;
}
memset(sl, 0, SZ_4K);
+ clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
FL_TYPE_TABLE);
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
}
if (!(*fl_pte & FL_TYPE_TABLE)) {
@@ -574,8 +577,7 @@
*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
| SL_TYPE_SMALL | pgprot;
- if (!priv->redirect)
- clean_pte(sl_pte, sl_pte + 1);
+ clean_pte(sl_pte, sl_pte + 1, priv->redirect);
}
if (len == SZ_64K) {
@@ -591,8 +593,7 @@
*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
| SL_SHARED | SL_TYPE_LARGE | pgprot;
- if (!priv->redirect)
- clean_pte(sl_pte, sl_pte + 16);
+ clean_pte(sl_pte, sl_pte + 16, priv->redirect);
}
ret = __flush_iotlb_va(domain, va);
@@ -652,15 +653,13 @@
for (i = 0; i < 16; i++)
*(fl_pte+i) = 0;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 16);
+ clean_pte(fl_pte, fl_pte + 16, priv->redirect);
}
if (len == SZ_1M) {
*fl_pte = 0;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
}
sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
@@ -671,15 +670,13 @@
for (i = 0; i < 16; i++)
*(sl_pte+i) = 0;
- if (!priv->redirect)
- clean_pte(sl_pte, sl_pte + 16);
+ clean_pte(sl_pte, sl_pte + 16, priv->redirect);
}
if (len == SZ_4K) {
*sl_pte = 0;
- if (!priv->redirect)
- clean_pte(sl_pte, sl_pte + 1);
+ clean_pte(sl_pte, sl_pte + 1, priv->redirect);
}
if (len == SZ_4K || len == SZ_64K) {
@@ -692,8 +689,7 @@
free_page((unsigned long)sl_table);
*fl_pte = 0;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
}
}
@@ -773,10 +769,12 @@
}
memset(sl_table, 0, SZ_4K);
+ clean_pte(sl_table, sl_table + NUM_SL_PTE,
+ priv->redirect);
+
*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
FL_TYPE_TABLE);
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
} else
sl_table = (unsigned long *)
__va(((*fl_pte) & FL_BASE_MASK));
@@ -809,8 +807,8 @@
}
}
- if (!priv->redirect)
- clean_pte(sl_table + sl_start, sl_table + sl_offset);
+ clean_pte(sl_table + sl_start, sl_table + sl_offset,
+ priv->redirect);
fl_pte++;
sl_offset = 0;
@@ -854,8 +852,8 @@
sl_end = NUM_SL_PTE;
memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
- if (!priv->redirect)
- clean_pte(sl_table + sl_start, sl_table + sl_end);
+ clean_pte(sl_table + sl_start, sl_table + sl_end,
+ priv->redirect);
offset += (sl_end - sl_start) * SZ_4K;
@@ -879,8 +877,7 @@
free_page((unsigned long)sl_table);
*fl_pte = 0;
- if (!priv->redirect)
- clean_pte(fl_pte, fl_pte + 1);
+ clean_pte(fl_pte, fl_pte + 1, priv->redirect);
}
sl_start = 0;
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 34c16d1..271e252b 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -10,27 +10,34 @@
* GNU General Public License for more details.
*/
-#include <mach/msm_subsystem_map.h>
-#include <linux/memory_alloc.h>
+#include <linux/init.h>
#include <linux/iommu.h>
+#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/page.h>
-#include <linux/init.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
+#include <mach/msm_subsystem_map.h>
/* dummy 64K for overmapping */
char iommu_dummy[2*SZ_64K-4];
-struct msm_iommu_domain_state {
- struct msm_iommu_domain *domains;
- int ndomains;
+struct msm_iova_data {
+ struct rb_node node;
+ struct mem_pool *pools;
+ int npools;
+ struct iommu_domain *domain;
+ int domain_num;
};
-static struct msm_iommu_domain_state domain_state;
+static struct rb_root domain_root;
+DEFINE_MUTEX(domain_mutex);
+static atomic_t domain_nums = ATOMIC_INIT(-1);
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
@@ -127,9 +134,10 @@
if (size & (align - 1))
return -EINVAL;
- iova = msm_allocate_iova_address(domain_no, partition_no, size, align);
+ ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
+ &iova);
- if (!iova)
+ if (ret)
return -ENOMEM;
ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
@@ -152,73 +160,210 @@
msm_free_iova_address(iova, domain_no, partition_no, size);
}
+static struct msm_iova_data *find_domain(int domain_num)
+{
+ struct rb_root *root = &domain_root;
+ struct rb_node *p = root->rb_node;
+
+ mutex_lock(&domain_mutex);
+
+ while (p) {
+ struct msm_iova_data *node;
+
+ node = rb_entry(p, struct msm_iova_data, node);
+ if (domain_num < node->domain_num)
+ p = p->rb_left;
+ else if (domain_num > node->domain_num)
+ p = p->rb_right;
+ else {
+ mutex_unlock(&domain_mutex);
+ return node;
+ }
+ }
+ mutex_unlock(&domain_mutex);
+ return NULL;
+}
+
+static int add_domain(struct msm_iova_data *node)
+{
+ struct rb_root *root = &domain_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ mutex_lock(&domain_mutex);
+ while (*p) {
+ struct msm_iova_data *tmp;
+ parent = *p;
+
+ tmp = rb_entry(parent, struct msm_iova_data, node);
+
+ if (node->domain_num < tmp->domain_num)
+ p = &(*p)->rb_left;
+ else if (node->domain_num > tmp->domain_num)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&node->node, parent, p);
+ rb_insert_color(&node->node, root);
+ mutex_unlock(&domain_mutex);
+ return 0;
+}
+
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
- if (domain_num >= 0 && domain_num < domain_state.ndomains)
- return domain_state.domains[domain_num].domain;
+ struct msm_iova_data *data;
+
+ data = find_domain(domain_num);
+
+ if (data)
+ return data->domain;
else
return NULL;
}
-unsigned long msm_allocate_iova_address(unsigned int iommu_domain,
+int msm_allocate_iova_address(unsigned int iommu_domain,
unsigned int partition_no,
unsigned long size,
- unsigned long align)
+ unsigned long align,
+ unsigned long *iova)
{
+ struct msm_iova_data *data;
struct mem_pool *pool;
- unsigned long iova;
+ unsigned long va;
- if (iommu_domain >= domain_state.ndomains)
- return 0;
+ data = find_domain(iommu_domain);
- if (partition_no >= domain_state.domains[iommu_domain].npools)
- return 0;
+ if (!data)
+ return -EINVAL;
- pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
+ if (partition_no >= data->npools)
+ return -EINVAL;
+
+ pool = &data->pools[partition_no];
if (!pool->gpool)
- return 0;
+ return -EINVAL;
- iova = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
- if (iova)
+ va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
+ if (va) {
pool->free -= size;
+ /* Offset because genpool can't handle 0 addresses */
+ if (pool->paddr == 0)
+ va -= SZ_4K;
+ *iova = va;
+ return 0;
+ }
- return iova;
+ return -ENOMEM;
}
void msm_free_iova_address(unsigned long iova,
- unsigned int iommu_domain,
- unsigned int partition_no,
- unsigned long size)
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size)
{
+ struct msm_iova_data *data;
struct mem_pool *pool;
- if (iommu_domain >= domain_state.ndomains) {
+ data = find_domain(iommu_domain);
+
+ if (!data) {
WARN(1, "Invalid domain %d\n", iommu_domain);
return;
}
- if (partition_no >= domain_state.domains[iommu_domain].npools) {
+ if (partition_no >= data->npools) {
WARN(1, "Invalid partition %d for domain %d\n",
partition_no, iommu_domain);
return;
}
- pool = &domain_state.domains[iommu_domain].iova_pools[partition_no];
+ pool = &data->pools[partition_no];
if (!pool)
return;
pool->free += size;
+
+ /* Offset because genpool can't handle 0 addresses */
+ if (pool->paddr == 0)
+ iova += SZ_4K;
+
gen_pool_free(pool->gpool, iova, size);
}
+int msm_register_domain(struct msm_iova_layout *layout)
+{
+ int i;
+ struct msm_iova_data *data;
+ struct mem_pool *pools;
+
+ if (!layout)
+ return -EINVAL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
+ GFP_KERNEL);
+
+ if (!pools)
+ goto out;
+
+ for (i = 0; i < layout->npartitions; i++) {
+ if (layout->partitions[i].size == 0)
+ continue;
+
+ pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
+
+ if (!pools[i].gpool)
+ continue;
+
+ pools[i].paddr = layout->partitions[i].start;
+ pools[i].size = layout->partitions[i].size;
+
+ /*
+ * genalloc can't handle a pool starting at address 0.
+ * For now, solve this problem by offsetting the value
+ * put in by 4k.
+ * gen pool address = actual address + 4k
+ */
+ if (pools[i].paddr == 0)
+ layout->partitions[i].start += SZ_4K;
+
+ if (gen_pool_add(pools[i].gpool,
+ layout->partitions[i].start,
+ layout->partitions[i].size, -1)) {
+ gen_pool_destroy(pools[i].gpool);
+ pools[i].gpool = NULL;
+ continue;
+ }
+ }
+
+ data->pools = pools;
+ data->npools = layout->npartitions;
+ data->domain_num = atomic_inc_return(&domain_nums);
+ data->domain = iommu_domain_alloc(layout->domain_flags);
+
+ add_domain(data);
+
+ return data->domain_num;
+
+out:
+ kfree(data);
+
+ return -ENOMEM;
+}
+
int msm_use_iommu()
{
/*
* If there are no domains, don't bother trying to use the iommu
*/
- return domain_state.ndomains && iommu_found();
+ return iommu_found();
}
static int __init iommu_domain_probe(struct platform_device *pdev)
@@ -229,64 +374,52 @@
if (!p)
return -ENODEV;
- domain_state.domains = p->domains;
- domain_state.ndomains = p->ndomains;
+ for (i = 0; i < p->ndomains; i++) {
+ struct msm_iova_layout l;
+ struct msm_iova_partition *part;
+ struct msm_iommu_domain *domains;
- for (i = 0; i < domain_state.ndomains; i++) {
- domain_state.domains[i].domain = iommu_domain_alloc(
- p->domain_alloc_flags);
- if (!domain_state.domains[i].domain)
+ domains = p->domains;
+ l.npartitions = domains[i].npools;
+ part = kmalloc(
+ sizeof(struct msm_iova_partition) * l.npartitions,
+ GFP_KERNEL);
+
+ if (!part) {
+ pr_info("%s: could not allocate space for domain %d",
+ __func__, i);
continue;
-
- for (j = 0; j < domain_state.domains[i].npools; j++) {
- struct mem_pool *pool = &domain_state.domains[i].
- iova_pools[j];
- mutex_init(&pool->pool_mutex);
- if (pool->size) {
- pool->gpool = gen_pool_create(PAGE_SHIFT, -1);
-
- if (!pool->gpool) {
- pr_err("%s: could not allocate pool\n",
- __func__);
- pr_err("%s: domain %d iova space %d\n",
- __func__, i, j);
- continue;
- }
-
- if (gen_pool_add(pool->gpool, pool->paddr,
- pool->size, -1)) {
- pr_err("%s: could not add memory\n",
- __func__);
- pr_err("%s: domain %d pool %d\n",
- __func__, i, j);
- gen_pool_destroy(pool->gpool);
- pool->gpool = NULL;
- continue;
- }
- } else {
- pool->gpool = NULL;
- }
}
+
+ for (j = 0; j < l.npartitions; j++) {
+ part[j].start = p->domains[i].iova_pools[j].paddr;
+ part[j].size = p->domains[i].iova_pools[j].size;
+ }
+
+ l.partitions = part;
+
+ msm_register_domain(&l);
+
+ kfree(part);
}
for (i = 0; i < p->nnames; i++) {
- int domain_idx;
struct device *ctx = msm_iommu_get_ctx(
p->domain_names[i].name);
+ struct iommu_domain *domain;
if (!ctx)
continue;
- domain_idx = p->domain_names[i].domain;
+ domain = msm_get_iommu_domain(p->domain_names[i].domain);
- if (!domain_state.domains[domain_idx].domain)
+ if (!domain)
continue;
- if (iommu_attach_device(domain_state.domains[domain_idx].domain,
- ctx)) {
- WARN(1, "%s: could not attach domain %d to context %s."
+ if (iommu_attach_device(domain, ctx)) {
+ WARN(1, "%s: could not attach domain %p to context %s."
" iommu programming will not occur.\n",
- __func__, domain_idx,
+ __func__, domain,
p->domain_names[i].name);
continue;
}
diff --git a/arch/arm/mach-msm/ipc_router.h b/arch/arm/mach-msm/ipc_router.h
index 462543e..a90be23 100644
--- a/arch/arm/mach-msm/ipc_router.h
+++ b/arch/arm/mach-msm/ipc_router.h
@@ -138,7 +138,7 @@
struct msm_ipc_sock {
struct sock sk;
struct msm_ipc_port *port;
- void *modem_pil;
+ void *default_pil;
};
enum write_data_type {
@@ -206,4 +206,15 @@
int msm_ipc_router_init_sockets(void);
void msm_ipc_router_exit_sockets(void);
+#if defined CONFIG_MSM_IPC_ROUTER_SMD_XPRT
+extern void *msm_ipc_load_default_node(void);
+
+extern void msm_ipc_unload_default_node(void *pil);
+#else
+static inline void *msm_ipc_load_default_node(void)
+{ return NULL; }
+
+static inline void msm_ipc_unload_default_node(void *pil) { }
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/ipc_router_smd_xprt.c b/arch/arm/mach-msm/ipc_router_smd_xprt.c
index 0cde393..307b6ae 100644
--- a/arch/arm/mach-msm/ipc_router_smd_xprt.c
+++ b/arch/arm/mach-msm/ipc_router_smd_xprt.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <mach/msm_smd.h>
+#include <mach/peripheral-loader.h>
#include "ipc_router.h"
#include "smd_private.h"
@@ -442,6 +443,31 @@
return 0;
}
+void *msm_ipc_load_default_node(void)
+{
+ void *pil = NULL;
+ const char *peripheral;
+
+ peripheral = smd_edge_to_subsystem(SMD_APPS_MODEM);
+ if (peripheral && !strncmp(peripheral, "modem", 6)) {
+ pil = pil_get(peripheral);
+ if (IS_ERR(pil)) {
+ pr_err("%s: Failed to load %s\n",
+ __func__, peripheral);
+ pil = NULL;
+ }
+ }
+ return pil;
+}
+EXPORT_SYMBOL(msm_ipc_load_default_node);
+
+void msm_ipc_unload_default_node(void *pil)
+{
+ if (pil)
+ pil_put(pil);
+}
+EXPORT_SYMBOL(msm_ipc_unload_default_node);
+
static struct platform_driver msm_ipc_router_smd_remote_driver[] = {
{
.probe = msm_ipc_router_smd_remote_probe,
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index 6e8c99e..d82ffe5 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -21,62 +21,38 @@
#include <linux/gfp.h>
#include <linux/msm_ipc.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
#include <asm/string.h>
#include <asm/atomic.h>
#include <net/sock.h>
-#include <mach/peripheral-loader.h>
-#include <mach/socinfo.h>
-
#include "ipc_router.h"
#define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
#define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
-#define MODEM_LOAD_TIMEOUT (10 * HZ)
static int sockets_enabled;
static struct proto msm_ipc_proto;
static const struct proto_ops msm_ipc_proto_ops;
-static void msm_ipc_router_unload_modem(void *pil)
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int check_permissions(void)
{
- if (pil)
- pil_put(pil);
+ int rc = 0;
+ if (!current_euid() || in_egroup_p(AID_NET_RAW))
+ rc = 1;
+ return rc;
}
-
-static void *msm_ipc_router_load_modem(void)
+# else
+static inline int check_permissions(void)
{
- void *pil = NULL;
- int rc;
-
- /* Load GNSS for Standalone 8064 but not for Fusion 3 */
- if (cpu_is_apq8064()) {
- if (socinfo_get_platform_subtype() == 0x0)
- pil = pil_get("gss");
- } else {
- pil = pil_get("modem");
- }
-
- if (IS_ERR(pil) || !pil) {
- pr_debug("%s: modem load failed\n", __func__);
- pil = NULL;
- } else {
- rc = wait_for_completion_interruptible_timeout(
- &msm_ipc_remote_router_up,
- MODEM_LOAD_TIMEOUT);
- if (!rc)
- rc = -ETIMEDOUT;
- if (rc < 0) {
- pr_err("%s: wait for remote router failed %d\n",
- __func__, rc);
- msm_ipc_router_unload_modem(pil);
- pil = NULL;
- }
- }
-
- return pil;
+ return 1;
}
+#endif
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
struct iovec const *msg_sect,
@@ -214,6 +190,11 @@
struct msm_ipc_port *port_ptr;
void *pil;
+ if (!check_permissions()) {
+ pr_err("%s: Do not have permissions\n", __func__);
+ return -EPERM;
+ }
+
if (unlikely(protocol != 0)) {
pr_err("%s: Protocol not supported\n", __func__);
return -EPROTONOSUPPORT;
@@ -244,9 +225,9 @@
sock_init_data(sock, sk);
sk->sk_rcvtimeo = DEFAULT_RCV_TIMEO;
- pil = msm_ipc_router_load_modem();
+ pil = msm_ipc_load_default_node();
msm_ipc_sk(sk)->port = port_ptr;
- msm_ipc_sk(sk)->modem_pil = pil;
+ msm_ipc_sk(sk)->default_pil = pil;
return 0;
}
@@ -495,12 +476,12 @@
{
struct sock *sk = sock->sk;
struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
- void *pil = msm_ipc_sk(sk)->modem_pil;
+ void *pil = msm_ipc_sk(sk)->default_pil;
int ret;
lock_sock(sk);
ret = msm_ipc_router_close_port(port_ptr);
- msm_ipc_router_unload_modem(pil);
+ msm_ipc_unload_default_node(pil);
release_sock(sk);
sock_put(sk);
sock->sk = NULL;
diff --git a/arch/arm/mach-msm/lpass-8960.c b/arch/arm/mach-msm/lpass-8960.c
index 7775740..c58b0e1 100644
--- a/arch/arm/mach-msm/lpass-8960.c
+++ b/arch/arm/mach-msm/lpass-8960.c
@@ -31,6 +31,7 @@
#define SCM_Q6_NMI_CMD 0x1
#define MODULE_NAME "lpass_8960"
+#define MAX_BUF_SIZE 0x51
/* Subsystem restart: QDSP6 data, functions */
static void lpass_fatal_fn(struct work_struct *);
@@ -86,10 +87,39 @@
.notifier_call = modem_notifier_cb,
};
+static void lpass_log_failure_reason(void)
+{
+ char *reason;
+ char buffer[MAX_BUF_SIZE];
+ unsigned size;
+
+ reason = smem_get_entry(SMEM_SSR_REASON_LPASS0, &size);
+
+ if (!reason) {
+ pr_err("%s: subsystem failure reason: (unknown, smem_get_entry failed).",
+ MODULE_NAME);
+ return;
+ }
+
+ if (reason[0] == '\0') {
+ pr_err("%s: subsystem failure reason: (unknown, init value found)",
+ MODULE_NAME);
+ return;
+ }
+
+ size = size < MAX_BUF_SIZE ? size : (MAX_BUF_SIZE-1);
+ memcpy(buffer, reason, size);
+ buffer[size] = '\0';
+ pr_err("%s: subsystem failure reason: %s", MODULE_NAME, buffer);
+ memset((void *)reason, 0x0, size);
+ wmb();
+}
+
static void lpass_fatal_fn(struct work_struct *work)
{
pr_err("%s %s: Watchdog bite received from Q6!\n", MODULE_NAME,
__func__);
+ lpass_log_failure_reason();
panic(MODULE_NAME ": Resetting the SoC");
}
@@ -104,6 +134,7 @@
pr_err("%s: LPASS SMSM state changed to SMSM_RESET,"
" new_state = 0x%x, old_state = 0x%x\n", __func__,
new_state, old_state);
+ lpass_log_failure_reason();
panic(MODULE_NAME ": Resetting the SoC");
}
}
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index b4b7ea3..bd7bd9e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -53,11 +53,13 @@
static void mdm_peripheral_connect(struct mdm_modem_drv *mdm_drv)
{
+ if (!mdm_drv->pdata->peripheral_platform_device)
+ return;
+
mutex_lock(&hsic_status_lock);
if (hsic_peripheral_status)
goto out;
- if (mdm_drv->pdata->peripheral_platform_device)
- platform_device_add(mdm_drv->pdata->peripheral_platform_device);
+ platform_device_add(mdm_drv->pdata->peripheral_platform_device);
hsic_peripheral_status = 1;
out:
mutex_unlock(&hsic_status_lock);
@@ -65,84 +67,106 @@
static void mdm_peripheral_disconnect(struct mdm_modem_drv *mdm_drv)
{
+ if (!mdm_drv->pdata->peripheral_platform_device)
+ return;
+
mutex_lock(&hsic_status_lock);
if (!hsic_peripheral_status)
goto out;
- if (mdm_drv->pdata->peripheral_platform_device)
- platform_device_del(mdm_drv->pdata->peripheral_platform_device);
+ platform_device_del(mdm_drv->pdata->peripheral_platform_device);
hsic_peripheral_status = 0;
out:
mutex_unlock(&hsic_status_lock);
}
-static void power_on_mdm(struct mdm_modem_drv *mdm_drv)
+static void mdm_power_down_common(struct mdm_modem_drv *mdm_drv)
+{
+ int soft_reset_direction =
+ mdm_drv->pdata->soft_reset_inverted ? 1 : 0;
+
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction);
+ mdm_peripheral_disconnect(mdm_drv);
+}
+
+static void mdm_do_first_power_on(struct mdm_modem_drv *mdm_drv)
+{
+ int soft_reset_direction =
+ mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+
+ if (power_on_count != 1) {
+ pr_err("%s: Calling fn when power_on_count != 1\n",
+ __func__);
+ return;
+ }
+
+ pr_err("%s: Powering on modem for the first time\n", __func__);
+ mdm_peripheral_disconnect(mdm_drv);
+
+ /* If the device has a kpd pwr gpio then toggle it. */
+ if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
+ /* Pull AP2MDM_KPDPWR gpio high and wait for PS_HOLD to settle,
+ * then pull it back low.
+ */
+ pr_debug("%s: Pulling AP2MDM_KPDPWR gpio high\n", __func__);
+ gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 1);
+ msleep(1000);
+ gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
+ }
+
+ /* De-assert the soft reset line. */
+ pr_debug("%s: De-asserting soft reset gpio\n", __func__);
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction);
+
+ mdm_peripheral_connect(mdm_drv);
+ msleep(200);
+}
+
+static void mdm_do_soft_power_on(struct mdm_modem_drv *mdm_drv)
+{
+ int soft_reset_direction =
+ mdm_drv->pdata->soft_reset_inverted ? 0 : 1;
+
+ /* De-assert the soft reset line. */
+ pr_err("%s: soft resetting mdm modem\n", __func__);
+
+ mdm_peripheral_disconnect(mdm_drv);
+
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction == 1 ? 0 : 1);
+ usleep_range(5000, 10000);
+ gpio_direction_output(mdm_drv->ap2mdm_soft_reset_gpio,
+ soft_reset_direction == 1 ? 1 : 0);
+
+ mdm_peripheral_connect(mdm_drv);
+ msleep(200);
+}
+
+static void mdm_power_on_common(struct mdm_modem_drv *mdm_drv)
{
power_on_count++;
/* this gpio will be used to indicate apq readiness,
- * de-assert it now so that it can asserted later
+ * de-assert it now so that it can be asserted later.
+ * May not be used.
*/
- gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0);
+ if (mdm_drv->ap2mdm_wakeup_gpio > 0)
+ gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 0);
- /* The second attempt to power-on the mdm is the first attempt
- * from user space, but we're already powered on. Ignore this.
- * Subsequent attempts are from SSR or if something failed, in
- * which case we must always reset the modem.
+ /*
+ * If we did an "early power on" then ignore the very next
+ * power-on request because it would be the first request from
+ * user space but we're already powered on. Ignore it.
*/
- if (power_on_count == 2)
+ if (mdm_drv->pdata->early_power_on &&
+ (power_on_count == 2))
return;
- mdm_peripheral_disconnect(mdm_drv);
-
- /* Pull RESET gpio low and wait for it to settle. */
- pr_debug("Pulling RESET gpio low\n");
- gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 0);
- usleep_range(5000, 10000);
-
- /* Deassert RESET first and wait for it to settle. */
- pr_debug("%s: Pulling RESET gpio high\n", __func__);
- gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 1);
- usleep(20000);
-
- /* Pull PWR gpio high and wait for it to settle, but only
- * the first time the mdm is powered up.
- * Some targets do not use ap2mdm_kpdpwr_n_gpio.
- */
- if (power_on_count == 1) {
- if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0) {
- pr_debug("%s: Powering on mdm modem\n", __func__);
- gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 1);
- usleep(1000);
- }
- }
- mdm_peripheral_connect(mdm_drv);
-
- msleep(200);
-}
-
-static void power_down_mdm(struct mdm_modem_drv *mdm_drv)
-{
- int i;
-
- for (i = MDM_MODEM_TIMEOUT; i > 0; i -= MDM_MODEM_DELTA) {
- pet_watchdog();
- msleep(MDM_MODEM_DELTA);
- if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
- break;
- }
- if (i <= 0) {
- pr_err("%s: MDM2AP_STATUS never went low.\n",
- __func__);
- gpio_direction_output(mdm_drv->ap2mdm_pmic_reset_n_gpio, 0);
-
- for (i = MDM_HOLD_TIME; i > 0; i -= MDM_MODEM_DELTA) {
- pet_watchdog();
- msleep(MDM_MODEM_DELTA);
- }
- }
- if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
- gpio_direction_output(mdm_drv->ap2mdm_kpdpwr_n_gpio, 0);
- mdm_peripheral_disconnect(mdm_drv);
+ if (power_on_count == 1)
+ mdm_do_first_power_on(mdm_drv);
+ else
+ mdm_do_soft_power_on(mdm_drv);
}
static void debug_state_changed(int value)
@@ -157,13 +181,15 @@
if (value) {
mdm_peripheral_disconnect(mdm_drv);
mdm_peripheral_connect(mdm_drv);
- gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 1);
+ if (mdm_drv->ap2mdm_wakeup_gpio > 0)
+ gpio_direction_output(mdm_drv->ap2mdm_wakeup_gpio, 1);
}
}
static struct mdm_ops mdm_cb = {
- .power_on_mdm_cb = power_on_mdm,
- .power_down_mdm_cb = power_down_mdm,
+ .power_on_mdm_cb = mdm_power_on_common,
+ .reset_mdm_cb = mdm_power_on_common,
+ .power_down_mdm_cb = mdm_power_down_common,
.debug_state_changed_cb = debug_state_changed,
.status_cb = mdm_status_changed,
};
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 8d99c1c..ffff782 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -39,6 +39,7 @@
#include <linux/msm_charm.h>
#include "msm_watchdog.h"
#include "mdm_private.h"
+#include "sysmon.h"
#define MDM_MODEM_TIMEOUT 6000
#define MDM_MODEM_DELTA 100
@@ -47,6 +48,7 @@
static int mdm_debug_on;
static struct workqueue_struct *mdm_queue;
+static struct workqueue_struct *mdm_sfr_queue;
#define EXTERNAL_MODEM "external_modem"
@@ -58,6 +60,36 @@
static int first_boot = 1;
+#define RD_BUF_SIZE 100
+#define SFR_MAX_RETRIES 10
+#define SFR_RETRY_INTERVAL 1000
+
+static void mdm_restart_reason_fn(struct work_struct *work)
+{
+ int ret, ntries = 0;
+ char sfr_buf[RD_BUF_SIZE];
+
+ do {
+ msleep(SFR_RETRY_INTERVAL);
+ ret = sysmon_get_reason(SYSMON_SS_EXT_MODEM,
+ sfr_buf, sizeof(sfr_buf));
+ if (ret) {
+ /*
+ * The sysmon device may not have been probed as yet
+ * after the restart.
+ */
+ pr_err("%s: Error retrieving mdm restart reason, ret = %d, "
+ "%d/%d tries\n", __func__, ret,
+ ntries + 1, SFR_MAX_RETRIES);
+ } else {
+ pr_err("mdm restart reason: %s\n", sfr_buf);
+ break;
+ }
+ } while (++ntries < SFR_MAX_RETRIES);
+}
+
+static DECLARE_WORK(sfr_reason_work, mdm_restart_reason_fn);
+
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -120,6 +152,14 @@
(unsigned long __user *) arg);
INIT_COMPLETION(mdm_needs_reload);
break;
+ case GET_DLOAD_STATUS:
+ pr_debug("getting status of mdm2ap_errfatal_gpio\n");
+ if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
+ !mdm_drv->mdm_ready)
+ put_user(1, (unsigned long __user *) arg);
+ else
+ put_user(0, (unsigned long __user *) arg);
+ break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
ret = -EINVAL;
@@ -129,31 +169,13 @@
return ret;
}
-static void mdm_fatal_fn(struct work_struct *work)
-{
- pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
- subsystem_restart(EXTERNAL_MODEM);
-}
-
-static DECLARE_WORK(mdm_fatal_work, mdm_fatal_fn);
-
static void mdm_status_fn(struct work_struct *work)
{
int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
- if (!mdm_drv->mdm_ready)
- return;
-
- mdm_drv->ops->status_cb(mdm_drv, value);
-
pr_debug("%s: status:%d\n", __func__, value);
-
- if ((value == 0)) {
- pr_info("%s: unexpected reset external modem\n", __func__);
- subsystem_restart(EXTERNAL_MODEM);
- } else if (value == 1) {
- pr_info("%s: status = 1: mdm is now ready\n", __func__);
- }
+ if (mdm_drv->mdm_ready && mdm_drv->ops->status_cb)
+ mdm_drv->ops->status_cb(mdm_drv, value);
}
static DECLARE_WORK(mdm_status_work, mdm_status_fn);
@@ -162,7 +184,6 @@
{
disable_irq_nosync(mdm_drv->mdm_errfatal_irq);
disable_irq_nosync(mdm_drv->mdm_status_irq);
-
}
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
@@ -170,8 +191,9 @@
pr_debug("%s: mdm got errfatal interrupt\n", __func__);
if (mdm_drv->mdm_ready &&
(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
- pr_debug("%s: scheduling work now\n", __func__);
- queue_work(mdm_queue, &mdm_fatal_work);
+ pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
+ mdm_drv->mdm_ready = 0;
+ subsystem_restart(EXTERNAL_MODEM);
}
return IRQ_HANDLED;
}
@@ -210,8 +232,12 @@
if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
break;
}
- if (i <= 0)
+ if (i <= 0) {
pr_err("%s: MDM2AP_STATUS never went low\n", __func__);
+ /* Reset the modem so that it will go into download mode. */
+ if (mdm_drv && mdm_drv->ops->reset_mdm_cb)
+ mdm_drv->ops->reset_mdm_cb(mdm_drv);
+ }
return NOTIFY_DONE;
}
@@ -221,16 +247,23 @@
static irqreturn_t mdm_status_change(int irq, void *dev_id)
{
+ int value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
+
pr_debug("%s: mdm sent status change interrupt\n", __func__);
-
- queue_work(mdm_queue, &mdm_status_work);
-
+ if (value == 0 && mdm_drv->mdm_ready == 1) {
+ pr_info("%s: unexpected reset external modem\n", __func__);
+ mdm_drv->mdm_unexpected_reset_occurred = 1;
+ mdm_drv->mdm_ready = 0;
+ subsystem_restart(EXTERNAL_MODEM);
+ } else if (value == 1) {
+ pr_info("%s: status = 1: mdm is now ready\n", __func__);
+ queue_work(mdm_queue, &mdm_status_work);
+ }
return IRQ_HANDLED;
}
static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
{
- mdm_drv->mdm_ready = 0;
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
if (mdm_drv->pdata->ramdump_delay_ms > 0) {
/* Wait for the external modem to complete
@@ -238,7 +271,11 @@
*/
msleep(mdm_drv->pdata->ramdump_delay_ms);
}
- mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+ if (!mdm_drv->mdm_unexpected_reset_occurred)
+ mdm_drv->ops->reset_mdm_cb(mdm_drv);
+ else
+ mdm_drv->mdm_unexpected_reset_occurred = 0;
+
return 0;
}
@@ -253,8 +290,13 @@
msecs_to_jiffies(MDM_BOOT_TIMEOUT))) {
mdm_drv->mdm_boot_status = -ETIMEDOUT;
pr_info("%s: mdm modem restart timed out.\n", __func__);
- } else
+ } else {
pr_info("%s: mdm modem has been restarted\n", __func__);
+
+ /* Log the reason for the restart */
+ if (mdm_drv->pdata->sfr_query)
+ queue_work(mdm_sfr_queue, &sfr_reason_work);
+ }
INIT_COMPLETION(mdm_boot);
return mdm_drv->mdm_boot_status;
}
@@ -275,7 +317,6 @@
pr_info("%s: mdm modem ramdumps completed.\n",
__func__);
INIT_COMPLETION(mdm_ram_dumps);
- gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
mdm_drv->ops->power_down_mdm_cb(mdm_drv);
}
return mdm_drv->mdm_ram_dump_status;
@@ -360,11 +401,11 @@
if (pres)
mdm_drv->ap2mdm_wakeup_gpio = pres->start;
- /* AP2MDM_PMIC_RESET_N */
+ /* AP2MDM_SOFT_RESET */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
- "AP2MDM_PMIC_RESET_N");
+ "AP2MDM_SOFT_RESET");
if (pres)
- mdm_drv->ap2mdm_pmic_reset_n_gpio = pres->start;
+ mdm_drv->ap2mdm_soft_reset_gpio = pres->start;
/* AP2MDM_KPDPWR_N */
pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
@@ -372,6 +413,12 @@
if (pres)
mdm_drv->ap2mdm_kpdpwr_n_gpio = pres->start;
+ /* AP2MDM_PMIC_PWR_EN */
+ pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "AP2MDM_PMIC_PWR_EN");
+ if (pres)
+ mdm_drv->ap2mdm_pmic_pwr_en_gpio = pres->start;
+
mdm_drv->boot_type = CHARM_NORMAL_BOOT;
mdm_drv->ops = mdm_ops;
@@ -395,11 +442,18 @@
gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS");
gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL");
- gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
- gpio_request(mdm_drv->ap2mdm_pmic_reset_n_gpio, "AP2MDM_PMIC_RESET_N");
+ if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+ gpio_request(mdm_drv->ap2mdm_kpdpwr_n_gpio, "AP2MDM_KPDPWR_N");
gpio_request(mdm_drv->mdm2ap_status_gpio, "MDM2AP_STATUS");
gpio_request(mdm_drv->mdm2ap_errfatal_gpio, "MDM2AP_ERRFATAL");
+ if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+ gpio_request(mdm_drv->ap2mdm_pmic_pwr_en_gpio,
+ "AP2MDM_PMIC_PWR_EN");
+ if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+ gpio_request(mdm_drv->ap2mdm_soft_reset_gpio,
+ "AP2MDM_SOFT_RESET");
+
if (mdm_drv->ap2mdm_wakeup_gpio > 0)
gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP");
@@ -421,6 +475,16 @@
goto fatal_err;
}
+ mdm_sfr_queue = alloc_workqueue("mdm_sfr_queue", 0, 0);
+ if (!mdm_sfr_queue) {
+ pr_err("%s: could not create workqueue mdm_sfr_queue."
+ " All mdm functionality will be disabled\n",
+ __func__);
+ ret = -ENOMEM;
+ destroy_workqueue(mdm_queue);
+ goto fatal_err;
+ }
+
atomic_notifier_chain_register(&panic_notifier_list, &mdm_panic_blk);
mdm_debugfs_init();
@@ -470,10 +534,18 @@
mdm_drv->mdm_status_irq = irq;
status_err:
+ /*
+ * If AP2MDM_PMIC_PWR_EN gpio is used, pull it high. It remains
+ * high until the whole phone is shut down.
+ */
+ if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+ gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
+
/* Perform early powerup of the external modem in order to
* allow tabla devices to be found.
*/
- mdm_drv->ops->power_on_mdm_cb(mdm_drv);
+ if (mdm_drv->pdata->early_power_on)
+ mdm_drv->ops->power_on_mdm_cb(mdm_drv);
pr_info("%s: Registering mdm modem\n", __func__);
return misc_register(&mdm_modem_misc);
@@ -481,10 +553,14 @@
fatal_err:
gpio_free(mdm_drv->ap2mdm_status_gpio);
gpio_free(mdm_drv->ap2mdm_errfatal_gpio);
- gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
- gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio);
+ if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
+ if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_pmic_pwr_en_gpio);
gpio_free(mdm_drv->mdm2ap_status_gpio);
gpio_free(mdm_drv->mdm2ap_errfatal_gpio);
+ if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_soft_reset_gpio);
if (mdm_drv->ap2mdm_wakeup_gpio > 0)
gpio_free(mdm_drv->ap2mdm_wakeup_gpio);
@@ -502,10 +578,14 @@
gpio_free(mdm_drv->ap2mdm_status_gpio);
gpio_free(mdm_drv->ap2mdm_errfatal_gpio);
- gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
- gpio_free(mdm_drv->ap2mdm_pmic_reset_n_gpio);
+ if (mdm_drv->ap2mdm_kpdpwr_n_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_kpdpwr_n_gpio);
+ if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_pmic_pwr_en_gpio);
gpio_free(mdm_drv->mdm2ap_status_gpio);
gpio_free(mdm_drv->mdm2ap_errfatal_gpio);
+ if (mdm_drv->ap2mdm_soft_reset_gpio > 0)
+ gpio_free(mdm_drv->ap2mdm_soft_reset_gpio);
if (mdm_drv->ap2mdm_wakeup_gpio > 0)
gpio_free(mdm_drv->ap2mdm_wakeup_gpio);
@@ -521,5 +601,7 @@
mdm_disable_irqs();
mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+ if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
+ gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 0);
}
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 206bd8b..f157d88 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -17,6 +17,7 @@
struct mdm_ops {
void (*power_on_mdm_cb)(struct mdm_modem_drv *mdm_drv);
+ void (*reset_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*normal_boot_done_cb)(struct mdm_modem_drv *mdm_drv);
void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*debug_state_changed_cb)(int value);
@@ -31,8 +32,9 @@
unsigned ap2mdm_status_gpio;
unsigned mdm2ap_wakeup_gpio;
unsigned ap2mdm_wakeup_gpio;
- unsigned ap2mdm_pmic_reset_n_gpio;
unsigned ap2mdm_kpdpwr_n_gpio;
+ unsigned ap2mdm_soft_reset_gpio;
+ unsigned ap2mdm_pmic_pwr_en_gpio;
int mdm_errfatal_irq;
int mdm_status_irq;
@@ -41,6 +43,7 @@
int mdm_ram_dump_status;
enum charm_boot_type boot_type;
int mdm_debug_on;
+ int mdm_unexpected_reset_occurred;
struct mdm_ops *ops;
struct mdm_platform_data *pdata;
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
index 9a1e565..4922007 100644
--- a/arch/arm/mach-msm/modem-8960.c
+++ b/arch/arm/mach-msm/modem-8960.c
@@ -36,6 +36,8 @@
static int crash_shutdown;
#define MAX_SSR_REASON_LEN 81U
+#define Q6_FW_WDOG_ENABLE 0x08882024
+#define Q6_SW_WDOG_ENABLE 0x08982024
static void log_modem_sfr(void)
{
@@ -61,6 +63,27 @@
wmb();
}
+static void modem_wdog_check(struct work_struct *work)
+{
+ void __iomem *q6_sw_wdog_addr;
+ u32 regval;
+
+ q6_sw_wdog_addr = ioremap_nocache(Q6_SW_WDOG_ENABLE, 4);
+ if (!q6_sw_wdog_addr)
+ panic("Unable to check modem watchdog status.\n");
+
+ regval = readl_relaxed(q6_sw_wdog_addr);
+ if (!regval) {
+ pr_err("modem-8960: Modem watchdog wasn't activated!. Restarting the modem now.\n");
+ log_modem_sfr();
+ subsystem_restart("modem");
+ }
+
+ iounmap(q6_sw_wdog_addr);
+}
+
+static DECLARE_DELAYED_WORK(modem_wdog_check_work, modem_wdog_check);
+
static void modem_sw_fatal_fn(struct work_struct *work)
{
uint32_t panic_smsm_states = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
@@ -115,14 +138,18 @@
}
}
-#define Q6_FW_WDOG_ENABLE 0x08882024
-#define Q6_SW_WDOG_ENABLE 0x08982024
static int modem_shutdown(const struct subsys_data *subsys)
{
void __iomem *q6_fw_wdog_addr;
void __iomem *q6_sw_wdog_addr;
/*
+ * Cancel any pending wdog_check work items, since we're shutting
+ * down anyway.
+ */
+ cancel_delayed_work(&modem_wdog_check_work);
+
+ /*
* Disable the modem watchdog since it keeps running even after the
* modem is shutdown.
*/
@@ -150,12 +177,16 @@
return 0;
}
+#define MODEM_WDOG_CHECK_TIMEOUT_MS 10000
+
static int modem_powerup(const struct subsys_data *subsys)
{
pil_force_boot("modem_fw");
pil_force_boot("modem");
enable_irq(Q6FW_WDOG_EXPIRED_IRQ);
enable_irq(Q6SW_WDOG_EXPIRED_IRQ);
+ schedule_delayed_work(&modem_wdog_check_work,
+ msecs_to_jiffies(MODEM_WDOG_CHECK_TIMEOUT_MS));
return 0;
}
diff --git a/arch/arm/mach-msm/msm_cache_dump.c b/arch/arm/mach-msm/msm_cache_dump.c
index 404c8f0..b21412f 100644
--- a/arch/arm/mach-msm/msm_cache_dump.c
+++ b/arch/arm/mach-msm/msm_cache_dump.c
@@ -26,7 +26,7 @@
#include <mach/memory.h>
#include <mach/msm_iomap.h>
-#define L2C_IMEM_ADDR 0x2a03f014
+#define L2_DUMP_OFFSET 0x14
static unsigned long msm_cache_dump_addr;
@@ -43,6 +43,11 @@
unsigned long event, void *ptr)
{
#ifdef CONFIG_MSM_CACHE_DUMP_ON_PANIC
+ /*
+ * Clear the bootloader magic so the dumps aren't overwritten
+ */
+ __raw_writel(0, MSM_IMEM_BASE + L2_DUMP_OFFSET);
+
scm_call_atomic1(L1C_SERVICE_ID, CACHE_BUFFER_DUMP_COMMAND_ID, 2);
scm_call_atomic1(L1C_SERVICE_ID, CACHE_BUFFER_DUMP_COMMAND_ID, 1);
#endif
@@ -66,9 +71,6 @@
unsigned long buf;
unsigned long size;
} l1_cache_data;
-#ifndef CONFIG_MSM_CACHE_DUMP_ON_PANIC
- unsigned int *imem_loc;
-#endif
void *temp;
unsigned long total_size = d->l1_size + d->l2_size;
@@ -104,11 +106,9 @@
if (ret)
pr_err("%s: could not register L2 buffer ret = %d.\n",
__func__, ret);
-#else
- imem_loc = ioremap(L2C_IMEM_ADDR, SZ_4K);
- __raw_writel(msm_cache_dump_addr + d->l1_size, imem_loc);
- iounmap(imem_loc);
#endif
+ __raw_writel(msm_cache_dump_addr + d->l1_size,
+ MSM_IMEM_BASE + L2_DUMP_OFFSET);
atomic_notifier_chain_register(&panic_notifier_list,
&msm_cache_dump_blk);
diff --git a/arch/arm/mach-msm/msm_watchdog.c b/arch/arm/mach-msm/msm_watchdog.c
index 9b8bc61..2cff7f0 100644
--- a/arch/arm/mach-msm/msm_watchdog.c
+++ b/arch/arm/mach-msm/msm_watchdog.c
@@ -151,13 +151,43 @@
.notifier_call = panic_wdog_handler,
};
+struct wdog_disable_work_data {
+ struct work_struct work;
+ struct completion complete;
+};
+
+static void wdog_disable_work(struct work_struct *work)
+{
+ struct wdog_disable_work_data *work_data =
+ container_of(work, struct wdog_disable_work_data, work);
+ __raw_writel(0, msm_tmr0_base + WDT0_EN);
+ mb();
+ if (has_vic) {
+ free_irq(WDT0_ACCSCSSNBARK_INT, 0);
+ } else {
+ disable_percpu_irq(WDT0_ACCSCSSNBARK_INT);
+ if (!appsbark_fiq) {
+ free_percpu_irq(WDT0_ACCSCSSNBARK_INT,
+ percpu_pdata);
+ free_percpu(percpu_pdata);
+ }
+ }
+ enable = 0;
+ atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+ cancel_delayed_work(&dogwork_struct);
+ /* may be suspended after the first write above */
+ __raw_writel(0, msm_tmr0_base + WDT0_EN);
+ complete(&work_data->complete);
+ pr_info("MSM Watchdog deactivated.\n");
+}
+
static int wdog_enable_set(const char *val, struct kernel_param *kp)
{
int ret = 0;
int old_val = runtime_disable;
+ struct wdog_disable_work_data work_data;
mutex_lock(&disable_lock);
-
if (!enable) {
printk(KERN_INFO "MSM Watchdog is not active.\n");
ret = -EINVAL;
@@ -165,43 +195,20 @@
}
ret = param_set_int(val, kp);
-
if (ret)
goto done;
- switch (runtime_disable) {
-
- case 1:
- if (!old_val) {
- __raw_writel(0, msm_tmr0_base + WDT0_EN);
- mb();
- if (has_vic) {
- free_irq(WDT0_ACCSCSSNBARK_INT, 0);
- } else {
- disable_percpu_irq(WDT0_ACCSCSSNBARK_INT);
- if (!appsbark_fiq) {
- free_percpu_irq(WDT0_ACCSCSSNBARK_INT,
- percpu_pdata);
- free_percpu(percpu_pdata);
- }
- }
- enable = 0;
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &panic_blk);
- cancel_delayed_work(&dogwork_struct);
- /* may be suspended after the first write above */
- __raw_writel(0, msm_tmr0_base + WDT0_EN);
- printk(KERN_INFO "MSM Watchdog deactivated.\n");
- }
- break;
-
- default:
+ if (runtime_disable == 1) {
+ if (old_val)
+ goto done;
+ init_completion(&work_data.complete);
+ INIT_WORK_ONSTACK(&work_data.work, wdog_disable_work);
+ schedule_work_on(0, &work_data.work);
+ wait_for_completion(&work_data.complete);
+ } else {
runtime_disable = old_val;
ret = -EINVAL;
- break;
-
}
-
done:
mutex_unlock(&disable_lock);
return ret;
@@ -332,9 +339,49 @@
}
}
+struct fiq_handler wdog_fh = {
+ .name = MODULE_NAME,
+};
+
static void init_watchdog_work(struct work_struct *work)
{
u64 timeout = (bark_time * WDT_HZ)/1000;
+ void *stack;
+ int ret;
+
+ if (has_vic) {
+ ret = request_irq(WDT0_ACCSCSSNBARK_INT, wdog_bark_handler, 0,
+ "apps_wdog_bark", NULL);
+ if (ret)
+ return;
+ } else if (appsbark_fiq) {
+ claim_fiq(&wdog_fh);
+ set_fiq_handler(&msm_wdog_fiq_start, msm_wdog_fiq_length);
+ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (!stack) {
+ pr_info("No free pages available - %s fails\n",
+ __func__);
+ return;
+ }
+
+ msm_wdog_fiq_setup(stack);
+ gic_set_irq_secure(WDT0_ACCSCSSNBARK_INT);
+ } else {
+ percpu_pdata = alloc_percpu(struct msm_watchdog_pdata *);
+ if (!percpu_pdata) {
+ pr_err("%s: memory allocation failed for percpu data\n",
+ __func__);
+ return;
+ }
+
+ /* Must request irq before sending scm command */
+ ret = request_percpu_irq(WDT0_ACCSCSSNBARK_INT,
+ wdog_bark_handler, "apps_wdog_bark", percpu_pdata);
+ if (ret) {
+ free_percpu(percpu_pdata);
+ return;
+ }
+ }
configure_bark_dump();
@@ -358,15 +405,9 @@
return;
}
-struct fiq_handler wdog_fh = {
- .name = MODULE_NAME,
-};
-
static int msm_watchdog_probe(struct platform_device *pdev)
{
struct msm_watchdog_pdata *pdata = pdev->dev.platform_data;
- int ret;
- void *stack;
if (!enable || !pdata || !pdata->pet_time || !pdata->bark_time) {
printk(KERN_INFO "MSM Watchdog Not Initialized\n");
@@ -382,41 +423,6 @@
msm_tmr0_base = msm_timer_get_timer0_base();
- if (has_vic) {
- ret = request_irq(WDT0_ACCSCSSNBARK_INT, wdog_bark_handler, 0,
- "apps_wdog_bark", NULL);
- if (ret)
- return ret;
- } else if (appsbark_fiq) {
- claim_fiq(&wdog_fh);
- set_fiq_handler(&msm_wdog_fiq_start, msm_wdog_fiq_length);
- stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!stack) {
- pr_info("No free pages available - %s fails\n",
- __func__);
- return -ENOMEM;
- }
-
- msm_wdog_fiq_setup(stack);
- gic_set_irq_secure(WDT0_ACCSCSSNBARK_INT);
- } else {
- percpu_pdata = alloc_percpu(struct msm_watchdog_pdata *);
- if (!percpu_pdata) {
- pr_err("%s: memory allocation failed for percpu data\n",
- __func__);
- return -ENOMEM;
- }
-
- *__this_cpu_ptr(percpu_pdata) = pdata;
- /* Must request irq before sending scm command */
- ret = request_percpu_irq(WDT0_ACCSCSSNBARK_INT,
- wdog_bark_handler, "apps_wdog_bark", percpu_pdata);
- if (ret) {
- free_percpu(percpu_pdata);
- return ret;
- }
- }
-
/*
* This is only temporary till SBLs turn on the XPUs
* This initialization will be done in SBLs on a later releases
diff --git a/arch/arm/mach-msm/no-pm.c b/arch/arm/mach-msm/no-pm.c
index d1c474b..a0e01b4 100644
--- a/arch/arm/mach-msm/no-pm.c
+++ b/arch/arm/mach-msm/no-pm.c
@@ -32,4 +32,16 @@
}
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns) { }
-EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
+
+void msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls) {}
+
+int msm_pm_idle_prepare(struct cpuidle_device *dev)
+{
+ return -ENOSYS;
+}
+
+int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
+{
+ return -ENOSYS;
+}
+
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
new file mode 100644
index 0000000..ed0b2f0
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem.c
@@ -0,0 +1,314 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <mach/ocmem_priv.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct ocmem_partition {
+ const char *name;
+ int id;
+ unsigned long p_start;
+ unsigned long p_size;
+ unsigned long p_min;
+ unsigned int p_tail;
+};
+
+struct ocmem_plat_data {
+ void __iomem *vbase;
+ unsigned long size;
+ unsigned long base;
+ struct ocmem_partition *parts;
+ int nr_parts;
+};
+
+struct ocmem_zone zones[OCMEM_CLIENT_MAX];
+
+struct ocmem_zone *get_zone(unsigned id)
+{
+ if (id < OCMEM_GRAPHICS || id >= OCMEM_CLIENT_MAX)
+ return NULL;
+ else
+ return &zones[id];
+}
+
+static struct ocmem_plat_data *ocmem_pdata;
+
+#define CLIENT_NAME_MAX 10
+/* Must be in sync with enum ocmem_client */
+static const char *client_names[OCMEM_CLIENT_MAX] = {
+ "graphics",
+ "video",
+ "camera",
+ "hp_audio",
+ "voice",
+ "lp_audio",
+ "sensors",
+ "blast",
+};
+
+struct ocmem_quota_table {
+ const char *name;
+ int id;
+ unsigned long start;
+ unsigned long size;
+ unsigned long min;
+ unsigned int tail;
+};
+
+/* This static table will go away with device tree support */
+static struct ocmem_quota_table qt[OCMEM_CLIENT_MAX] = {
+ /* name, id, start, size, min, tail */
+ { "graphics", OCMEM_GRAPHICS, 0x0, 0x100000, 0x80000, 0},
+ { "video", OCMEM_VIDEO, 0x100000, 0x80000, 0x55000, 1},
+ { "camera", OCMEM_CAMERA, 0x0, 0x0, 0x0, 0},
+ { "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0 },
+ { "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0},
+ { "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0},
+ { "blast", OCMEM_BLAST, 0x120000, 0x20000, 0x20000, 0},
+ { "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0},
+};
+
+static inline int get_id(const char *name)
+{
+ int i = 0;
+ for (i = 0 ; i < OCMEM_CLIENT_MAX; i++) {
+ if (strncmp(name, client_names[i], CLIENT_NAME_MAX) == 0)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static struct ocmem_plat_data *parse_static_config(struct platform_device *pdev)
+{
+ struct ocmem_plat_data *pdata = NULL;
+ struct ocmem_partition *parts = NULL;
+ struct device *dev = &pdev->dev;
+ int nr_parts;
+ int i;
+ int j;
+
+ pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data),
+ GFP_KERNEL);
+
+ if (!pdata) {
+ dev_err(dev, "Unable to allocate memory for"
+ " platform data\n");
+ return NULL;
+ }
+
+ for (i = 0 ; i < ARRAY_SIZE(qt); i++)
+ if (qt[i].size != 0x0)
+ nr_parts++;
+
+ if (nr_parts == 0x0) {
+ dev_err(dev, "No valid ocmem partitions\n");
+ return NULL;
+ } else
+ dev_info(dev, "Total partitions = %d\n", nr_parts);
+
+ parts = devm_kzalloc(dev, sizeof(struct ocmem_partition) * nr_parts,
+ GFP_KERNEL);
+
+ if (!parts) {
+ dev_err(dev, "Unable to allocate memory for"
+ " partition data\n");
+ return NULL;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(qt); i++) {
+ if (qt[i].size == 0x0) {
+ dev_dbg(dev, "Skipping creation of pool for %s\n",
+ qt[i].name);
+ continue;
+ }
+ parts[j].id = qt[i].id;
+ parts[j].p_size = qt[i].size;
+ parts[j].p_start = qt[i].start;
+ parts[j].p_min = qt[i].min;
+ parts[j].p_tail = qt[i].tail;
+ j++;
+ }
+ BUG_ON(j != nr_parts);
+ pdata->nr_parts = nr_parts;
+ pdata->parts = parts;
+ pdata->base = OCMEM_PHYS_BASE;
+ pdata->size = OCMEM_PHYS_SIZE;
+ return pdata;
+}
+
+static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
+{
+ return NULL;
+}
+
+static int ocmem_zone_init(struct platform_device *pdev)
+{
+
+ int ret = -1;
+ int i = 0;
+ unsigned active_zones = 0;
+
+ struct ocmem_zone *zone = NULL;
+ struct ocmem_zone_ops *z_ops = NULL;
+ struct device *dev = &pdev->dev;
+ unsigned long start;
+ struct ocmem_plat_data *pdata = NULL;
+
+ pdata = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->nr_parts; i++) {
+ struct ocmem_partition *part = &pdata->parts[i];
+ zone = get_zone(part->id);
+
+ dev_dbg(dev, "Partition %d, start %lx, size %lx for %s\n",
+ i, part->p_start, part->p_size,
+ client_names[part->id]);
+
+ if (part->p_size > pdata->size) {
+ dev_alert(dev, "Quota > ocmem_size for id:%d\n",
+ part->id);
+ continue;
+ }
+
+ zone->z_pool = gen_pool_create(PAGE_SHIFT, -1);
+
+ if (!zone->z_pool) {
+ dev_alert(dev, "Creating pool failed for id:%d\n",
+ part->id);
+ return -EBUSY;
+ }
+
+ start = pdata->base + part->p_start;
+ ret = gen_pool_add(zone->z_pool, start,
+ part->p_size, -1);
+
+ if (ret < 0) {
+ gen_pool_destroy(zone->z_pool);
+ dev_alert(dev, "Unable to back pool %d with "
+ "buffer:%lx\n", part->id, part->p_size);
+ return -EBUSY;
+ }
+
+ /* Initialize zone allocators */
+ z_ops = devm_kzalloc(dev, sizeof(struct ocmem_zone_ops),
+ GFP_KERNEL);
+ if (!z_ops) {
+ pr_alert("ocmem: Unable to allocate memory for"
+ "zone ops:%d\n", i);
+ return -EBUSY;
+ }
+
+ /* Initialize zone parameters */
+ zone->z_start = start;
+ zone->z_head = zone->z_start;
+ zone->z_end = start + part->p_size;
+ zone->z_tail = zone->z_end;
+ zone->z_free = part->p_size;
+ zone->owner = part->id;
+ zone->active_regions = 0;
+ zone->max_regions = 0;
+ INIT_LIST_HEAD(&zone->region_list);
+ zone->z_ops = z_ops;
+ if (part->p_tail) {
+ z_ops->allocate = allocate_tail;
+ z_ops->free = free_tail;
+ } else {
+ z_ops->allocate = allocate_head;
+ z_ops->free = free_head;
+ }
+ active_zones++;
+
+ if (active_zones == 1)
+ pr_info("Physical OCMEM zone layout:\n");
+
+ pr_info(" zone %s\t: 0x%08lx - 0x%08lx (%4ld KB)\n",
+ client_names[part->id], zone->z_start,
+ zone->z_end, part->p_size/SZ_1K);
+ }
+
+ dev_info(dev, "Total active zones = %d\n", active_zones);
+ return 0;
+}
+
+static int __devinit msm_ocmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!pdev->dev.of_node->child) {
+ dev_info(dev, "Missing Configuration in Device Tree\n");
+ ocmem_pdata = parse_static_config(pdev);
+ } else {
+ ocmem_pdata = parse_dt_config(pdev);
+ }
+
+ /* Check if we have some configuration data to start */
+ if (!ocmem_pdata)
+ return -ENODEV;
+
+ /* Sanity Checks */
+ BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE));
+ BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE));
+
+ platform_set_drvdata(pdev, ocmem_pdata);
+
+ if (ocmem_zone_init(pdev))
+ return -EBUSY;
+
+ dev_info(dev, "initialized successfully\n");
+ return 0;
+}
+
+static int __devexit msm_ocmem_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id msm_ocmem_dt_match[] = {
+ { .compatible = "qcom,msm_ocmem",
+ },
+ {}
+};
+
+static struct platform_driver msm_ocmem_driver = {
+ .probe = msm_ocmem_probe,
+ .remove = __devexit_p(msm_ocmem_remove),
+ .driver = {
+ .name = "msm_ocmem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ocmem_dt_match,
+ },
+};
+
+static int __init ocmem_init(void)
+{
+ return platform_driver_register(&msm_ocmem_driver);
+}
+subsys_initcall(ocmem_init);
+
+static void __exit ocmem_exit(void)
+{
+ platform_driver_unregister(&msm_ocmem_driver);
+}
+module_exit(ocmem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Support for On-Chip Memory on MSM");
diff --git a/arch/arm/mach-msm/ocmem_allocator.c b/arch/arm/mach-msm/ocmem_allocator.c
new file mode 100644
index 0000000..71cacda
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_allocator.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <mach/ocmem.h>
+#include <mach/ocmem_priv.h>
+#include <linux/genalloc.h>
+
+/* All allocator operations are serialized by ocmem driver */
+
+/* The allocators work as follows:
+ Constraints:
+ 1) There is no IOMMU access to OCMEM hence successive allocations
+ in the zone must be physically contiguous
+ 2) Allocations must be freed in reverse order within a zone.
+
+ z->z_start: Fixed pointer to the start of a zone
+ z->z_end: Fixed pointer to the end of a zone
+
+ z->z_head: Movable pointer to the next free area when growing at head
+ Fixed on zones that grow from tail
+
+ z->z_tail: Movable pointer to the next free area when growing at tail
+ Fixed on zones that grow from head
+
+ z->z_free: Free space in a zone that is updated on an allocation/free
+
+ reserve: Enable libgenpool to simulate tail allocations
+*/
+
+unsigned long allocate_head(struct ocmem_zone *z, unsigned long size)
+{
+
+ unsigned long offset;
+
+ offset = gen_pool_alloc(z->z_pool, size);
+
+ if (!offset)
+ return -ENOMEM;
+
+ z->z_head += size;
+ z->z_free -= size;
+ return offset;
+}
+
+unsigned long allocate_tail(struct ocmem_zone *z, unsigned long size)
+{
+ unsigned long offset;
+ unsigned long reserve;
+ unsigned long head;
+
+ if (z->z_tail < (z->z_head + size))
+ return -ENOMEM;
+
+ reserve = z->z_tail - z->z_head - size;
+ if (reserve) {
+ head = gen_pool_alloc(z->z_pool, reserve);
+ offset = gen_pool_alloc(z->z_pool, size);
+ gen_pool_free(z->z_pool, head, reserve);
+ } else
+ offset = gen_pool_alloc(z->z_pool, size);
+
+ if (!offset)
+ return -ENOMEM;
+
+ z->z_tail -= size;
+ z->z_free -= size;
+ return offset;
+}
+
+int free_head(struct ocmem_zone *z, unsigned long offset,
+ unsigned long size)
+{
+ if (offset > z->z_head) {
+ pr_err("ocmem: Detected out of order free "
+ "leading to fragmentation\n");
+ return -EINVAL;
+ }
+ gen_pool_free(z->z_pool, offset, size);
+ z->z_head -= size;
+ z->z_free += size;
+ return 0;
+}
+
+int free_tail(struct ocmem_zone *z, unsigned long offset,
+ unsigned long size)
+{
+ if (offset > z->z_tail) {
+ pr_err("ocmem: Detected out of order free "
+ "leading to fragmentation\n");
+ return -EINVAL;
+ }
+ gen_pool_free(z->z_pool, offset, size);
+ z->z_tail += size;
+ z->z_free += size;
+ return 0;
+}
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index 4e2b1083..dd91e66 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -201,7 +201,7 @@
.write = msm_pcie_wr_conf,
};
-static int __devinit msm_pcie_gpio_init(void)
+static int __init msm_pcie_gpio_init(void)
{
int rc, i;
struct msm_pcie_gpio_info_t *info;
@@ -239,7 +239,7 @@
gpio_free(msm_pcie_dev.gpio[i].num);
}
-static int __devinit msm_pcie_vreg_init(struct device *dev)
+static int __init msm_pcie_vreg_init(struct device *dev)
{
int i, rc = 0;
struct regulator *vreg;
@@ -306,7 +306,7 @@
}
}
-static int __devinit msm_pcie_clk_init(struct device *dev)
+static int __init msm_pcie_clk_init(struct device *dev)
{
int i, rc = 0;
struct clk *clk_hdl;
@@ -346,7 +346,7 @@
}
}
-static void __devinit msm_pcie_config_controller(void)
+static void __init msm_pcie_config_controller(void)
{
struct msm_pcie_dev_t *dev = &msm_pcie_dev;
struct msm_pcie_res_info_t *axi_bar = &dev->res[MSM_PCIE_RES_AXI_BAR];
@@ -393,7 +393,7 @@
wmb();
}
-static int __devinit msm_pcie_get_resources(struct platform_device *pdev)
+static int __init msm_pcie_get_resources(struct platform_device *pdev)
{
int i, rc = 0;
struct resource *res;
@@ -437,7 +437,7 @@
return rc;
}
-static void __devexit msm_pcie_release_resources(void)
+static void msm_pcie_release_resources(void)
{
int i;
@@ -452,7 +452,7 @@
msm_pcie_dev.axi_conf = NULL;
}
-static int __devinit msm_pcie_setup(int nr, struct pci_sys_data *sys)
+static int __init msm_pcie_setup(int nr, struct pci_sys_data *sys)
{
int rc;
struct msm_pcie_dev_t *dev = &msm_pcie_dev;
@@ -548,8 +548,8 @@
return (rc) ? 0 : 1;
}
-static struct pci_bus __devinit *msm_pcie_scan_bus(int nr,
- struct pci_sys_data *sys)
+static struct pci_bus __init *msm_pcie_scan_bus(int nr,
+ struct pci_sys_data *sys)
{
struct pci_bus *bus = NULL;
@@ -560,13 +560,13 @@
return bus;
}
-static int __devinit msm_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+static int __init msm_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
PCIE_DBG("slot %d pin %d\n", slot, pin);
return (pin <= 4) ? (PCIE20_INTA + pin - 1) : 0;
}
-static struct hw_pci msm_pci __devinitdata = {
+static struct hw_pci msm_pci __initdata = {
.nr_controllers = 1,
.swizzle = pci_std_swizzle,
.setup = msm_pcie_setup,
@@ -574,7 +574,7 @@
.map_irq = msm_pcie_map_irq,
};
-static int __devinit msm_pcie_probe(struct platform_device *pdev)
+static int __init msm_pcie_probe(struct platform_device *pdev)
{
const struct msm_pcie_platform *pdata;
int rc;
@@ -603,7 +603,7 @@
return 0;
}
-static int __devexit msm_pcie_remove(struct platform_device *pdev)
+static int __exit msm_pcie_remove(struct platform_device *pdev)
{
PCIE_DBG("\n");
@@ -621,8 +621,7 @@
}
static struct platform_driver msm_pcie_driver = {
- .probe = msm_pcie_probe,
- .remove = __devexit_p(msm_pcie_remove),
+ .remove = __exit_p(msm_pcie_remove),
.driver = {
.name = "msm_pcie",
.owner = THIS_MODULE,
@@ -632,7 +631,7 @@
static int __init msm_pcie_init(void)
{
PCIE_DBG("\n");
- return platform_driver_register(&msm_pcie_driver);
+ return platform_driver_probe(&msm_pcie_driver, msm_pcie_probe);
}
subsys_initcall(msm_pcie_init);
diff --git a/arch/arm/mach-msm/pcie_irq.c b/arch/arm/mach-msm/pcie_irq.c
index df100db..d915561 100644
--- a/arch/arm/mach-msm/pcie_irq.c
+++ b/arch/arm/mach-msm/pcie_irq.c
@@ -67,7 +67,7 @@
return IRQ_HANDLED;
}
-uint32_t __devinit msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+uint32_t __init msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
{
int i, rc;
@@ -93,7 +93,7 @@
return rc;
}
-void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+void __exit msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
free_irq(PCIE20_INT_MSI, dev);
}
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 0ecea85..9d0ce0d 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -137,8 +137,11 @@
const struct firmware *fw = NULL;
const u8 *data;
- if (memblock_is_region_memory(phdr->p_paddr, phdr->p_memsz)) {
- dev_err(&pil->dev, "Kernel memory would be overwritten");
+ if (memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+ dev_err(&pil->dev,
+ "kernel memory would be overwritten [%#08lx, %#08lx)\n",
+ (unsigned long)phdr->p_paddr,
+ (unsigned long)(phdr->p_paddr + phdr->p_memsz));
return -EPERM;
}
@@ -575,6 +578,10 @@
"invalid proxy voting. ignoring\n"))
((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+ WARN(desc->ops->proxy_unvote && !desc->proxy_timeout,
+ "A proxy timeout of 0 ms was specified for %s. Specify one in "
+ "desc->proxy_timeout.\n", desc->name);
+
pil = kzalloc(sizeof(*pil), GFP_KERNEL);
if (!pil)
return ERR_PTR(-ENOMEM);
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 8446e42..131a74b 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -116,14 +116,9 @@
int err;
struct q6v4_data *drv = dev_get_drvdata(dev);
- err = regulator_set_voltage(drv->vreg, 1050000, 1050000);
+ err = regulator_set_voltage(drv->vreg, 375000, 375000);
if (err) {
- dev_err(dev, "Failed to set regulator's voltage.\n");
- return err;
- }
- err = regulator_set_optimum_mode(drv->vreg, 100000);
- if (err < 0) {
- dev_err(dev, "Failed to set regulator's mode.\n");
+ dev_err(dev, "Failed to set regulator's voltage step.\n");
return err;
}
err = regulator_enable(drv->vreg);
@@ -131,6 +126,18 @@
dev_err(dev, "Failed to enable regulator.\n");
return err;
}
+
+ /*
+ * Q6 hardware requires a two step voltage ramp-up.
+ * Delay between the steps.
+ */
+ udelay(100);
+
+ err = regulator_set_voltage(drv->vreg, 1050000, 1050000);
+ if (err) {
+ dev_err(dev, "Failed to set regulator's voltage.\n");
+ return err;
+ }
drv->vreg_enabled = true;
return 0;
}
@@ -411,6 +418,12 @@
if (IS_ERR(drv->vreg))
return PTR_ERR(drv->vreg);
+ ret = regulator_set_optimum_mode(drv->vreg, 100000);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set regulator's mode.\n");
+ return ret;
+ }
+
drv->xo = devm_clk_get(&pdev->dev, "xo");
if (IS_ERR(drv->xo))
return PTR_ERR(drv->xo);
diff --git a/arch/arm/mach-msm/pil-q6v5-lpass.c b/arch/arm/mach-msm/pil-q6v5-lpass.c
index 5eac539..311f8a7 100644
--- a/arch/arm/mach-msm/pil-q6v5-lpass.c
+++ b/arch/arm/mach-msm/pil-q6v5-lpass.c
@@ -15,66 +15,34 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/clk.h>
#include "peripheral-loader.h"
#include "pil-q6v5.h"
-/* Register Offsets */
#define QDSP6SS_RST_EVB 0x010
-#define LPASS_Q6SS_BCR 0x06000
-#define LPASS_Q6SS_AHB_LFABIF_CBCR 0x22000
-#define LPASS_Q6SS_XO_CBCR 0x26000
-#define AXI_HALTREQ 0x0
-#define AXI_HALTACK 0x4
-#define AXI_IDLE 0x8
-
-#define HALT_ACK_TIMEOUT_US 100000
-
-static void clk_reg_enable(void __iomem *reg)
-{
- u32 val;
- val = readl_relaxed(reg);
- val |= BIT(0);
- writel_relaxed(val, reg);
-}
-
-static void clk_reg_disable(void __iomem *reg)
-{
- u32 val;
- val = readl_relaxed(reg);
- val &= ~BIT(0);
- writel_relaxed(val, reg);
-}
+#define PROXY_TIMEOUT_MS 10000
static int pil_lpass_shutdown(struct pil_desc *pil)
{
struct q6v5_data *drv = dev_get_drvdata(pil->dev);
- int ret;
- u32 status;
- writel_relaxed(1, drv->axi_halt_base + AXI_HALTREQ);
- ret = readl_poll_timeout(drv->axi_halt_base + AXI_HALTACK,
- status, status, 50, HALT_ACK_TIMEOUT_US);
- if (ret)
- dev_err(pil->dev, "Port halt timeout\n");
- else if (!readl_relaxed(drv->axi_halt_base + AXI_IDLE))
- dev_err(pil->dev, "Port halt failed\n");
- writel_relaxed(0, drv->axi_halt_base + AXI_HALTREQ);
+ pil_q6v5_halt_axi_port(pil, drv->axi_halt_base);
- /* Make sure Q6 registers are accessible */
- writel_relaxed(0, drv->clk_base + LPASS_Q6SS_BCR);
- clk_reg_enable(drv->clk_base + LPASS_Q6SS_AHB_LFABIF_CBCR);
- mb();
+ /*
+ * If the shutdown function is called before the reset function, clocks
+ * will not be enabled yet. Enable them here so that register writes
+ * performed during the shutdown succeed.
+ */
+ if (drv->is_booted == false)
+ pil_q6v5_enable_clks(pil);
pil_q6v5_shutdown(pil);
+ pil_q6v5_disable_clks(pil);
- /* Disable clocks and assert subsystem resets. */
- clk_reg_disable(drv->clk_base + LPASS_Q6SS_AHB_LFABIF_CBCR);
- clk_reg_disable(drv->clk_base + LPASS_Q6SS_XO_CBCR);
- writel_relaxed(1, drv->clk_base + LPASS_Q6SS_BCR);
+ drv->is_booted = false;
return 0;
}
@@ -82,21 +50,25 @@
static int pil_lpass_reset(struct pil_desc *pil)
{
struct q6v5_data *drv = dev_get_drvdata(pil->dev);
+ int ret;
- /*
- * Bring subsystem out of reset and enable required
- * regulators and clocks.
- */
- writel_relaxed(0, drv->clk_base + LPASS_Q6SS_BCR);
- clk_reg_enable(drv->clk_base + LPASS_Q6SS_XO_CBCR);
- clk_reg_enable(drv->clk_base + LPASS_Q6SS_AHB_LFABIF_CBCR);
- mb();
+ ret = pil_q6v5_enable_clks(pil);
+ if (ret)
+ return ret;
/* Program Image Address */
writel_relaxed(((drv->start_addr >> 4) & 0x0FFFFFF0),
drv->reg_base + QDSP6SS_RST_EVB);
- return pil_q6v5_reset(pil);
+ ret = pil_q6v5_reset(pil);
+ if (ret) {
+ pil_q6v5_disable_clks(pil);
+ return ret;
+ }
+
+ drv->is_booted = true;
+
+ return 0;
}
static struct pil_reset_ops pil_lpass_ops = {
@@ -111,7 +83,6 @@
{
struct q6v5_data *drv;
struct pil_desc *desc;
- struct resource *res;
desc = pil_q6v5_init(pdev);
if (IS_ERR(desc))
@@ -123,12 +94,7 @@
desc->ops = &pil_lpass_ops;
desc->owner = THIS_MODULE;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!drv->axi_halt_base)
- return -ENOMEM;
+ desc->proxy_timeout = PROXY_TIMEOUT_MS;
drv->pil = msm_pil_register(desc);
if (IS_ERR(drv->pil))
diff --git a/arch/arm/mach-msm/pil-q6v5.c b/arch/arm/mach-msm/pil-q6v5.c
index cd58a4c..6a96990 100644
--- a/arch/arm/mach-msm/pil-q6v5.c
+++ b/arch/arm/mach-msm/pil-q6v5.c
@@ -15,19 +15,29 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <mach/clk.h>
+
#include "peripheral-loader.h"
#include "pil-q6v5.h"
-/* Register Offsets */
+/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET 0x014
#define QDSP6SS_GFMUX_CTL 0x020
#define QDSP6SS_PWR_CTL 0x030
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
/* QDSP6SS_RESET */
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_ETM_ISDB_ARES BIT(3)
@@ -66,6 +76,27 @@
}
EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+ int ret;
+ u32 status;
+
+ /* Assert halt request */
+ writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+ /* Wait for halt */
+ ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+ status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+ if (ret)
+ dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+ else if (!readl_relaxed(halt_base + AXI_IDLE))
+ dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
int pil_q6v5_init_image(struct pil_desc *pil, const u8 *metadata,
size_t size)
{
@@ -76,6 +107,42 @@
}
EXPORT_SYMBOL(pil_q6v5_init_image);
+int pil_q6v5_enable_clks(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = dev_get_drvdata(pil->dev);
+ int ret;
+
+ ret = clk_reset(drv->core_clk, CLK_RESET_DEASSERT);
+ if (ret)
+ goto err_reset;
+ ret = clk_prepare_enable(drv->core_clk);
+ if (ret)
+ goto err_core_clk;
+ ret = clk_prepare_enable(drv->bus_clk);
+ if (ret)
+ goto err_bus_clk;
+
+ return 0;
+
+err_bus_clk:
+ clk_disable_unprepare(drv->core_clk);
+err_core_clk:
+ clk_reset(drv->core_clk, CLK_RESET_ASSERT);
+err_reset:
+ return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_enable_clks);
+
+void pil_q6v5_disable_clks(struct pil_desc *pil)
+{
+ struct q6v5_data *drv = dev_get_drvdata(pil->dev);
+
+ clk_disable_unprepare(drv->bus_clk);
+ clk_disable_unprepare(drv->core_clk);
+ clk_reset(drv->core_clk, CLK_RESET_ASSERT);
+}
+EXPORT_SYMBOL(pil_q6v5_disable_clks);
+
void pil_q6v5_shutdown(struct pil_desc *pil)
{
u32 val;
@@ -172,11 +239,10 @@
resource_size(res));
if (!drv->reg_base)
return ERR_PTR(-ENOMEM);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- drv->clk_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!drv->clk_base)
+ drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!drv->axi_halt_base)
return ERR_PTR(-ENOMEM);
desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
@@ -192,6 +258,14 @@
if (IS_ERR(drv->xo))
return ERR_CAST(drv->xo);
+ drv->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(drv->bus_clk))
+ return ERR_CAST(drv->bus_clk);
+
+ drv->core_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(drv->core_clk))
+ return ERR_CAST(drv->core_clk);
+
desc->dev = &pdev->dev;
return desc;
diff --git a/arch/arm/mach-msm/pil-q6v5.h b/arch/arm/mach-msm/pil-q6v5.h
index b17d4e7..a9a8d07 100644
--- a/arch/arm/mach-msm/pil-q6v5.h
+++ b/arch/arm/mach-msm/pil-q6v5.h
@@ -21,23 +21,27 @@
struct q6v5_data {
void __iomem *reg_base;
- void __iomem *clk_base;
+ struct clk *xo;
+ struct clk *bus_clk;
+ struct clk *core_clk;
void __iomem *axi_halt_base;
void __iomem *rmb_base;
unsigned long start_addr;
struct regulator *vreg;
- bool vreg_enabled;
+ bool is_booted;
int self_auth;
- struct clk *xo;
struct pil_device *pil;
};
int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
int pil_q6v5_init_image(struct pil_desc *pil, const u8 *metadata,
size_t size);
void pil_q6v5_shutdown(struct pil_desc *pil);
int pil_q6v5_reset(struct pil_desc *pil);
+int pil_q6v5_enable_clks(struct pil_desc *pil);
+void pil_q6v5_disable_clks(struct pil_desc *pil);
struct pil_desc *pil_q6v5_init(struct platform_device *pdev);
#endif
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index 393f1bd..91f1133 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -40,6 +40,7 @@
static bool cold_boot_done;
static uint32_t *msm8625_boot_vector;
+static void __iomem *reset_core1_base;
/*
* Write pen_release in a way that is guaranteed to be visible to all
@@ -155,15 +156,22 @@
__raw_writel(0x0, base_ptr);
mb();
- iounmap(base_ptr);
+ reset_core1_base = base_ptr;
return 0;
}
+void __iomem *core1_reset_base(void)
+{
+ return reset_core1_base;
+}
+
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned long timeout;
+ preset_lpj = loops_per_jiffy;
+
if (cold_boot_done == false) {
if (msm8625_release_secondary()) {
pr_err("Failed to release secondary core\n");
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index cc17ceb..070e2c5 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -21,11 +21,9 @@
#include <linux/ktime.h>
#include <linux/pm.h>
#include <linux/pm_qos_params.h>
-#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
-#include <linux/uaccess.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <mach/msm_iomap.h>
@@ -319,220 +317,6 @@
}
/******************************************************************************
- * CONFIG_MSM_IDLE_STATS
- *****************************************************************************/
-
-#ifdef CONFIG_MSM_IDLE_STATS
-enum msm_pm_time_stats_id {
- MSM_PM_STAT_REQUESTED_IDLE,
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_RETENTION,
- MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- MSM_PM_STAT_COUNT
-};
-
-struct msm_pm_time_stats {
- const char *name;
- int64_t first_bucket_time;
- int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int count;
- int64_t total_time;
-};
-
-struct msm_pm_cpu_time_stats {
- struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
-};
-
-static DEFINE_SPINLOCK(msm_pm_stats_lock);
-static DEFINE_PER_CPU_SHARED_ALIGNED(
- struct msm_pm_cpu_time_stats, msm_pm_stats);
-
-/*
- * Add the given time data to the statistics collection.
- */
-static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
-{
- unsigned long flags;
- struct msm_pm_time_stats *stats;
- int64_t bt;
- int i;
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- stats = __get_cpu_var(msm_pm_stats).stats;
-
- stats[id].total_time += t;
- stats[id].count++;
-
- bt = t;
- do_div(bt, stats[id].first_bucket_time);
-
- if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
- (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
- i = DIV_ROUND_UP(fls((uint32_t)bt),
- CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
- else
- i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
-
- if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
- i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
-
- stats[id].bucket[i]++;
-
- if (t < stats[id].min_time[i] || !stats[id].max_time[i])
- stats[id].min_time[i] = t;
- if (t > stats[id].max_time[i])
- stats[id].max_time[i] = t;
-
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
-}
-
-/*
- * Helper function of snprintf where buf is auto-incremented, size is auto-
- * decremented, and there is no return value.
- *
- * NOTE: buf and size must be l-values (e.g. variables)
- */
-#define SNPRINTF(buf, size, format, ...) \
- do { \
- if (size > 0) { \
- int ret; \
- ret = snprintf(buf, size, format, ## __VA_ARGS__); \
- if (ret > size) { \
- buf += size; \
- size = 0; \
- } else { \
- buf += ret; \
- size -= ret; \
- } \
- } \
- } while (0)
-
-/*
- * Write out the power management statistics.
- */
-static int msm_pm_read_proc
- (char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- unsigned int cpu = off / MSM_PM_STAT_COUNT;
- int id = off % MSM_PM_STAT_COUNT;
- char *p = page;
-
- if (count < 1024) {
- *start = (char *) 0;
- *eof = 0;
- return 0;
- }
-
- if (cpu < num_possible_cpus()) {
- unsigned long flags;
- struct msm_pm_time_stats *stats;
- int i;
- int64_t bucket_time;
- int64_t s;
- uint32_t ns;
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- stats = per_cpu(msm_pm_stats, cpu).stats;
-
- s = stats[id].total_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- "[cpu %u] %s:\n"
- " count: %7d\n"
- " total_time: %lld.%09u\n",
- cpu, stats[id].name,
- stats[id].count,
- s, ns);
-
- bucket_time = stats[id].first_bucket_time;
- for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
- s = bucket_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- " <%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, stats[id].bucket[i],
- stats[id].min_time[i],
- stats[id].max_time[i]);
-
- bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
- }
-
- SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, stats[id].bucket[i],
- stats[id].min_time[i],
- stats[id].max_time[i]);
-
- *start = (char *) 1;
- *eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());
-
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
- }
-
- return p - page;
-}
-#undef SNPRINTF
-
-#define MSM_PM_STATS_RESET "reset"
-
-/*
- * Reset the power management statistics values.
- */
-static int msm_pm_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char buf[sizeof(MSM_PM_STATS_RESET)];
- int ret;
- unsigned long flags;
- unsigned int cpu;
-
- if (count < strlen(MSM_PM_STATS_RESET)) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
- ret = -EFAULT;
- goto write_proc_failed;
- }
-
- if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- for_each_possible_cpu(cpu) {
- struct msm_pm_time_stats *stats;
- int i;
-
- stats = per_cpu(msm_pm_stats, cpu).stats;
- for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
- memset(stats[i].bucket,
- 0, sizeof(stats[i].bucket));
- memset(stats[i].min_time,
- 0, sizeof(stats[i].min_time));
- memset(stats[i].max_time,
- 0, sizeof(stats[i].max_time));
- stats[i].count = 0;
- stats[i].total_time = 0;
- }
- }
-
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
- return count;
-
-write_proc_failed:
- return ret;
-}
-#undef MSM_PM_STATS_RESET
-#endif /* CONFIG_MSM_IDLE_STATS */
-
-
-/******************************************************************************
* Configure Hardware before/after Low Power Mode
*****************************************************************************/
@@ -805,6 +589,38 @@
msm_timer_exit_idle((int) timer_halted);
}
+static int64_t msm_pm_timer_enter_suspend(int64_t *period)
+{
+ int time = 0;
+
+ if (msm_pm_use_qtimer)
+ return sched_clock();
+
+ time = msm_timer_get_sclk_time(period);
+ if (!time)
+ pr_err("%s: Unable to read sclk.\n", __func__);
+
+ return time;
+}
+
+static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
+{
+ if (msm_pm_use_qtimer)
+ return sched_clock() - time;
+
+ if (time != 0) {
+ int64_t end_time = msm_timer_get_sclk_time(NULL);
+ if (end_time != 0) {
+ time = end_time - time;
+ if (time < 0)
+ time += period;
+ } else
+ time = 0;
+ }
+
+ return time;
+}
+
/******************************************************************************
* External Idle/Suspend Functions
*****************************************************************************/
@@ -926,9 +742,7 @@
int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
int64_t time;
-#ifdef CONFIG_MSM_IDLE_STATS
int exit_stat;
-#endif
if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
pr_info("CPU%u: %s: mode %d\n",
@@ -939,23 +753,17 @@
switch (sleep_mode) {
case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
msm_pm_swfi();
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_IDLE_WFI;
-#endif
break;
case MSM_PM_SLEEP_MODE_RETENTION:
msm_pm_retention();
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_RETENTION;
-#endif
break;
case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
msm_pm_power_collapse_standalone(true);
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
-#endif
break;
case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
@@ -987,10 +795,7 @@
notify_rpm, collapsed);
}
msm_pm_timer_exit_idle(timer_halted);
-
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
-#endif
break;
}
@@ -1000,9 +805,7 @@
}
time = ktime_to_ns(ktime_get()) - time;
-#ifdef CONFIG_MSM_IDLE_STATS
msm_pm_add_stat(exit_stat, time);
-#endif
do_div(time, 1000);
return (int) time;
@@ -1057,7 +860,7 @@
msm_pm_retention();
} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
per_cpu(msm_pm_last_slp_mode, cpu)
- = MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
+ = MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT;
msm_pm_swfi();
} else
per_cpu(msm_pm_last_slp_mode, cpu) = MSM_PM_SLEEP_MODE_NR;
@@ -1097,11 +900,8 @@
{
bool allow[MSM_PM_SLEEP_MODE_NR];
int i;
-
-#ifdef CONFIG_MSM_IDLE_STATS
int64_t period = 0;
- int64_t time = msm_timer_get_sclk_time(&period);
-#endif
+ int64_t time = msm_pm_timer_enter_suspend(&period);
if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
pr_info("%s\n", __func__);
@@ -1163,19 +963,8 @@
pr_err("%s: cannot find the lowest power limit\n",
__func__);
}
-
-#ifdef CONFIG_MSM_IDLE_STATS
- if (time != 0) {
- int64_t end_time = msm_timer_get_sclk_time(NULL);
- if (end_time != 0) {
- time = end_time - time;
- if (time < 0)
- time += period;
- } else
- time = 0;
- }
+ time = msm_pm_timer_exit_suspend(time, period);
msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
-#endif /* CONFIG_MSM_IDLE_STATS */
} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
pr_info("%s: standalone power collapse\n", __func__);
@@ -1217,10 +1006,14 @@
pgd_t *pc_pgd;
pmd_t *pmd;
unsigned long pmdval;
-#ifdef CONFIG_MSM_IDLE_STATS
- unsigned int cpu;
- struct proc_dir_entry *d_entry;
-#endif
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ };
+
/* Page table for cores to come back up safely. */
pc_pgd = pgd_alloc(&init_mm);
if (!pc_pgd)
@@ -1257,49 +1050,9 @@
clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
virt_to_phys(&msm_pm_pc_pgd));
-#ifdef CONFIG_MSM_IDLE_STATS
- for_each_possible_cpu(cpu) {
- struct msm_pm_time_stats *stats =
- per_cpu(msm_pm_stats, cpu).stats;
-
- stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
- stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
- stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_RETENTION].name = "retention";
- stats[MSM_PM_STAT_RETENTION].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
- "idle-standalone-power-collapse";
- stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
- first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
- "idle-power-collapse";
- stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_SUSPEND].name = "suspend";
- stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
- CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
- }
-
- d_entry = create_proc_entry("msm_pm_stats",
- S_IRUGO | S_IWUSR | S_IWGRP, NULL);
- if (d_entry) {
- d_entry->read_proc = msm_pm_read_proc;
- d_entry->write_proc = msm_pm_write_proc;
- d_entry->data = NULL;
- }
-#endif /* CONFIG_MSM_IDLE_STATS */
-
-
msm_pm_mode_sysfs_add();
+ msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
+
msm_spm_allow_x_cpu_set_vdd(false);
suspend_set_ops(&msm_pm_ops);
diff --git a/arch/arm/mach-msm/pm-stats.c b/arch/arm/mach-msm/pm-stats.c
new file mode 100644
index 0000000..936820a
--- /dev/null
+++ b/arch/arm/mach-msm/pm-stats.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+
+#include "pm.h"
+
+struct msm_pm_time_stats {
+ const char *name;
+ int64_t first_bucket_time;
+ int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int count;
+ int64_t total_time;
+ bool enabled;
+};
+
+struct msm_pm_cpu_time_stats {
+ struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
+};
+
+static DEFINE_SPINLOCK(msm_pm_stats_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(
+ struct msm_pm_cpu_time_stats, msm_pm_stats);
+
+/*
+ * Add the given time data to the statistics collection.
+ */
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
+{
+ unsigned long flags;
+ struct msm_pm_time_stats *stats;
+ int64_t bt;
+ int i;
+
+ spin_lock_irqsave(&msm_pm_stats_lock, flags);
+ stats = __get_cpu_var(msm_pm_stats).stats;
+
+ if (!stats[id].enabled)
+ goto add_bail;
+
+ stats[id].total_time += t;
+ stats[id].count++;
+
+ bt = t;
+ do_div(bt, stats[id].first_bucket_time);
+
+ if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+ (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+ i = DIV_ROUND_UP(fls((uint32_t)bt),
+ CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+ else
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ stats[id].bucket[i]++;
+
+ if (t < stats[id].min_time[i] || !stats[id].max_time[i])
+ stats[id].min_time[i] = t;
+ if (t > stats[id].max_time[i])
+ stats[id].max_time[i] = t;
+
+add_bail:
+ spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+}
+
+/*
+ * Helper function of snprintf where buf is auto-incremented, size is auto-
+ * decremented, and there is no return value.
+ *
+ * NOTE: buf and size must be l-values (e.g. variables)
+ */
+#define SNPRINTF(buf, size, format, ...) \
+ do { \
+ if (size > 0) { \
+ int ret; \
+ ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+ if (ret > size) { \
+ buf += size; \
+ size = 0; \
+ } else { \
+ buf += ret; \
+ size -= ret; \
+ } \
+ } \
+ } while (0)
+
+/*
+ * Write out the power management statistics.
+ */
+static int msm_pm_read_proc
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ unsigned int cpu = off / MSM_PM_STAT_COUNT;
+ int id = off % MSM_PM_STAT_COUNT;
+ char *p = page;
+
+ if (count < 1024) {
+ *start = (char *) 0;
+ *eof = 0;
+ return 0;
+ }
+
+ if (cpu < num_possible_cpus()) {
+ unsigned long flags;
+ struct msm_pm_time_stats *stats;
+ int i;
+ int64_t bucket_time;
+ int64_t s;
+ uint32_t ns;
+
+ spin_lock_irqsave(&msm_pm_stats_lock, flags);
+ stats = per_cpu(msm_pm_stats, cpu).stats;
+
+ /* Skip the disabled ones */
+ if (!stats[id].enabled) {
+ *p = '\0';
+ p++;
+ goto again;
+ }
+
+ s = stats[id].total_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ "[cpu %u] %s:\n"
+ " count: %7d\n"
+ " total_time: %lld.%09u\n",
+ cpu, stats[id].name,
+ stats[id].count,
+ s, ns);
+
+ bucket_time = stats[id].first_bucket_time;
+ for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
+ s = bucket_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ " <%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, stats[id].bucket[i],
+ stats[id].min_time[i],
+ stats[id].max_time[i]);
+
+ bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+ }
+
+ SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, stats[id].bucket[i],
+ stats[id].min_time[i],
+ stats[id].max_time[i]);
+
+again:
+ *start = (char *) 1;
+ *eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());
+
+ spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+ }
+
+ return p - page;
+}
+#undef SNPRINTF
+
+#define MSM_PM_STATS_RESET "reset"
+
+/*
+ * Reset the power management statistics values.
+ */
+static int msm_pm_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char buf[sizeof(MSM_PM_STATS_RESET)];
+ int ret;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (count < sizeof(MSM_PM_STATS_RESET)) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ if (copy_from_user(buf, buffer, sizeof(MSM_PM_STATS_RESET))) {
+ ret = -EFAULT;
+ goto write_proc_failed;
+ }
+
+ if (memcmp(buf, MSM_PM_STATS_RESET, sizeof(MSM_PM_STATS_RESET))) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ spin_lock_irqsave(&msm_pm_stats_lock, flags);
+ for_each_possible_cpu(cpu) {
+ struct msm_pm_time_stats *stats;
+ int i;
+
+ stats = per_cpu(msm_pm_stats, cpu).stats;
+ for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
+ memset(stats[i].bucket,
+ 0, sizeof(stats[i].bucket));
+ memset(stats[i].min_time,
+ 0, sizeof(stats[i].min_time));
+ memset(stats[i].max_time,
+ 0, sizeof(stats[i].max_time));
+ stats[i].count = 0;
+ stats[i].total_time = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+ return count;
+
+write_proc_failed:
+ return ret;
+}
+#undef MSM_PM_STATS_RESET
+
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
+{
+ unsigned int cpu;
+ struct proc_dir_entry *d_entry;
+ int i = 0;
+
+ for_each_possible_cpu(cpu) {
+ struct msm_pm_time_stats *stats =
+ per_cpu(msm_pm_stats, cpu).stats;
+
+ stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
+ stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
+ stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
+ stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_RETENTION].name = "retention";
+ stats[MSM_PM_STAT_RETENTION].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
+ "idle-standalone-power-collapse";
+ stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
+ first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
+ "idle-failed-standalone-power-collapse";
+ stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
+ first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
+ "idle-power-collapse";
+ stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
+ "idle-failed-power-collapse";
+ stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
+ first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_SUSPEND].name = "suspend";
+ stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
+ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend";
+ stats[MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
+ stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ for (i = 0; i < size; i++)
+ stats[enable_stats[i]].enabled = true;
+
+ }
+
+ d_entry = create_proc_entry("msm_pm_stats",
+ S_IRUGO | S_IWUSR | S_IWGRP, NULL);
+ if (d_entry) {
+ d_entry->read_proc = msm_pm_read_proc;
+ d_entry->write_proc = msm_pm_write_proc;
+ d_entry->data = NULL;
+ }
+}
diff --git a/arch/arm/mach-msm/pm.c b/arch/arm/mach-msm/pm.c
deleted file mode 100644
index d684a5a..0000000
--- a/arch/arm/mach-msm/pm.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/* arch/arm/mach-msm/pm.c
- *
- * MSM Power Management Routines
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/pm.h>
-#include <linux/pm_qos_params.h>
-#include <linux/proc_fs.h>
-#include <linux/suspend.h>
-#include <linux/reboot.h>
-#include <linux/uaccess.h>
-#include <mach/msm_iomap.h>
-#include <mach/system.h>
-#include <asm/io.h>
-
-#ifdef CONFIG_HAS_WAKELOCK
-#include <linux/wakelock.h>
-#endif
-
-#include "smd_private.h"
-#include "smd_rpcrouter.h"
-#include "acpuclock.h"
-#include "clock.h"
-#include "proc_comm.h"
-#include "idle.h"
-#include "irq.h"
-#include "gpio.h"
-#include "timer.h"
-#include "pm.h"
-#include "pm-boot.h"
-
-enum {
- MSM_PM_DEBUG_SUSPEND = 1U << 0,
- MSM_PM_DEBUG_POWER_COLLAPSE = 1U << 1,
- MSM_PM_DEBUG_STATE = 1U << 2,
- MSM_PM_DEBUG_CLOCK = 1U << 3,
- MSM_PM_DEBUG_RESET_VECTOR = 1U << 4,
- MSM_PM_DEBUG_SMSM_STATE = 1U << 5,
- MSM_PM_DEBUG_IDLE = 1U << 6,
-};
-static int msm_pm_debug_mask;
-module_param_named(debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
-static int msm_pm_sleep_time_override;
-module_param_named(sleep_time_override,
- msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
-#endif
-
-static int msm_pm_sleep_mode = CONFIG_MSM7X00A_SLEEP_MODE;
-module_param_named(sleep_mode, msm_pm_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP);
-static int msm_pm_idle_sleep_mode = CONFIG_MSM7X00A_IDLE_SLEEP_MODE;
-module_param_named(idle_sleep_mode, msm_pm_idle_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP);
-static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
-module_param_named(idle_sleep_min_time, msm_pm_idle_sleep_min_time, int, S_IRUGO | S_IWUSR | S_IWGRP);
-static int msm_pm_idle_spin_time = CONFIG_MSM7X00A_IDLE_SPIN_TIME;
-module_param_named(idle_spin_time, msm_pm_idle_spin_time, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#define A11S_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
-#define A11S_PWRDOWN (MSM_CSR_BASE + 0x440)
-#define A11S_STANDBY_CTL (MSM_CSR_BASE + 0x108)
-#define A11RAMBACKBIAS (MSM_CSR_BASE + 0x508)
-
-enum {
- SLEEP_LIMIT_NONE = 0,
- SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2
-};
-
-static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
-struct smsm_interrupt_info_ext {
- uint32_t aArm_en_mask;
- uint32_t aArm_interrupts_pending;
- uint32_t aArm_wakeup_reason;
- uint32_t aArm_rpc_prog;
- uint32_t aArm_rpc_proc;
- char aArm_smd_port_name[20];
- uint32_t aArm_gpio_info;
-};
-static struct msm_pm_smem_addr_t {
- uint32_t *sleep_delay;
- uint32_t *limit_sleep;
- struct smsm_interrupt_info *int_info;
- struct smsm_interrupt_info_ext *int_info_ext;
-} msm_pm_sma;
-
-static uint32_t msm_pm_max_sleep_time;
-static struct msm_pm_platform_data *msm_pm_modes;
-
-#ifdef CONFIG_MSM_IDLE_STATS
-enum msm_pm_time_stats_id {
- MSM_PM_STAT_REQUESTED_IDLE,
- MSM_PM_STAT_IDLE_SPIN,
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_IDLE_SLEEP,
- MSM_PM_STAT_IDLE_FAILED_SLEEP,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- MSM_PM_STAT_FAILED_SUSPEND,
- MSM_PM_STAT_NOT_IDLE,
- MSM_PM_STAT_COUNT
-};
-
-static struct msm_pm_time_stats {
- const char *name;
- int64_t first_bucket_time;
- int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int count;
- int64_t total_time;
-} msm_pm_stats[MSM_PM_STAT_COUNT] = {
- [MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request",
- [MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_SPIN].name = "idle-spin",
- [MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_WFI].name = "idle-wfi",
- [MSM_PM_STAT_IDLE_WFI].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_SLEEP].name = "idle-sleep",
- [MSM_PM_STAT_IDLE_SLEEP].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_FAILED_SLEEP].name = "idle-failed-sleep",
- [MSM_PM_STAT_IDLE_FAILED_SLEEP].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_POWER_COLLAPSE].name = "idle-power-collapse",
- [MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
- "idle-failed-power-collapse",
- [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_SUSPEND].name = "suspend",
- [MSM_PM_STAT_SUSPEND].first_bucket_time =
- CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend",
- [MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-
- [MSM_PM_STAT_NOT_IDLE].name = "not-idle",
- [MSM_PM_STAT_NOT_IDLE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
-};
-
-static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
-{
- int i;
- int64_t bt;
- msm_pm_stats[id].total_time += t;
- msm_pm_stats[id].count++;
- bt = t;
- do_div(bt, msm_pm_stats[id].first_bucket_time);
- if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
- (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
- i = DIV_ROUND_UP(fls((uint32_t)bt),
- CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
- else
- i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
- msm_pm_stats[id].bucket[i]++;
- if (t < msm_pm_stats[id].min_time[i] || !msm_pm_stats[id].max_time[i])
- msm_pm_stats[id].min_time[i] = t;
- if (t > msm_pm_stats[id].max_time[i])
- msm_pm_stats[id].max_time[i] = t;
-}
-
-static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
-#endif
-
-static int
-msm_pm_wait_state(uint32_t wait_state_all_set, uint32_t wait_state_all_clear,
- uint32_t wait_state_any_set, uint32_t wait_state_any_clear)
-{
- int i;
- uint32_t state;
-
- for (i = 0; i < 2000000; i++) {
- state = smsm_get_state(SMSM_MODEM_STATE);
- if (((state & wait_state_all_set) == wait_state_all_set) &&
- ((~state & wait_state_all_clear) == wait_state_all_clear) &&
- (wait_state_any_set == 0 || (state & wait_state_any_set) ||
- wait_state_any_clear == 0 || (state & wait_state_any_clear)))
- return 0;
- }
- printk(KERN_ERR "msm_pm_wait_state(%x, %x, %x, %x) failed %x\n",
- wait_state_all_set, wait_state_all_clear,
- wait_state_any_set, wait_state_any_clear, state);
- return -ETIMEDOUT;
-}
-
-/*
- * Respond to timing out waiting for Modem
- *
- * NOTE: The function never returns.
- */
-static void msm_pm_timeout(void)
-{
-#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
- printk(KERN_EMERG "%s(): resetting chip\n", __func__);
- msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
-#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
- printk(KERN_EMERG "%s(): resetting modem\n", __func__);
- msm_proc_comm_reset_modem_now();
-#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
- printk(KERN_EMERG "%s(): halting\n", __func__);
-#endif
- for (;;)
- ;
-}
-
-static int msm_sleep(int sleep_mode, uint32_t sleep_delay,
- uint32_t sleep_limit, int from_idle)
-{
- int collapsed;
- uint32_t enter_state;
- uint32_t enter_wait_set = 0;
- uint32_t enter_wait_clear = 0;
- uint32_t exit_state;
- uint32_t exit_wait_clear = 0;
- uint32_t exit_wait_set = 0;
- unsigned long pm_saved_acpu_clk_rate = 0;
- int ret;
- int rv = -EINTR;
-
- if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
- printk(KERN_INFO "msm_sleep(): "
- "mode %d delay %u limit %u idle %d\n",
- sleep_mode, sleep_delay, sleep_limit, from_idle);
-
- switch (sleep_mode) {
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- enter_state = SMSM_PWRC;
- enter_wait_set = SMSM_RSA;
- exit_state = SMSM_WFPI;
- exit_wait_clear = SMSM_RSA;
- break;
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
- enter_state = SMSM_PWRC_SUSPEND;
- enter_wait_set = SMSM_RSA;
- exit_state = SMSM_WFPI;
- exit_wait_clear = SMSM_RSA;
- break;
- case MSM_PM_SLEEP_MODE_APPS_SLEEP:
- enter_state = SMSM_SLEEP;
- exit_state = SMSM_SLEEPEXIT;
- exit_wait_set = SMSM_SLEEPEXIT;
- break;
- default:
- enter_state = 0;
- exit_state = 0;
- }
-
- if (enter_state && !(smsm_get_state(SMSM_MODEM_STATE) & SMSM_RUN)) {
- if ((MSM_PM_DEBUG_POWER_COLLAPSE | MSM_PM_DEBUG_SUSPEND) &
- msm_pm_debug_mask)
- printk(KERN_INFO "msm_sleep(): modem not ready\n");
- rv = -EBUSY;
- goto check_failed;
- }
-
- memset(msm_pm_sma.int_info, 0, sizeof(*msm_pm_sma.int_info));
- msm_irq_enter_sleep1(!!enter_state, from_idle,
- &msm_pm_sma.int_info->aArm_en_mask);
- msm_gpio_enter_sleep(from_idle);
-
- if (enter_state) {
- if (sleep_delay == 0 && sleep_mode >= MSM_PM_SLEEP_MODE_APPS_SLEEP)
- sleep_delay = 192000*5; /* APPS_SLEEP does not allow infinite timeout */
-
- *msm_pm_sma.sleep_delay = sleep_delay;
- *msm_pm_sma.limit_sleep = sleep_limit;
- ret = smsm_change_state(SMSM_APPS_STATE, SMSM_RUN, enter_state);
- if (ret) {
- printk(KERN_ERR "msm_sleep(): smsm_change_state %x failed\n", enter_state);
- enter_state = 0;
- exit_state = 0;
- }
- ret = msm_pm_wait_state(enter_wait_set, enter_wait_clear, 0, 0);
- if (ret) {
- printk(KERN_EMERG "msm_sleep(): power collapse entry "
- "timed out waiting for Modem's response\n");
- msm_pm_timeout();
- }
- }
- if (msm_irq_enter_sleep2(!!enter_state, from_idle))
- goto enter_failed;
-
- if (enter_state) {
- __raw_writel(0x1f, A11S_CLK_SLEEP_EN);
- __raw_writel(1, A11S_PWRDOWN);
-
- __raw_writel(0, A11S_STANDBY_CTL);
- __raw_writel(0, A11RAMBACKBIAS);
-
- if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
- printk(KERN_INFO "msm_sleep(): enter "
- "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
- "smsm_get_state %x\n",
- __raw_readl(A11S_CLK_SLEEP_EN),
- __raw_readl(A11S_PWRDOWN),
- smsm_get_state(SMSM_MODEM_STATE));
- }
-
- if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) {
- pm_saved_acpu_clk_rate = acpuclk_power_collapse();
- if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
- printk(KERN_INFO "msm_sleep(): %ld enter power collapse"
- "\n", pm_saved_acpu_clk_rate);
- if (pm_saved_acpu_clk_rate == 0)
- goto ramp_down_failed;
- }
- if (sleep_mode < MSM_PM_SLEEP_MODE_APPS_SLEEP) {
- if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
- smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
- *msm_pm_sma.limit_sleep,
- msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
- msm_pm_boot_config_before_pc(smp_processor_id(),
- virt_to_phys(msm_pm_collapse_exit));
- collapsed = msm_pm_collapse();
- msm_pm_boot_config_after_pc(smp_processor_id());
- if (collapsed) {
- cpu_init();
- local_fiq_enable();
- rv = 0;
- }
- if (msm_pm_debug_mask & MSM_PM_DEBUG_POWER_COLLAPSE)
- printk(KERN_INFO "msm_pm_collapse(): returned %d\n",
- collapsed);
- if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
- smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
- *msm_pm_sma.limit_sleep,
- msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
- } else {
- msm_arch_idle();
- rv = 0;
- }
-
- if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) {
- if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
- printk(KERN_INFO "msm_sleep(): exit power collapse %ld"
- "\n", pm_saved_acpu_clk_rate);
- if (acpuclk_set_rate(smp_processor_id(),
- pm_saved_acpu_clk_rate, SETRATE_PC) < 0)
- printk(KERN_ERR "msm_sleep(): clk_set_rate %ld "
- "failed\n", pm_saved_acpu_clk_rate);
- }
- if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
- printk(KERN_INFO "msm_sleep(): exit A11S_CLK_SLEEP_EN %x, "
- "A11S_PWRDOWN %x, smsm_get_state %x\n",
- __raw_readl(A11S_CLK_SLEEP_EN),
- __raw_readl(A11S_PWRDOWN),
- smsm_get_state(SMSM_MODEM_STATE));
-ramp_down_failed:
- msm_irq_exit_sleep1(msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
-enter_failed:
- if (enter_state) {
- __raw_writel(0x00, A11S_CLK_SLEEP_EN);
- __raw_writel(0, A11S_PWRDOWN);
- smsm_change_state(SMSM_APPS_STATE, enter_state, exit_state);
- if (msm_pm_wait_state(exit_wait_set, exit_wait_clear, 0, 0)) {
- printk(KERN_EMERG "msm_sleep(): power collapse exit "
- "timed out waiting for Modem's response\n");
- msm_pm_timeout();
- }
- if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
- printk(KERN_INFO "msm_sleep(): sleep exit "
- "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
- "smsm_get_state %x\n",
- __raw_readl(A11S_CLK_SLEEP_EN),
- __raw_readl(A11S_PWRDOWN),
- smsm_get_state(SMSM_MODEM_STATE));
- if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
- smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
- *msm_pm_sma.limit_sleep,
- msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
- }
- msm_irq_exit_sleep2(msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
- if (enter_state) {
- smsm_change_state(SMSM_APPS_STATE, exit_state, SMSM_RUN);
- if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
- printk(KERN_INFO "msm_sleep(): sleep exit "
- "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
- "smsm_get_state %x\n",
- __raw_readl(A11S_CLK_SLEEP_EN),
- __raw_readl(A11S_PWRDOWN),
- smsm_get_state(SMSM_MODEM_STATE));
- }
- msm_irq_exit_sleep3(msm_pm_sma.int_info->aArm_en_mask,
- msm_pm_sma.int_info->aArm_wakeup_reason,
- msm_pm_sma.int_info->aArm_interrupts_pending);
- msm_gpio_exit_sleep();
- smd_sleep_exit();
-
-check_failed:
- return rv;
-}
-
-void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
-{
- int64_t max_sleep_time_bs = max_sleep_time_ns;
-
- /* Convert from ns -> BS units */
- do_div(max_sleep_time_bs, NSEC_PER_SEC / 32768);
-
- if (max_sleep_time_bs > 0x6DDD000)
- msm_pm_max_sleep_time = (uint32_t) 0x6DDD000;
- else
- msm_pm_max_sleep_time = (uint32_t) max_sleep_time_bs;
-
- if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
- printk(KERN_INFO "%s: Requested %lldns (%lldbs), Giving %ubs\n",
- __func__, max_sleep_time_ns,
- max_sleep_time_bs,
- msm_pm_max_sleep_time);
-}
-EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
-
-void arch_idle(void)
-{
- int ret;
- int spin;
- int64_t sleep_time;
- int low_power = 0;
- struct msm_pm_platform_data *mode;
-#ifdef CONFIG_MSM_IDLE_STATS
- int64_t t1;
- static int64_t t2;
- int exit_stat;
-#endif
- int latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- uint32_t sleep_limit = SLEEP_LIMIT_NONE;
- int allow_sleep =
- msm_pm_idle_sleep_mode < MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT &&
-#ifdef CONFIG_HAS_WAKELOCK
- !has_wake_lock(WAKE_LOCK_IDLE) &&
-#endif
- msm_irq_idle_sleep_allowed();
-
- if (!atomic_read(&msm_pm_init_done))
- return;
-
- sleep_time = msm_timer_enter_idle();
-
-#ifdef CONFIG_MSM_IDLE_STATS
- t1 = ktime_to_ns(ktime_get());
- msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - t2);
- msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, sleep_time);
-#endif
-
- mode = &msm_pm_modes[MSM_PM_SLEEP_MODE_POWER_COLLAPSE];
- if (mode->latency >= latency_qos)
- sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
-
- mode = &msm_pm_modes[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN];
- if (mode->latency >= latency_qos)
- allow_sleep = false;
-
- mode = &msm_pm_modes[
- MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT];
- if (mode->latency >= latency_qos) {
- /* no time even for SWFI */
- while (!msm_irq_pending())
- udelay(1);
-#ifdef CONFIG_MSM_IDLE_STATS
- exit_stat = MSM_PM_STAT_IDLE_SPIN;
-#endif
- goto abort_idle;
- }
-
- if (msm_pm_debug_mask & MSM_PM_DEBUG_IDLE)
- printk(KERN_INFO "arch_idle: sleep time %llu, allow_sleep %d\n",
- sleep_time, allow_sleep);
- spin = msm_pm_idle_spin_time >> 10;
- while (spin-- > 0) {
- if (msm_irq_pending()) {
-#ifdef CONFIG_MSM_IDLE_STATS
- exit_stat = MSM_PM_STAT_IDLE_SPIN;
-#endif
- goto abort_idle;
- }
- udelay(1);
- }
- if (sleep_time < msm_pm_idle_sleep_min_time || !allow_sleep) {
- unsigned long saved_rate;
- saved_rate = acpuclk_wait_for_irq();
- if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
- printk(KERN_DEBUG "arch_idle: clk %ld -> swfi\n",
- saved_rate);
- if (saved_rate) {
- msm_arch_idle();
-#ifdef CONFIG_MSM_IDLE_STATS
- exit_stat = MSM_PM_STAT_IDLE_WFI;
-#endif
- } else {
- while (!msm_irq_pending())
- udelay(1);
-#ifdef CONFIG_MSM_IDLE_STATS
- exit_stat = MSM_PM_STAT_IDLE_SPIN;
-#endif
- }
- if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
- printk(KERN_DEBUG "msm_sleep: clk swfi -> %ld\n",
- saved_rate);
- if (saved_rate
- && acpuclk_set_rate(smp_processor_id(),
- saved_rate, SETRATE_SWFI) < 0)
- printk(KERN_ERR "msm_sleep(): clk_set_rate %ld "
- "failed\n", saved_rate);
- } else {
- low_power = 1;
- do_div(sleep_time, NSEC_PER_SEC / 32768);
- if (sleep_time > 0x6DDD000) {
- printk("sleep_time too big %lld\n", sleep_time);
- sleep_time = 0x6DDD000;
- }
- ret = msm_sleep(msm_pm_idle_sleep_mode, sleep_time,
- sleep_limit, 1);
-#ifdef CONFIG_MSM_IDLE_STATS
- switch (msm_pm_idle_sleep_mode) {
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
- case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
- if (ret)
- exit_stat =
- MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
- else {
- exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
- msm_pm_sleep_limit = sleep_limit;
- }
- break;
- case MSM_PM_SLEEP_MODE_APPS_SLEEP:
- if (ret)
- exit_stat = MSM_PM_STAT_IDLE_FAILED_SLEEP;
- else
- exit_stat = MSM_PM_STAT_IDLE_SLEEP;
- break;
- default:
- exit_stat = MSM_PM_STAT_IDLE_WFI;
- }
-#endif
- }
-abort_idle:
- msm_timer_exit_idle(low_power);
-#ifdef CONFIG_MSM_IDLE_STATS
- t2 = ktime_to_ns(ktime_get());
- msm_pm_add_stat(exit_stat, t2 - t1);
-#endif
-}
-
-static int msm_pm_enter(suspend_state_t state)
-{
- uint32_t sleep_limit = SLEEP_LIMIT_NONE;
- int ret;
-#ifdef CONFIG_MSM_IDLE_STATS
- int64_t period = 0;
- int64_t time = 0;
-
- time = msm_timer_get_sclk_time(&period);
-#endif
-
- clock_debug_print_enabled();
-
-#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
- if (msm_pm_sleep_time_override > 0) {
- int64_t ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
- msm_pm_set_max_sleep_time(ns);
- msm_pm_sleep_time_override = 0;
- }
-#endif
-
- ret = msm_sleep(msm_pm_sleep_mode,
- msm_pm_max_sleep_time, sleep_limit, 0);
-
-#ifdef CONFIG_MSM_IDLE_STATS
- if (msm_pm_sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND ||
- msm_pm_sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
- enum msm_pm_time_stats_id id;
- int64_t end_time;
-
- if (ret)
- id = MSM_PM_STAT_FAILED_SUSPEND;
- else {
- id = MSM_PM_STAT_SUSPEND;
- msm_pm_sleep_limit = sleep_limit;
- }
-
- if (time != 0) {
- end_time = msm_timer_get_sclk_time(NULL);
- if (end_time != 0) {
- time = end_time - time;
- if (time < 0)
- time += period;
- } else
- time = 0;
- }
-
- msm_pm_add_stat(id, time);
- }
-#endif
-
- return 0;
-}
-
-static struct platform_suspend_ops msm_pm_ops = {
- .enter = msm_pm_enter,
- .valid = suspend_valid_only_mem,
-};
-
-static uint32_t restart_reason = 0x776655AA;
-
-static void msm_pm_power_off(void)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
- for (;;) ;
-}
-
-static void msm_pm_restart(char str, const char *cmd)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
-
- for (;;) ;
-}
-
-static int msm_reboot_call(struct notifier_block *this, unsigned long code, void *_cmd)
-{
- if((code == SYS_RESTART) && _cmd) {
- char *cmd = _cmd;
- if (!strcmp(cmd, "bootloader")) {
- restart_reason = 0x77665500;
- } else if (!strcmp(cmd, "recovery")) {
- restart_reason = 0x77665502;
- } else if (!strcmp(cmd, "eraseflash")) {
- restart_reason = 0x776655EF;
- } else if (!strncmp(cmd, "oem-", 4)) {
- unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
- restart_reason = 0x6f656d00 | code;
- } else {
- restart_reason = 0x77665501;
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block msm_reboot_notifier = {
- .notifier_call = msm_reboot_call,
-};
-
-#ifdef CONFIG_MSM_IDLE_STATS
-/*
- * Helper function of snprintf where buf is auto-incremented, size is auto-
- * decremented, and there is no return value.
- *
- * NOTE: buf and size must be l-values (e.g. variables)
- */
-#define SNPRINTF(buf, size, format, ...) \
- do { \
- if (size > 0) { \
- int ret; \
- ret = snprintf(buf, size, format, ## __VA_ARGS__); \
- if (ret > size) { \
- buf += size; \
- size = 0; \
- } else { \
- buf += ret; \
- size -= ret; \
- } \
- } \
- } while (0)
-
-/*
- * Write out the power management statistics.
- */
-static int msm_pm_read_proc(
- char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- int i;
- char *p = page;
-
- if (count < 1024) {
- *start = (char *) 0;
- *eof = 0;
- return 0;
- }
-
- if (!off) {
- SNPRINTF(p, count, "Last power collapse voted ");
- if (msm_pm_sleep_limit == SLEEP_LIMIT_NONE)
- SNPRINTF(p, count, "for TCXO shutdown\n\n");
- else
- SNPRINTF(p, count, "against TCXO shutdown\n\n");
-
- *start = (char *) 1;
- *eof = 0;
- } else if (--off < ARRAY_SIZE(msm_pm_stats)) {
- int64_t bucket_time;
- int64_t s;
- uint32_t ns;
-
- s = msm_pm_stats[off].total_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- "%s:\n"
- " count: %7d\n"
- " total_time: %lld.%09u\n",
- msm_pm_stats[off].name,
- msm_pm_stats[off].count,
- s, ns);
-
- bucket_time = msm_pm_stats[off].first_bucket_time;
- for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
- s = bucket_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- " <%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, msm_pm_stats[off].bucket[i],
- msm_pm_stats[off].min_time[i],
- msm_pm_stats[off].max_time[i]);
-
- bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
- }
-
- SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, msm_pm_stats[off].bucket[i],
- msm_pm_stats[off].min_time[i],
- msm_pm_stats[off].max_time[i]);
-
- *start = (char *) 1;
- *eof = (off + 1 >= ARRAY_SIZE(msm_pm_stats));
- }
-
- return p - page;
-}
-#undef SNPRINTF
-
-#define MSM_PM_STATS_RESET "reset"
-
-/*
- * Reset the power management statistics values.
- */
-static int msm_pm_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char buf[sizeof(MSM_PM_STATS_RESET)];
- int ret;
- unsigned long flags;
- int i;
-
- if (count < strlen(MSM_PM_STATS_RESET)) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
- ret = -EFAULT;
- goto write_proc_failed;
- }
-
- if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- local_irq_save(flags);
- for (i = 0; i < ARRAY_SIZE(msm_pm_stats); i++) {
- memset(msm_pm_stats[i].bucket,
- 0, sizeof(msm_pm_stats[i].bucket));
- memset(msm_pm_stats[i].min_time,
- 0, sizeof(msm_pm_stats[i].min_time));
- memset(msm_pm_stats[i].max_time,
- 0, sizeof(msm_pm_stats[i].max_time));
- msm_pm_stats[i].count = 0;
- msm_pm_stats[i].total_time = 0;
- }
-
- msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
- local_irq_restore(flags);
-
- return count;
-
-write_proc_failed:
- return ret;
-}
-#undef MSM_PM_STATS_RESET
-#endif /* CONFIG_MSM_IDLE_STATS */
-
-static int __init msm_pm_init(void)
-{
-#ifdef CONFIG_MSM_IDLE_STATS
- struct proc_dir_entry *d_entry;
-#endif
- int ret;
-
- pm_power_off = msm_pm_power_off;
- arm_pm_restart = msm_pm_restart;
- msm_pm_max_sleep_time = 0;
-
- register_reboot_notifier(&msm_reboot_notifier);
-
- msm_pm_sma.sleep_delay = smem_alloc(SMEM_SMSM_SLEEP_DELAY,
- sizeof(*msm_pm_sma.sleep_delay));
- if (msm_pm_sma.sleep_delay == NULL) {
- printk(KERN_ERR "msm_pm_init: failed get SLEEP_DELAY\n");
- return -ENODEV;
- }
-
- msm_pm_sma.limit_sleep = smem_alloc(SMEM_SMSM_LIMIT_SLEEP,
- sizeof(*msm_pm_sma.limit_sleep));
- if (msm_pm_sma.limit_sleep == NULL) {
- printk(KERN_ERR "msm_pm_init: failed get LIMIT_SLEEP\n");
- return -ENODEV;
- }
-
- msm_pm_sma.int_info_ext = smem_alloc(SMEM_SMSM_INT_INFO,
- sizeof(*msm_pm_sma.int_info_ext));
-
- if (msm_pm_sma.int_info_ext)
- msm_pm_sma.int_info = (struct smsm_interrupt_info *)
- msm_pm_sma.int_info_ext;
- else
- msm_pm_sma.int_info = smem_alloc(SMEM_SMSM_INT_INFO,
- sizeof(*msm_pm_sma.int_info));
-
- if (msm_pm_sma.int_info == NULL) {
- printk(KERN_ERR "msm_pm_init: failed get INT_INFO\n");
- return -ENODEV;
- }
-
- ret = msm_timer_init_time_sync(msm_pm_timeout);
- if (ret)
- return ret;
-
- BUG_ON(msm_pm_modes == NULL);
-
- atomic_set(&msm_pm_init_done, 1);
- suspend_set_ops(&msm_pm_ops);
-
-#ifdef CONFIG_MSM_IDLE_STATS
- d_entry = create_proc_entry("msm_pm_stats",
- S_IRUGO | S_IWUSR | S_IWGRP, NULL);
- if (d_entry) {
- d_entry->read_proc = msm_pm_read_proc;
- d_entry->write_proc = msm_pm_write_proc;
- d_entry->data = NULL;
- }
-#endif
-
- return 0;
-}
-
-void __init msm_pm_set_platform_data(
- struct msm_pm_platform_data *data, int count)
-{
- BUG_ON(MSM_PM_SLEEP_MODE_NR != count);
- msm_pm_modes = data;
-}
-
-late_initcall(msm_pm_init);
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index 09494a0..ce0d747 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -84,6 +84,8 @@
void __init msm_pm_init_sleep_status_data(
struct msm_pm_sleep_status_data *sleep_data);
+
+
#ifdef CONFIG_MSM_PM8X60
void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
int msm_pm_wait_cpu_shutdown(unsigned int cpu);
@@ -99,4 +101,29 @@
#else
static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
#endif
+
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE = 0,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+ int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+#endif
+
#endif /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index f4bfe23..426d6e6 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -23,10 +23,8 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/pm_qos_params.h>
-#include <linux/proc_fs.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/tick.h>
#include <linux/memory.h>
@@ -65,7 +63,7 @@
#include "spm.h"
#include "sirc.h"
#include "pm-boot.h"
-#define MSM_CORE1_RESET 0xA8600590
+#include "devices-msm7x2xa.h"
/******************************************************************************
* Debug Definitions
@@ -425,6 +423,7 @@
SLEEP_LIMIT_MASK = 0x03,
};
+static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
enum {
SLEEP_RESOURCE_MEMORY_BIT0 = 0x0200,
@@ -455,30 +454,21 @@
static void msm_pm_config_hw_before_power_down(void)
{
if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
- __raw_writel(1, APPS_PWRDOWN);
- mb();
__raw_writel(4, APPS_SECOP);
- mb();
} else if (cpu_is_msm7x27()) {
__raw_writel(0x1f, APPS_CLK_SLEEP_EN);
- mb();
- __raw_writel(1, APPS_PWRDOWN);
- mb();
} else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
cpu_is_msm7x25a() || cpu_is_msm7x25aa() ||
cpu_is_msm7x25ab()) {
__raw_writel(0x7, APPS_CLK_SLEEP_EN);
- mb();
- __raw_writel(1, APPS_PWRDOWN);
- mb();
- } else {
+ } else if (cpu_is_qsd8x50()) {
__raw_writel(0x1f, APPS_CLK_SLEEP_EN);
mb();
- __raw_writel(1, APPS_PWRDOWN);
- mb();
__raw_writel(0, APPS_STANDBY_CTL);
- mb();
}
+ mb();
+ __raw_writel(1, APPS_PWRDOWN);
+ mb();
}
/*
@@ -490,7 +480,7 @@
void __iomem *base_ptr;
unsigned int value = 0;
- base_ptr = ioremap_nocache(MSM_CORE1_RESET, SZ_4);
+ base_ptr = core1_reset_base();
if (!base_ptr)
return;
@@ -545,7 +535,6 @@
mb();
__raw_writel(0x0, base_ptr);
mb();
- iounmap(base_ptr);
}
/*
@@ -560,13 +549,11 @@
__raw_writel(0, APPS_PWRDOWN);
mb();
msm_spm_reinit();
- } else {
+ } else if (cpu_is_msm8625()) {
__raw_writel(0, APPS_PWRDOWN);
mb();
- __raw_writel(0, APPS_CLK_SLEEP_EN);
- mb();
- if (cpu_is_msm8625() && power_collapsed) {
+ if (power_collapsed) {
/*
* enable the SCU while coming out of power
* collapse.
@@ -577,6 +564,11 @@
*/
configure_top_csr();
}
+ } else {
+ __raw_writel(0, APPS_PWRDOWN);
+ mb();
+ __raw_writel(0, APPS_CLK_SLEEP_EN);
+ mb();
}
}
@@ -772,238 +764,6 @@
/******************************************************************************
- * CONFIG_MSM_IDLE_STATS
- *****************************************************************************/
-
-#ifdef CONFIG_MSM_IDLE_STATS
-enum msm_pm_time_stats_id {
- MSM_PM_STAT_REQUESTED_IDLE,
- MSM_PM_STAT_IDLE_SPIN,
- MSM_PM_STAT_IDLE_WFI,
- MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_POWER_COLLAPSE,
- MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
- MSM_PM_STAT_SUSPEND,
- MSM_PM_STAT_FAILED_SUSPEND,
- MSM_PM_STAT_NOT_IDLE,
- MSM_PM_STAT_COUNT
-};
-
-struct msm_pm_time_stats {
- const char *name;
- int64_t first_bucket_time;
- int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
- int count;
- int64_t total_time;
-};
-
-struct msm_pm_cpu_time_stats {
- struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(
- struct msm_pm_cpu_time_stats, msm_pm_stats);
-
-static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
-
-static DEFINE_SPINLOCK(msm_pm_stats_lock);
-
-/*
- * Add the given time data to the statistics collection.
- */
-static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
-{
- unsigned long flags;
- struct msm_pm_time_stats *stats;
- int i;
- int64_t bt;
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- stats = __get_cpu_var(msm_pm_stats).stats;
-
- stats[id].total_time += t;
- stats[id].count++;
-
- bt = t;
- do_div(bt, stats[id].first_bucket_time);
-
- if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
- (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
- i = DIV_ROUND_UP(fls((uint32_t)bt),
- CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
- else
- i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
-
- if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
- i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
-
- stats[id].bucket[i]++;
-
- if (t < stats[id].min_time[i] || !stats[id].max_time[i])
- stats[id].min_time[i] = t;
- if (t > stats[id].max_time[i])
- stats[id].max_time[i] = t;
-
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
-}
-
-/*
- * Helper function of snprintf where buf is auto-incremented, size is auto-
- * decremented, and there is no return value.
- *
- * NOTE: buf and size must be l-values (e.g. variables)
- */
-#define SNPRINTF(buf, size, format, ...) \
- do { \
- if (size > 0) { \
- int ret; \
- ret = snprintf(buf, size, format, ## __VA_ARGS__); \
- if (ret > size) { \
- buf += size; \
- size = 0; \
- } else { \
- buf += ret; \
- size -= ret; \
- } \
- } \
- } while (0)
-
-/*
- * Write out the power management statistics.
- */
-static int msm_pm_read_proc
- (char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- unsigned int cpu = off / MSM_PM_STAT_COUNT;
- int id = off % MSM_PM_STAT_COUNT;
- char *p = page;
-
- if (count < 1024) {
- *start = (char *) 0;
- *eof = 0;
- return 0;
- }
-
- if (!off) {
- SNPRINTF(p, count, "Last power collapse voted ");
- if ((msm_pm_sleep_limit & SLEEP_LIMIT_MASK) ==
- SLEEP_LIMIT_NONE)
- SNPRINTF(p, count, "for TCXO shutdown\n\n");
- else
- SNPRINTF(p, count, "against TCXO shutdown\n\n");
- }
-
- if (cpu < num_possible_cpus()) {
- unsigned long flags;
- struct msm_pm_time_stats *stats;
- int i;
- int64_t bucket_time;
- int64_t s;
- uint32_t ns;
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- stats = per_cpu(msm_pm_stats, cpu).stats;
-
- s = stats[id].total_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- "[cpu %u] %s:\n"
- " count: %7d\n"
- " total_time: %lld.%09u\n",
- cpu, stats[id].name,
- stats[id].count,
- s, ns);
-
- bucket_time = stats[id].first_bucket_time;
- for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
- s = bucket_time;
- ns = do_div(s, NSEC_PER_SEC);
- SNPRINTF(p, count,
- " <%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, stats[id].bucket[i],
- stats[id].min_time[i],
- stats[id].max_time[i]);
-
- bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
- }
-
- SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
- s, ns, stats[id].bucket[i],
- stats[id].min_time[i],
- stats[id].max_time[i]);
-
- *start = (char *) 1;
- *eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());
-
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
- }
-
- return p - page;
-}
-#undef SNPRINTF
-
-#define MSM_PM_STATS_RESET "reset"
-
-/*
- * Reset the power management statistics values.
- */
-static int msm_pm_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char buf[sizeof(MSM_PM_STATS_RESET)];
- int ret;
- unsigned long flags;
- unsigned int cpu;
-
- if (count < strlen(MSM_PM_STATS_RESET)) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
- ret = -EFAULT;
- goto write_proc_failed;
- }
-
- if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
- ret = -EINVAL;
- goto write_proc_failed;
- }
-
- spin_lock_irqsave(&msm_pm_stats_lock, flags);
- for_each_possible_cpu(cpu) {
- struct msm_pm_time_stats *stats;
- int i;
-
- stats = per_cpu(msm_pm_stats, cpu).stats;
- for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
- memset(stats[i].bucket,
- 0, sizeof(stats[i].bucket));
- memset(stats[i].min_time,
- 0, sizeof(stats[i].min_time));
- memset(stats[i].max_time,
- 0, sizeof(stats[i].max_time));
- stats[i].count = 0;
- stats[i].total_time = 0;
- }
- }
- msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
- spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
-
- return count;
-
-write_proc_failed:
- return ret;
-}
-
-#undef MSM_PM_STATS_RESET
-#endif /* CONFIG_MSM_IDLE_STATS */
-
-
-/******************************************************************************
* Shared Memory Bits
*****************************************************************************/
@@ -1194,7 +954,10 @@
#endif
#ifdef CONFIG_CACHE_L2X0
- l2cc_suspend();
+ if (!cpu_is_msm8625())
+ l2cc_suspend();
+ else
+ apps_power_collapse = 1;
#endif
collapsed = msm_pm_collapse();
@@ -1221,7 +984,10 @@
}
#ifdef CONFIG_CACHE_L2X0
- l2cc_resume();
+ if (!cpu_is_msm8625())
+ l2cc_resume();
+ else
+ apps_power_collapse = 0;
#endif
msm_pm_boot_config_after_pc(smp_processor_id());
@@ -1508,6 +1274,30 @@
return 0;
}
+static int64_t msm_pm_timer_enter_suspend(int64_t *period)
+{
+ int time = 0;
+
+ time = msm_timer_get_sclk_time(period);
+ if (!time)
+ pr_err("%s: Unable to read sclk.\n", __func__);
+ return time;
+}
+
+static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
+{
+
+ if (time != 0) {
+ int64_t end_time = msm_timer_get_sclk_time(NULL);
+ if (end_time != 0) {
+ time = end_time - time;
+ if (time < 0)
+ time += period;
+ } else
+ time = 0;
+ }
+ return time;
+}
/******************************************************************************
* External Idle/Suspend Functions
@@ -1526,28 +1316,22 @@
int ret;
int i;
unsigned int cpu;
-
-#ifdef CONFIG_MSM_IDLE_STATS
int64_t t1;
static DEFINE_PER_CPU(int64_t, t2);
int exit_stat;
- #endif
if (!atomic_read(&msm_pm_init_done))
return;
cpu = smp_processor_id();
-
latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* get the next timer expiration */
timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());
-#ifdef CONFIG_MSM_IDLE_STATS
t1 = ktime_to_ns(ktime_get());
msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
exit_stat = MSM_PM_STAT_IDLE_SPIN;
-#endif
for (i = 0; i < ARRAY_SIZE(allow); i++)
allow[i] = true;
@@ -1628,46 +1412,34 @@
low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
msm_timer_exit_idle(low_power);
-#ifdef CONFIG_MSM_IDLE_STATS
if (ret)
exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
else {
exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
msm_pm_sleep_limit = sleep_limit;
}
-#endif
} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
ret = msm_pm_power_collapse_standalone(true);
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = ret ?
MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
-#endif
} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
ret = msm_pm_swfi(true);
if (ret)
while (!msm_pm_irq_extns->irq_pending())
udelay(1);
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
-#endif
} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
msm_pm_swfi(false);
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_IDLE_WFI;
-#endif
} else {
while (!msm_pm_irq_extns->irq_pending())
udelay(1);
-#ifdef CONFIG_MSM_IDLE_STATS
exit_stat = MSM_PM_STAT_IDLE_SPIN;
-#endif
}
-#ifdef CONFIG_MSM_IDLE_STATS
__get_cpu_var(t2) = ktime_to_ns(ktime_get());
msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
-#endif
}
/*
@@ -1688,10 +1460,8 @@
uint32_t sleep_limit = SLEEP_LIMIT_NONE;
int ret = -EPERM;
int i;
-#ifdef CONFIG_MSM_IDLE_STATS
int64_t period = 0;
int64_t time = 0;
-#endif
/* Must executed by CORE0 */
if (smp_processor_id()) {
@@ -1699,9 +1469,7 @@
goto suspend_exit;
}
-#ifdef CONFIG_MSM_IDLE_STATS
- time = msm_timer_get_sclk_time(&period);
-#endif
+ time = msm_pm_timer_enter_suspend(&period);
MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
"%s(): sleep limit %u\n", __func__, sleep_limit);
@@ -1718,10 +1486,7 @@
if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
-#ifdef CONFIG_MSM_IDLE_STATS
enum msm_pm_time_stats_id id;
- int64_t end_time;
-#endif
clock_debug_print_enabled();
@@ -1751,7 +1516,6 @@
ret = msm_pm_power_collapse(
false, msm_pm_max_sleep_time, sleep_limit);
-#ifdef CONFIG_MSM_IDLE_STATS
if (ret)
id = MSM_PM_STAT_FAILED_SUSPEND;
else {
@@ -1759,18 +1523,8 @@
msm_pm_sleep_limit = sleep_limit;
}
- if (time != 0) {
- end_time = msm_timer_get_sclk_time(NULL);
- if (end_time != 0) {
- time = end_time - time;
- if (time < 0)
- time += period;
- } else
- time = 0;
- }
-
+ time = msm_pm_timer_exit_suspend(time, period);
msm_pm_add_stat(id, time);
-#endif
} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
ret = msm_pm_power_collapse_standalone(false);
} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
@@ -1880,12 +1634,21 @@
*/
static int __init msm_pm_init(void)
{
-#ifdef CONFIG_MSM_IDLE_STATS
- struct proc_dir_entry *d_entry;
- unsigned int cpu;
-#endif
int ret;
int val;
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_REQUESTED_IDLE,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ };
+
#ifdef CONFIG_CPU_V7
pgd_t *pc_pgd;
pmd_t *pmd;
@@ -1963,6 +1726,8 @@
*/
val = 0x00030002;
__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));
+
+ l2x0_base_addr = MSM_L2CC_BASE;
}
#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
@@ -1979,70 +1744,9 @@
suspend_set_ops(&msm_pm_ops);
msm_pm_mode_sysfs_add();
-#ifdef CONFIG_MSM_IDLE_STATS
- for_each_possible_cpu(cpu) {
- struct msm_pm_time_stats *stats =
- per_cpu(msm_pm_stats, cpu).stats;
-
- stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
- stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
- stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
- stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
- "idle-standalone-power-collapse";
- stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
- first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
- "idle-failed-standalone-power-collapse";
- stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
- first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
- "idle-power-collapse";
- stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
- "idle-failed-power-collapse";
- stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
- first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_SUSPEND].name = "suspend";
- stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
- CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend";
- stats[MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
-
- stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
- stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
- CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
- }
+ msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
atomic_set(&msm_pm_init_done, 1);
-
- d_entry = create_proc_entry("msm_pm_stats",
- S_IRUGO | S_IWUSR | S_IWGRP, NULL);
- if (d_entry) {
- d_entry->read_proc = msm_pm_read_proc;
- d_entry->write_proc = msm_pm_write_proc;
- d_entry->data = NULL;
- }
-#endif
-
return 0;
}
diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
index d4e4893..bec1d4c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_amrnb_in.c
@@ -821,13 +821,15 @@
}
/* Allow only single frame */
if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
- if (cfg.buffer_size != (FRAME_SIZE - 8))
+ if (cfg.buffer_size != (FRAME_SIZE - 8)) {
rc = -EINVAL;
break;
+ }
} else {
- if (cfg.buffer_size != (AMRNB_FRAME_SIZE + 14))
+ if (cfg.buffer_size != (AMRNB_FRAME_SIZE + 14)) {
rc = -EINVAL;
break;
+ }
}
audio->buffer_size = cfg.buffer_size;
break;
diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
index 99ad02b..05a16da 100644
--- a/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_evrc_in.c
@@ -799,13 +799,15 @@
}
/* Allow only single frame */
if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
- if (cfg.buffer_size != (FRAME_SIZE - 8))
+ if (cfg.buffer_size != (FRAME_SIZE - 8)) {
rc = -EINVAL;
break;
+ }
} else {
- if (cfg.buffer_size != (EVRC_FRAME_SIZE + 14))
+ if (cfg.buffer_size != (EVRC_FRAME_SIZE + 14)) {
rc = -EINVAL;
break;
+ }
}
audio->buffer_size = cfg.buffer_size;
break;
diff --git a/arch/arm/mach-msm/qdsp5/audio_lpa.c b/arch/arm/mach-msm/qdsp5/audio_lpa.c
index 8dfba0b..a7c2543 100644
--- a/arch/arm/mach-msm/qdsp5/audio_lpa.c
+++ b/arch/arm/mach-msm/qdsp5/audio_lpa.c
@@ -552,7 +552,9 @@
struct audpcm_buffer_node *buf_node;
struct list_head *ptr, *next;
union msm_audio_event_payload payload;
+ unsigned long flags;
+ spin_lock_irqsave(&audio->dsp_lock, flags);
MM_DBG("\n"); /* Macro prints the file name and function */
list_for_each_safe(ptr, next, &audio->out_queue) {
buf_node = list_entry(ptr, struct audpcm_buffer_node, list);
@@ -565,6 +567,7 @@
audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
audio->out_needed = 0;
atomic_set(&audio->out_bytes, 0);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
static void audio_ioport_reset(struct audio *audio)
{
diff --git a/arch/arm/mach-msm/qdsp5/audio_mp3.c b/arch/arm/mach-msm/qdsp5/audio_mp3.c
index 5abdf85..7f72e25 100644
--- a/arch/arm/mach-msm/qdsp5/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp5/audio_mp3.c
@@ -836,7 +836,9 @@
struct audmp3_buffer_node *buf_node;
struct list_head *ptr, *next;
union msm_audio_event_payload payload;
+ unsigned long flags;
+ spin_lock_irqsave(&audio->dsp_lock, flags);
MM_DBG("\n"); /* Macro prints the file name and function */
list_for_each_safe(ptr, next, &audio->out_queue) {
buf_node = list_entry(ptr, struct audmp3_buffer_node, list);
@@ -849,6 +851,7 @@
audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
audio->out_needed = 0;
atomic_set(&audio->out_bytes, 0);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
static void audio_flush(struct audio *audio)
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index 8fe8cf66..ef7a70b 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -593,6 +593,7 @@
audio_flush(audio);
mutex_unlock(&audio->write_lock);
}
+ break;
case AUDIO_SET_CONFIG: {
struct msm_audio_config config;
if (copy_from_user(&config, (void*) arg, sizeof(config))) {
diff --git a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
index 16c70ce..851980d 100644
--- a/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_pcm_in.c
@@ -585,7 +585,7 @@
audio->in_head = 0;
audio->in_tail = 0;
audio->in_count = 0;
- for (i = FRAME_NUM-1; i <= 0; i--) {
+ for (i = FRAME_NUM-1; i >= 0; i--) {
audio->in[i].size = 0;
audio->in[i].read = 0;
}
diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
index 6ebd5f9..e6906d0 100644
--- a/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
+++ b/arch/arm/mach-msm/qdsp5/audio_qcelp_in.c
@@ -800,13 +800,15 @@
}
/* Allow only single frame */
if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
- if (cfg.buffer_size != (FRAME_SIZE - 8))
+ if (cfg.buffer_size != (FRAME_SIZE - 8)) {
rc = -EINVAL;
break;
+ }
} else {
- if (cfg.buffer_size != (QCELP_FRAME_SIZE + 14))
+ if (cfg.buffer_size != (QCELP_FRAME_SIZE + 14)) {
rc = -EINVAL;
break;
+ }
}
audio->buffer_size = cfg.buffer_size;
break;
diff --git a/arch/arm/mach-msm/qdsp5/audio_wma.c b/arch/arm/mach-msm/qdsp5/audio_wma.c
index d767916..b17cdda 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wma.c
@@ -469,6 +469,7 @@
wake_up(&audio->write_wait);
if (audio->pcm_feedback)
audplay_buffer_refresh(audio);
+ break;
case AUDPP_MSG_PCMDMAMISSED:
MM_DBG("PCMDMAMISSED\n");
audio->teos = 1;
diff --git a/arch/arm/mach-msm/qdsp5/audio_wmapro.c b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
index 7fb08ff..5e806e6 100644
--- a/arch/arm/mach-msm/qdsp5/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp5/audio_wmapro.c
@@ -463,6 +463,7 @@
wake_up(&audio->write_wait);
if (audio->pcm_feedback)
audplay_buffer_refresh(audio);
+ break;
case AUDPP_MSG_PCMDMAMISSED:
MM_DBG("PCMDMAMISSED\n");
audio->teos = 1;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_a2dp_in.c b/arch/arm/mach-msm/qdsp5v2/audio_a2dp_in.c
index 60b5c20..733b7a1 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_a2dp_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_a2dp_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* sbc/pcm audio input driver
* Based on the pcm input driver in arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c
@@ -41,7 +41,6 @@
#include <mach/iommu_domains.h>
#include <mach/msm_adsp.h>
#include <mach/msm_memtypes.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/socinfo.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
#include <mach/qdsp5v2/qdsp5audrecmsg.h>
@@ -108,7 +107,7 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *msm_map;
+ void *msm_map;
int opened;
int enabled;
@@ -849,7 +848,7 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->msm_map);
+ iounmap(audio->msm_map);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
@@ -871,9 +870,7 @@
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->msm_map = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->msm_map = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->msm_map)) {
MM_ERR("could not map the phys address to kernel"
"space\n");
@@ -881,7 +878,7 @@
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = (u8 *)audio->msm_map->vaddr;
+ audio->data = (u8 *)audio->msm_map;
} else {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_aac.c b/arch/arm/mach-msm/qdsp5v2/audio_aac.c
index 9069426..32053bf 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_aac.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_aac.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -38,7 +38,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -142,8 +141,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -1629,9 +1628,9 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audaac_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
@@ -1821,10 +1820,8 @@
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
if (audio->phys) {
audio->map_v_write =
- msm_subsystem_map_buffer(audio->phys,
- pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ ioremap(audio->phys,
+ pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address, \
freeing instance 0x%08x\n",
@@ -1835,7 +1832,7 @@
kfree(audio);
goto done;
}
- audio->data = (u8 *)audio->map_v_write->vaddr;
+ audio->data = (u8 *)audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr \
0x%08x\n", audio->phys, (int)audio->data);
break;
@@ -1857,28 +1854,26 @@
MM_ERR("could not allocate read buffers, freeing instance \
0x%08x\n", (int)audio);
rc = -ENOMEM;
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
goto done;
}
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->read_phys,
- PCM_BUFSZ_MIN * PCM_BUF_MAX_COUNT,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_read = ioremap(audio->read_phys,
+ PCM_BUFSZ_MIN * PCM_BUF_MAX_COUNT);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read phys address, freeing instance \
0x%08x\n", (int)audio);
rc = -ENOMEM;
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
free_contiguous_memory_by_paddr(audio->read_phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
goto done;
}
- audio->read_data = audio->map_v_read->vaddr;
+ audio->read_data = audio->map_v_read;
MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->read_phys, (int)audio->read_data);
@@ -2000,9 +1995,9 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_aac_in.c b/arch/arm/mach-msm/qdsp5v2/audio_aac_in.c
index 010fd90..d2b4407 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_aac_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_aac_in.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -32,7 +32,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/iommu_domains.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
#include <mach/qdsp5v2/qdsp5audrecmsg.h>
@@ -97,8 +96,8 @@
wait_queue_head_t write_wait;
int32_t out_phys; /* physical address of write buffer */
char *out_data;
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int wflush; /*write flush */
@@ -1292,12 +1291,12 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
if (audio->out_data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
audio->out_data = NULL;
}
@@ -1320,16 +1319,14 @@
}
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_read = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map DMA buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = audio->map_v_read->vaddr;
+ audio->data = audio->map_v_read;
} else {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
@@ -1398,16 +1395,15 @@
rc = -ENOMEM;
goto evt_error;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->out_phys, BUFFER_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(
+ audio->out_phys, BUFFER_SIZE);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
- audio->out_data = audio->map_v_write->vaddr;
+ audio->out_data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->out_phys, (int)audio->out_data);
}
@@ -1434,7 +1430,7 @@
aac_in_listener, (void *) audio);
if (rc) {
MM_ERR("failed to register device event listener\n");
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_acdb.c b/arch/arm/mach-msm/qdsp5v2/audio_acdb.c
index 90373f9..89957a4 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_acdb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,6 @@
#include <mach/dal.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/audpp.h>
#include <mach/socinfo.h>
@@ -111,7 +110,7 @@
u16 *pbe_enable_flag;
u32 fluence_extbuff;
u8 *fluence_extbuff_virt;
- struct msm_mapped_buffer *map_v_fluence;
+ void *map_v_fluence;
struct acdb_pbe_block *pbe_blk;
@@ -130,7 +129,7 @@
/* pmem for get acdb blk */
unsigned long get_blk_paddr;
u8 *get_blk_kvaddr;
- struct msm_mapped_buffer *map_v_get_blk;
+ void *map_v_get_blk;
char *build_id;
};
@@ -140,7 +139,7 @@
u32 node_status;
s32 stream_id;
u32 phys_addr_acdb_values;
- struct msm_mapped_buffer *map_v_addr;
+ void *map_v_addr;
u8 *virt_addr_acdb_values;
struct auddev_evt_audcal_info device_info;
};
@@ -237,7 +236,7 @@
struct rtc_acdb_pmem {
u8 *viraddr;
int32_t phys;
- struct msm_mapped_buffer *map_v_rtc;
+ void *map_v_rtc;
};
struct rtc_acdb_data {
@@ -1087,11 +1086,11 @@
rtc_acdb.valid_abid = false;
if (rtc_read->viraddr != NULL || ((void *)rtc_read->phys) != NULL) {
- msm_subsystem_unmap_buffer(rtc_read->map_v_rtc);
+ iounmap(rtc_read->map_v_rtc);
free_contiguous_memory_by_paddr(rtc_read->phys);
}
if (rtc_write->viraddr != NULL || ((void *)rtc_write->phys) != NULL) {
- msm_subsystem_unmap_buffer(rtc_write->map_v_rtc);
+ iounmap(rtc_write->map_v_rtc);
free_contiguous_memory_by_paddr(rtc_write->phys);
}
}
@@ -1141,17 +1140,15 @@
result = -ENOMEM;
goto error;
}
- rtc_read->map_v_rtc = msm_subsystem_map_buffer(
- rtc_read->phys,
- PMEM_RTC_ACDB_QUERY_MEM,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ rtc_read->map_v_rtc = ioremap(rtc_read->phys,
+ PMEM_RTC_ACDB_QUERY_MEM);
if (IS_ERR(rtc_read->map_v_rtc)) {
MM_ERR("ACDB Could not map physical address\n");
result = -ENOMEM;
goto error;
}
- rtc_read->viraddr = rtc_read->map_v_rtc->vaddr;
+ rtc_read->viraddr = rtc_read->map_v_rtc;
memset(rtc_read->viraddr, 0, PMEM_RTC_ACDB_QUERY_MEM);
rtc_write->phys = allocate_contiguous_ebi_nomap(PMEM_RTC_ACDB_QUERY_MEM,
@@ -1162,16 +1159,15 @@
result = -ENOMEM;
goto error;
}
- rtc_write->map_v_rtc = msm_subsystem_map_buffer(
- rtc_write->phys, PMEM_RTC_ACDB_QUERY_MEM,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ rtc_write->map_v_rtc = ioremap(rtc_write->phys,
+ PMEM_RTC_ACDB_QUERY_MEM);
if (IS_ERR(rtc_write->map_v_rtc)) {
MM_ERR("ACDB Could not map physical address\n");
result = -ENOMEM;
goto error;
}
- rtc_write->viraddr = rtc_write->map_v_rtc->vaddr;
+ rtc_write->viraddr = rtc_write->map_v_rtc;
memset(rtc_write->viraddr, 0, PMEM_RTC_ACDB_QUERY_MEM);
init_waitqueue_head(&rtc_acdb.wait);
return true;
@@ -1187,11 +1183,11 @@
debugfs_remove(get_set_abid_data_dentry);
}
if (rtc_read->viraddr != NULL || ((void *)rtc_read->phys) != NULL) {
- msm_subsystem_unmap_buffer(rtc_read->map_v_rtc);
+ iounmap(rtc_read->map_v_rtc);
free_contiguous_memory_by_paddr(rtc_read->phys);
}
if (rtc_write->viraddr != NULL || ((void *)rtc_write->phys) != NULL) {
- msm_subsystem_unmap_buffer(rtc_write->map_v_rtc);
+ iounmap(rtc_write->map_v_rtc);
free_contiguous_memory_by_paddr(rtc_write->phys);
}
return false;
@@ -2544,11 +2540,9 @@
result = -ENOMEM;
goto error;
}
- acdb_cache_tx[i].map_v_addr =
- msm_subsystem_map_buffer(
+ acdb_cache_tx[i].map_v_addr = ioremap(
acdb_cache_tx[i].phys_addr_acdb_values,
- ACDB_BUF_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ ACDB_BUF_SIZE);
if (IS_ERR(acdb_cache_tx[i].map_v_addr)) {
MM_ERR("ACDB=> Could not map physical address\n");
result = -ENOMEM;
@@ -2557,15 +2551,14 @@
goto error;
}
acdb_cache_tx[i].virt_addr_acdb_values =
- acdb_cache_tx[i].map_v_addr->vaddr;
+ acdb_cache_tx[i].map_v_addr;
memset(acdb_cache_tx[i].virt_addr_acdb_values, 0,
ACDB_BUF_SIZE);
}
return result;
error:
for (err = 0; err < i; err++) {
- msm_subsystem_unmap_buffer(
- acdb_cache_tx[err].map_v_addr);
+ iounmap(acdb_cache_tx[err].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_tx[err].phys_addr_acdb_values);
}
@@ -2590,11 +2583,8 @@
goto error;
}
acdb_cache_rx[i].map_v_addr =
- msm_subsystem_map_buffer(
- acdb_cache_rx[i].phys_addr_acdb_values,
- ACDB_BUF_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ ioremap(acdb_cache_rx[i].phys_addr_acdb_values,
+ ACDB_BUF_SIZE);
if (IS_ERR(acdb_cache_rx[i].map_v_addr)) {
MM_ERR("ACDB=> Could not map physical address\n");
result = -ENOMEM;
@@ -2603,15 +2593,14 @@
goto error;
}
acdb_cache_rx[i].virt_addr_acdb_values =
- acdb_cache_rx[i].map_v_addr->vaddr;
+ acdb_cache_rx[i].map_v_addr;
memset(acdb_cache_rx[i].virt_addr_acdb_values, 0,
ACDB_BUF_SIZE);
}
return result;
error:
for (err = 0; err < i; err++) {
- msm_subsystem_unmap_buffer(
- acdb_cache_rx[err].map_v_addr);
+ iounmap(acdb_cache_rx[err].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_rx[err].phys_addr_acdb_values);
}
@@ -2628,10 +2617,8 @@
result = -ENOMEM;
goto error;
}
- acdb_data.map_v_get_blk = msm_subsystem_map_buffer(
- acdb_data.get_blk_paddr,
- ACDB_BUF_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ acdb_data.map_v_get_blk = ioremap(acdb_data.get_blk_paddr,
+ ACDB_BUF_SIZE);
if (IS_ERR(acdb_data.map_v_get_blk)) {
MM_ERR("ACDB=> Could not map physical address\n");
result = -ENOMEM;
@@ -2639,7 +2626,7 @@
acdb_data.get_blk_paddr);
goto error;
}
- acdb_data.get_blk_kvaddr = acdb_data.map_v_get_blk->vaddr;
+ acdb_data.get_blk_kvaddr = acdb_data.map_v_get_blk;
memset(acdb_data.get_blk_kvaddr, 0, ACDB_BUF_SIZE);
error:
return result;
@@ -2650,7 +2637,7 @@
u32 i = 0;
for (i = 0; i < MAX_COPP_NODE_SUPPORTED; i++) {
- msm_subsystem_unmap_buffer(acdb_cache_rx[i].map_v_addr);
+ iounmap(acdb_cache_rx[i].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_rx[i].phys_addr_acdb_values);
}
@@ -2661,7 +2648,7 @@
u32 i = 0;
for (i = 0; i < MAX_AUDREC_SESSIONS; i++) {
- msm_subsystem_unmap_buffer(acdb_cache_tx[i].map_v_addr);
+ iounmap(acdb_cache_tx[i].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_tx[i].phys_addr_acdb_values);
}
@@ -2669,7 +2656,7 @@
static void free_memory_acdb_get_blk(void)
{
- msm_subsystem_unmap_buffer(acdb_data.map_v_get_blk);
+ iounmap(acdb_data.map_v_get_blk);
free_contiguous_memory_by_paddr(acdb_data.get_blk_paddr);
}
@@ -2827,11 +2814,9 @@
result = -ENOMEM;
goto done;
}
- acdb_data.map_v_fluence =
- msm_subsystem_map_buffer(
+ acdb_data.map_v_fluence = ioremap(
acdb_data.fluence_extbuff,
- FLUENCE_BUF_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ FLUENCE_BUF_SIZE);
if (IS_ERR(acdb_data.map_v_fluence)) {
MM_ERR("ACDB=> Could not map physical address\n");
free_memory_acdb_get_blk();
@@ -2852,7 +2837,7 @@
goto done;
} else
acdb_data.fluence_extbuff_virt =
- acdb_data.map_v_fluence->vaddr;
+ acdb_data.map_v_fluence;
done:
return result;
}
@@ -3431,11 +3416,11 @@
for (i = 0; i < MAX_COPP_NODE_SUPPORTED; i++) {
if (i < MAX_AUDREC_SESSIONS) {
- msm_subsystem_unmap_buffer(acdb_cache_tx[i].map_v_addr);
+ iounmap(acdb_cache_tx[i].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_tx[i].phys_addr_acdb_values);
}
- msm_subsystem_unmap_buffer(acdb_cache_rx[i].map_v_addr);
+ iounmap(acdb_cache_rx[i].map_v_addr);
free_contiguous_memory_by_paddr(
acdb_cache_rx[i].phys_addr_acdb_values);
}
@@ -3446,7 +3431,7 @@
kfree(acdb_data.preproc_iir);
free_contiguous_memory_by_paddr(
(int32_t)acdb_data.pbe_extbuff);
- msm_subsystem_unmap_buffer(acdb_data.map_v_fluence);
+ iounmap(acdb_data.map_v_fluence);
free_contiguous_memory_by_paddr(
(int32_t)acdb_data.fluence_extbuff);
mutex_destroy(&acdb_data.acdb_mutex);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_adpcm.c b/arch/arm/mach-msm/qdsp5v2/audio_adpcm.c
index 4b8b7a6..a53128d 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_adpcm.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_adpcm.c
@@ -43,7 +43,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -139,8 +138,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
int wflush; /* Write flush */
@@ -1023,12 +1022,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("read buf map fail\n");
rc = -ENOMEM;
@@ -1038,7 +1035,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -1420,10 +1417,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audadpcm_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1615,10 +1612,7 @@
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
SZ_4K);
if (audio->phys) {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address, \
freeing instance 0x%08x\n",
@@ -1629,7 +1623,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr \
0x%08x\n", audio->phys, (int)audio->data);
break;
@@ -1729,7 +1723,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_amrnb.c b/arch/arm/mach-msm/qdsp5v2/audio_amrnb.c
index a09b71b..5f288dd 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_amrnb.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_amrnb.c
@@ -44,7 +44,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -132,8 +131,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -993,12 +992,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read phys address\n");
rc = -ENOMEM;
@@ -1007,7 +1004,7 @@
} else {
uint8_t index;
uint32_t offset = 0;
- audio->read_data = audio->map_v_read->vaddr;
+ audio->read_data = audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -1317,10 +1314,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audamrnb_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1512,9 +1509,7 @@
kfree(audio);
goto done;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address, freeing \
instance 0x%08x\n", (int)audio);
@@ -1525,7 +1520,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr \
0x%08x\n", audio->phys, (int)audio->data);
}
@@ -1610,7 +1605,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c b/arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c
index bdb5bb1..790c510 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_amrnb_in.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -32,7 +32,6 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/msm_adsp.h>
#include <mach/socinfo.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
@@ -99,7 +98,7 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *map_v_read;
+ void *map_v_read;
int opened;
int enabled;
@@ -767,7 +766,7 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
@@ -788,16 +787,14 @@
}
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_read = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map DMA buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = audio->map_v_read->vaddr;
+ audio->data = audio->map_v_read;
} else {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_amrwb.c b/arch/arm/mach-msm/qdsp5v2/audio_amrwb.c
index 48e9a9f..b74c054 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_amrwb.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_amrwb.c
@@ -45,7 +45,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -136,8 +135,8 @@
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -1003,12 +1002,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("Error could not map read"
" phys address\n");
@@ -1018,7 +1015,7 @@
} else {
uint8_t index;
uint32_t offset = 0;
- audio->read_data = audio->map_v_read->vaddr;
+ audio->read_data = audio->map_v_read;
audio->pcm_feedback = 1;
audio->buf_refresh = 0;
audio->pcm_buf_count =
@@ -1401,10 +1398,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audamrwb_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1591,9 +1588,7 @@
kfree(audio);
goto done;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys buffers, freeing \
instance 0x%08x\n", (int)audio);
@@ -1603,7 +1598,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->phys, (int)audio->data);
}
@@ -1692,7 +1687,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_evrc.c b/arch/arm/mach-msm/qdsp5v2/audio_evrc.c
index 9b5694d..8818cbd 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_evrc.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_evrc.c
@@ -40,7 +40,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -132,8 +131,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -982,12 +981,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read"
" phy address\n");
@@ -998,7 +995,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -1311,10 +1308,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audevrc_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1505,9 +1502,7 @@
kfree(audio);
goto done;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("failed to map write physical address, freeing \
instance 0x%08x\n", (int)audio);
@@ -1517,7 +1512,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->phys, (int)audio->data);
}
@@ -1604,7 +1599,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c b/arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c
index 50621c9..150e476 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -33,7 +33,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/socinfo.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
#include <mach/qdsp5v2/qdsp5audrecmsg.h>
@@ -131,8 +130,8 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int opened;
int enabled;
int running;
@@ -1319,12 +1318,12 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
if (audio->out_data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
audio->out_data = NULL;
}
@@ -1346,17 +1345,14 @@
}
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ audio->map_v_read = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read physical address\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = audio->map_v_read->vaddr;
+ audio->data = audio->map_v_read;
} else {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
@@ -1425,17 +1421,14 @@
rc = -ENOMEM;
goto evt_error;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->out_phys, BUFFER_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ audio->map_v_write = ioremap(audio->out_phys, BUFFER_SIZE);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could map write buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
- audio->out_data = audio->map_v_write->vaddr;
+ audio->out_data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->out_phys, (int)audio->out_data);
}
@@ -1461,7 +1454,7 @@
evrc_in_listener, (void *) audio);
if (rc) {
MM_ERR("failed to register device event listener\n");
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_mp3.c b/arch/arm/mach-msm/qdsp5v2/audio_mp3.c
index c639833..a4fc3e3 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_mp3.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_mp3.c
@@ -37,7 +37,6 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -198,8 +197,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
uint32_t drv_status;
int mfield; /* meta field embedded in data */
@@ -1609,12 +1608,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read buffer"
" physical address\n");
@@ -1625,7 +1622,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -2145,11 +2142,11 @@
wake_up(&audio->event_wait);
audmp3_reset_event_queue(audio);
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
}
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -2353,10 +2350,8 @@
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
SZ_4K);
if (audio->phys) {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ audio->map_v_write = ioremap(
+ audio->phys, pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("failed to map write physical"
" address , freeing instance"
@@ -2368,7 +2363,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr\
0x%08x\n", audio->phys,\
(int)audio->data);
@@ -2485,7 +2480,7 @@
msm_adsp_put(audio->audplay);
err:
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
}
audpp_adec_free(audio->dec_id);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
index dc41bf4..fae2401 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_mvs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -276,7 +276,7 @@
struct audio_mvs_dl_reply {
struct rpc_reply_hdr reply_hdr;
- uint32_t voc_pkt[MVS_MAX_VOC_PKT_SIZE/4];
+ uint32_t voc_pkt[Q5V2_MVS_MAX_VOC_PKT_SIZE/4];
uint32_t valid_frame_info_ptr;
uint32_t frame_mode;
@@ -288,7 +288,7 @@
struct audio_mvs_buf_node {
struct list_head list;
- struct msm_audio_mvs_frame frame;
+ struct q5v2_msm_audio_mvs_frame frame;
};
/* Each buffer is 20 ms, queue holds 200 ms of data. */
@@ -933,10 +933,15 @@
pr_debug("%s: UL AMR frame_type %d\n",
__func__, be32_to_cpu(*args));
- } else if ((frame_mode == MVS_FRAME_MODE_PCM_UL) ||
- (frame_mode == MVS_FRAME_MODE_VOC_TX)) {
- /* PCM and EVRC don't have frame_type */
+ } else if (frame_mode == MVS_FRAME_MODE_PCM_UL) {
+ /* PCM don't have frame_type */
buf_node->frame.frame_type = 0;
+ } else if (frame_mode == MVS_FRAME_MODE_VOC_TX) {
+ /* Extracting EVRC current buffer frame rate*/
+ buf_node->frame.frame_type = be32_to_cpu(*args);
+
+ pr_debug("%s: UL EVRC frame_type %d\n",
+ __func__, be32_to_cpu(*args));
} else if (frame_mode == MVS_FRAME_MODE_G711_UL) {
/* Extract G711 frame type. */
buf_node->frame.frame_type = be32_to_cpu(*args);
@@ -1065,7 +1070,7 @@
cpu_to_be32(AUDIO_MVS_PKT_NORMAL);
} else if (frame_mode == MVS_FRAME_MODE_VOC_RX) {
dl_reply.cdc_param.gnr_arg.param1 =
- cpu_to_be32(audio->rate_type);
+ cpu_to_be32(buf_node->frame.frame_type);
dl_reply.cdc_param.gnr_arg.param2 = 0;
dl_reply.cdc_param.\
gnr_arg.valid_pkt_status_ptr =
@@ -1427,7 +1432,7 @@
if ((audio->state == AUDIO_MVS_STARTED) &&
(!list_empty(&audio->out_queue))) {
- if (count >= sizeof(struct msm_audio_mvs_frame)) {
+ if (count >= sizeof(struct q5v2_msm_audio_mvs_frame)) {
buf_node = list_first_entry(&audio->out_queue,
struct audio_mvs_buf_node,
list);
@@ -1435,7 +1440,8 @@
rc = copy_to_user(buf,
&buf_node->frame,
- sizeof(struct msm_audio_mvs_frame));
+ sizeof(struct q5v2_msm_audio_mvs_frame)
+ );
if (rc == 0) {
rc = buf_node->frame.len +
@@ -1453,7 +1459,7 @@
} else {
pr_err("%s: Read count %d < sizeof(frame) %d",
__func__, count,
- sizeof(struct msm_audio_mvs_frame));
+ sizeof(struct q5v2_msm_audio_mvs_frame));
rc = -ENOMEM;
}
@@ -1491,7 +1497,7 @@
mutex_lock(&audio->in_lock);
if (audio->state == AUDIO_MVS_STARTED) {
- if (count <= sizeof(struct msm_audio_mvs_frame)) {
+ if (count <= sizeof(struct q5v2_msm_audio_mvs_frame)) {
if (!list_empty(&audio->free_in_queue)) {
buf_node =
list_first_entry(&audio->free_in_queue,
@@ -1511,7 +1517,7 @@
} else {
pr_err("%s: Write count %d < sizeof(frame) %d",
__func__, count,
- sizeof(struct msm_audio_mvs_frame));
+ sizeof(struct q5v2_msm_audio_mvs_frame));
rc = -ENOMEM;
}
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_out.c b/arch/arm/mach-msm/qdsp5v2/audio_out.c
index 9a93185..930de03 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_out.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -36,7 +36,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppcmdi.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
@@ -86,7 +85,7 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_write;
int teos; /* valid only if tunnel mode & no data left for decoder */
int opened;
int enabled;
@@ -704,16 +703,13 @@
{
the_audio.phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (the_audio.phys) {
- the_audio.map_v_write = msm_subsystem_map_buffer(
- the_audio.phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ the_audio.map_v_write = ioremap(the_audio.phys, DMASZ);
if (IS_ERR(the_audio.map_v_write)) {
MM_ERR("could not map physical buffers\n");
free_contiguous_memory_by_paddr(the_audio.phys);
return -ENOMEM;
}
- the_audio.data = the_audio.map_v_write->vaddr;
+ the_audio.data = the_audio.map_v_write;
} else {
MM_ERR("could not allocate physical buffers\n");
return -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_pcm.c b/arch/arm/mach-msm/qdsp5v2/audio_pcm.c
index b22820b..613ee57 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_pcm.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_pcm.c
@@ -40,7 +40,6 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppcmdi.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
@@ -172,7 +171,7 @@
/* data allocated for various buffers */
char *data;
int32_t phys;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_write;
uint32_t drv_status;
int wflush; /* Write flush */
int opened;
@@ -1382,7 +1381,7 @@
wake_up(&audio->event_wait);
audpcm_reset_event_queue(audio);
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
}
mutex_unlock(&audio->lock);
@@ -1560,10 +1559,8 @@
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
SZ_4K);
if (audio->phys) {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ audio->map_v_write = ioremap(
+ audio->phys, pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys\
address freeing instance \
@@ -1575,7 +1572,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x \
kernel addr 0x%08x\n",
audio->phys, (int)audio->data);
@@ -1679,7 +1676,7 @@
msm_adsp_put(audio->audplay);
err:
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
}
audpp_adec_free(audio->dec_id);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c b/arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c
index a5a9bd2..ce67ebb 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_pcm_in.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -33,7 +33,6 @@
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/msm_adsp.h>
#include <mach/socinfo.h>
@@ -113,7 +112,7 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *map_v_read;
+ void *map_v_read;
int opened;
int enabled;
@@ -843,7 +842,7 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
@@ -864,16 +863,14 @@
}
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_read = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map read phys buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = audio->map_v_read->vaddr;
+ audio->data = audio->map_v_read;
} else {
MM_ERR("could not allocate read buffers\n");
rc = -ENOMEM;
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_qcelp.c b/arch/arm/mach-msm/qdsp5v2/audio_qcelp.c
index ce5d421..c4851d9 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_qcelp.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_qcelp.c
@@ -41,7 +41,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -128,8 +127,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -984,12 +983,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("failed to map read buf\n");
rc = -ENOMEM;
@@ -999,7 +996,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -1313,10 +1310,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audqcelp_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1505,9 +1502,7 @@
kfree(audio);
goto done;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write phys address, freeing \
instance 0x%08x\n", (int)audio);
@@ -1517,7 +1512,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->phys, (int)audio->data);
}
@@ -1604,7 +1599,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c b/arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c
index d34499d..7041bde 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_qcelp_in.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -33,7 +33,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/socinfo.h>
#include <mach/qdsp5v2/qdsp5audreccmdi.h>
#include <mach/qdsp5v2/qdsp5audrecmsg.h>
@@ -133,8 +132,8 @@
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int opened;
int enabled;
@@ -1325,12 +1324,12 @@
audio->audrec = NULL;
audio->opened = 0;
if (audio->data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->phys);
audio->data = NULL;
}
if (audio->out_data) {
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
audio->out_data = NULL;
}
@@ -1352,16 +1351,14 @@
}
audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
if (audio->phys) {
- audio->map_v_read = msm_subsystem_map_buffer(
- audio->phys, DMASZ,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_read = ioremap(audio->phys, DMASZ);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("could not map DMA buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->phys);
goto done;
}
- audio->data = audio->map_v_read->vaddr;
+ audio->data = audio->map_v_read;
} else {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
@@ -1431,16 +1428,14 @@
rc = -ENOMEM;
goto evt_error;
} else {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->out_phys, BUFFER_SIZE,
- MSM_SUBSYSTEM_MAP_KADDR, NULL, 0);
+ audio->map_v_write = ioremap(audio->out_phys, BUFFER_SIZE);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers\n");
rc = -ENOMEM;
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
- audio->out_data = audio->map_v_write->vaddr;
+ audio->out_data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
audio->out_phys, (int)audio->out_data);
}
@@ -1466,7 +1461,7 @@
qcelp_in_listener, (void *) audio);
if (rc) {
MM_ERR("failed to register device event listener\n");
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->out_phys);
goto evt_error;
}
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_wma.c b/arch/arm/mach-msm/qdsp5v2/audio_wma.c
index f29b078..79439e1 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_wma.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_wma.c
@@ -45,7 +45,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -141,8 +140,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -1062,12 +1061,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("read buf alloc fail\n");
rc = -ENOMEM;
@@ -1077,7 +1074,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
@@ -1458,10 +1455,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audwma_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1652,10 +1649,7 @@
MM_DBG("pmemsz = %d\n", pmem_sz);
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
if (audio->phys) {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not allocate write buffers, \
freeing instance 0x%08x\n",
@@ -1666,7 +1660,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr \
0x%08x\n", audio->phys, (int)audio->data);
break;
@@ -1772,7 +1766,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp5v2/audio_wmapro.c b/arch/arm/mach-msm/qdsp5v2/audio_wmapro.c
index cf25359..6672ca0 100644
--- a/arch/arm/mach-msm/qdsp5v2/audio_wmapro.c
+++ b/arch/arm/mach-msm/qdsp5v2/audio_wmapro.c
@@ -44,7 +44,6 @@
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
@@ -141,8 +140,8 @@
/* data allocated for various buffers */
char *data;
int32_t phys; /* physical address of write buffer */
- struct msm_mapped_buffer *map_v_read;
- struct msm_mapped_buffer *map_v_write;
+ void *map_v_read;
+ void *map_v_write;
int mfield; /* meta field embedded in data */
int rflush; /* Read flush */
@@ -1074,12 +1073,10 @@
rc = -ENOMEM;
break;
}
- audio->map_v_read = msm_subsystem_map_buffer(
+ audio->map_v_read = ioremap(
audio->read_phys,
config.buffer_size *
- config.buffer_count,
- MSM_SUBSYSTEM_MAP_KADDR
- , NULL, 0);
+ config.buffer_count);
if (IS_ERR(audio->map_v_read)) {
MM_ERR("read buf map fail\n");
rc = -ENOMEM;
@@ -1089,7 +1086,7 @@
uint8_t index;
uint32_t offset = 0;
audio->read_data =
- audio->map_v_read->vaddr;
+ audio->map_v_read;
audio->pcm_feedback = 1;
audio->buf_refresh = 0;
audio->pcm_buf_count =
@@ -1471,10 +1468,10 @@
audio->event_abort = 1;
wake_up(&audio->event_wait);
audwmapro_reset_event_queue(audio);
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
if (audio->read_data) {
- msm_subsystem_unmap_buffer(audio->map_v_read);
+ iounmap(audio->map_v_read);
free_contiguous_memory_by_paddr(audio->read_phys);
}
mutex_unlock(&audio->lock);
@@ -1665,10 +1662,7 @@
MM_DBG("pmemsz = %d\n", pmem_sz);
audio->phys = allocate_contiguous_ebi_nomap(pmem_sz, SZ_4K);
if (audio->phys) {
- audio->map_v_write = msm_subsystem_map_buffer(
- audio->phys, pmem_sz,
- MSM_SUBSYSTEM_MAP_KADDR,
- NULL, 0);
+ audio->map_v_write = ioremap(audio->phys, pmem_sz);
if (IS_ERR(audio->map_v_write)) {
MM_ERR("could not map write buffers, \
freeing instance 0x%08x\n",
@@ -1679,7 +1673,7 @@
kfree(audio);
goto done;
}
- audio->data = audio->map_v_write->vaddr;
+ audio->data = audio->map_v_write;
MM_DBG("write buf: phy addr 0x%08x kernel addr \
0x%08x\n", audio->phys, (int)audio->data);
break;
@@ -1790,7 +1784,7 @@
event_err:
msm_adsp_put(audio->audplay);
err:
- msm_subsystem_unmap_buffer(audio->map_v_write);
+ iounmap(audio->map_v_write);
free_contiguous_memory_by_paddr(audio->phys);
audpp_adec_free(audio->dec_id);
kfree(audio);
diff --git a/arch/arm/mach-msm/qdsp6v2/dsp_debug.c b/arch/arm/mach-msm/qdsp6v2/dsp_debug.c
index 1d8195e..1fa0876 100644
--- a/arch/arm/mach-msm/qdsp6v2/dsp_debug.c
+++ b/arch/arm/mach-msm/qdsp6v2/dsp_debug.c
@@ -27,7 +27,6 @@
#include "../proc_comm.h"
#include <mach/debug_mm.h>
-#include <mach/msm_subsystem_map.h>
#include <mach/qdsp6v2/dsp_debug.h>
static wait_queue_head_t dsp_wait;
@@ -72,8 +71,7 @@
{
char cmd[32];
void __iomem *ptr;
- unsigned int flags = MSM_SUBSYSTEM_MAP_KADDR | MSM_SUBSYSTEM_MAP_CACHED;
- struct msm_mapped_buffer *mem_buffer;
+ void *mem_buffer;
if (count >= sizeof(cmd))
return -EINVAL;
@@ -96,21 +94,19 @@
}
}
/* assert DSP NMI */
- mem_buffer = msm_subsystem_map_buffer(DSP_NMI_ADDR, 0x16, flags,
- NULL, 0);
+ mem_buffer = ioremap(DSP_NMI_ADDR, 0x16);
if (IS_ERR((void *)mem_buffer)) {
pr_err("%s:map_buffer failed, error = %ld\n", __func__,
PTR_ERR((void *)mem_buffer));
return -ENOMEM;
}
- ptr = mem_buffer->vaddr;
+ ptr = mem_buffer;
if (!ptr) {
pr_err("Unable to map DSP NMI\n");
return -EFAULT;
}
writel(0x1, (void *)ptr);
- if (msm_subsystem_unmap_buffer(mem_buffer) < 0)
- pr_err("%s:unmap buffer failed\n", __func__);
+ iounmap(mem_buffer);
} else if (!strcmp(cmd, "boom")) {
q6audio_dsp_not_responding();
} else if (!strcmp(cmd, "continue-crash")) {
@@ -135,8 +131,7 @@
size_t mapsize = PAGE_SIZE;
unsigned addr;
void __iomem *ptr;
- unsigned int flags = MSM_SUBSYSTEM_MAP_KADDR | MSM_SUBSYSTEM_MAP_CACHED;
- struct msm_mapped_buffer *mem_buffer;
+ void *mem_buffer;
if ((dsp_ram_base == 0) || (dsp_ram_size == 0)) {
pr_err("[%s:%s] Memory Invalid or not initialized, Base = 0x%x,"
@@ -158,29 +153,26 @@
mapsize *= 2;
while (count >= PAGE_SIZE) {
- mem_buffer = msm_subsystem_map_buffer(addr, mapsize, flags,
- NULL, 0);
+ mem_buffer = ioremap(addr, mapsize);
if (IS_ERR((void *)mem_buffer)) {
pr_err("%s:map_buffer failed, error = %ld\n",
__func__, PTR_ERR((void *)mem_buffer));
return -ENOMEM;
}
- ptr = mem_buffer->vaddr;
+ ptr = mem_buffer;
if (!ptr) {
pr_err("[%s:%s] map error @ %x\n", __MM_FILE__,
__func__, addr);
return -EFAULT;
}
if (copy_to_user(buf, ptr, PAGE_SIZE)) {
- if (msm_subsystem_unmap_buffer(mem_buffer) < 0)
- pr_err("%s: unmap buffer failed\n", __func__);
+ iounmap(mem_buffer);
pr_err("[%s:%s] copy error @ %p\n", __MM_FILE__,
__func__, buf);
return -EFAULT;
}
copy_ok_count += PAGE_SIZE;
- if (msm_subsystem_unmap_buffer(mem_buffer) < 0)
- pr_err("%s: unmap buffer failed\n", __func__);
+ iounmap(mem_buffer);
addr += PAGE_SIZE;
buf += PAGE_SIZE;
actual += PAGE_SIZE;
diff --git a/arch/arm/mach-msm/qdsp6v2/snddev_ecodec.c b/arch/arm/mach-msm/qdsp6v2/snddev_ecodec.c
index eb394a3..f75af16 100644
--- a/arch/arm/mach-msm/qdsp6v2/snddev_ecodec.c
+++ b/arch/arm/mach-msm/qdsp6v2/snddev_ecodec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -344,7 +344,7 @@
mutex_init(&drv->dev_lock);
drv->ref_cnt = 0;
- drv->ecodec_clk = clk_get(NULL, "pcm_clk");
+ drv->ecodec_clk = clk_get_sys(NULL, "pcm_clk");
if (IS_ERR(drv->ecodec_clk)) {
pr_err("%s: could not get pcm_clk\n", __func__);
return PTR_ERR(drv->ecodec_clk);
diff --git a/arch/arm/mach-msm/qdsp6v2/snddev_icodec.c b/arch/arm/mach-msm/qdsp6v2/snddev_icodec.c
index 216d982..ea935cc 100644
--- a/arch/arm/mach-msm/qdsp6v2/snddev_icodec.c
+++ b/arch/arm/mach-msm/qdsp6v2/snddev_icodec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -308,7 +308,7 @@
}
msm_snddev_rx_mclk_request();
- drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk");
+ drv->rx_osrclk = clk_get_sys(NULL, "i2s_spkr_osr_clk");
if (IS_ERR(drv->rx_osrclk))
pr_err("%s master clock Error\n", __func__);
@@ -320,7 +320,7 @@
}
clk_enable(drv->rx_osrclk);
- drv->rx_bitclk = clk_get(0, "i2s_spkr_bit_clk");
+ drv->rx_bitclk = clk_get_sys(NULL, "i2s_spkr_bit_clk");
if (IS_ERR(drv->rx_bitclk))
pr_err("%s clock Error\n", __func__);
@@ -437,7 +437,7 @@
msm_snddev_tx_mclk_request();
- drv->tx_osrclk = clk_get(0, "i2s_mic_osr_clk");
+ drv->tx_osrclk = clk_get_sys(NULL, "i2s_mic_osr_clk");
if (IS_ERR(drv->tx_osrclk))
pr_err("%s master clock Error\n", __func__);
@@ -449,7 +449,7 @@
}
clk_enable(drv->tx_osrclk);
- drv->tx_bitclk = clk_get(0, "i2s_mic_bit_clk");
+ drv->tx_bitclk = clk_get_sys(NULL, "i2s_mic_bit_clk");
if (IS_ERR(drv->tx_bitclk))
pr_err("%s clock Error\n", __func__);
diff --git a/arch/arm/mach-msm/qdsp6v2/snddev_mi2s.c b/arch/arm/mach-msm/qdsp6v2/snddev_mi2s.c
index a99b600..75a7411 100644
--- a/arch/arm/mach-msm/qdsp6v2/snddev_mi2s.c
+++ b/arch/arm/mach-msm/qdsp6v2/snddev_mi2s.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -184,7 +184,7 @@
}
/* set up osr clk */
- drv->tx_osrclk = clk_get(0, "mi2s_osr_clk");
+ drv->tx_osrclk = clk_get_sys(NULL, "mi2s_osr_clk");
if (IS_ERR(drv->tx_osrclk))
pr_err("%s master clock Error\n", __func__);
@@ -197,7 +197,7 @@
clk_enable(drv->tx_osrclk);
/* set up bit clk */
- drv->tx_bitclk = clk_get(0, "mi2s_bit_clk");
+ drv->tx_bitclk = clk_get_sys(NULL, "mi2s_bit_clk");
if (IS_ERR(drv->tx_bitclk))
pr_err("%s clock Error\n", __func__);
diff --git a/arch/arm/mach-msm/restart.c b/arch/arm/mach-msm/restart.c
index 5382102..e45e2c4 100644
--- a/arch/arm/mach-msm/restart.c
+++ b/arch/arm/mach-msm/restart.c
@@ -232,21 +232,10 @@
printk(KERN_ERR "Restarting has failed\n");
}
-static int __init msm_restart_init(void)
+static int __init msm_pmic_restart_init(void)
{
int rc;
-#ifdef CONFIG_MSM_DLOAD_MODE
- atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
- dload_mode_addr = MSM_IMEM_BASE + DLOAD_MODE_ADDR;
-
- /* Reset detection is switched on below.*/
- set_dload_mode(download_mode);
-#endif
- msm_tmr0_base = msm_timer_get_timer0_base();
- restart_reason = MSM_IMEM_BASE + RESTART_REASON_ADDR;
- pm_power_off = msm_power_off;
-
if (pmic_reset_irq != 0) {
rc = request_any_context_irq(pmic_reset_irq,
resout_irq_handler, IRQF_TRIGGER_HIGH,
@@ -260,4 +249,19 @@
return 0;
}
-late_initcall(msm_restart_init);
+late_initcall(msm_pmic_restart_init);
+
+static int __init msm_restart_init(void)
+{
+#ifdef CONFIG_MSM_DLOAD_MODE
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+ dload_mode_addr = MSM_IMEM_BASE + DLOAD_MODE_ADDR;
+ set_dload_mode(download_mode);
+#endif
+ msm_tmr0_base = msm_timer_get_timer0_base();
+ restart_reason = MSM_IMEM_BASE + RESTART_REASON_ADDR;
+ pm_power_off = msm_power_off;
+
+ return 0;
+}
+early_initcall(msm_restart_init);
diff --git a/arch/arm/mach-msm/rpc_pmapp.c b/arch/arm/mach-msm/rpc_pmapp.c
index 811e63c..1d18553 100644
--- a/arch/arm/mach-msm/rpc_pmapp.c
+++ b/arch/arm/mach-msm/rpc_pmapp.c
@@ -548,7 +548,7 @@
int pmapp_disp_backlight_set_brightness(int value)
{
- if (value < 0 || value > 100)
+ if (value < 0 || value > 255)
return -EINVAL;
return pmapp_rpc_set_only(value, 0, 0, 0, 1,
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
new file mode 100644
index 0000000..df8d9b3
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+
+int msm_rpm_register_notifier(struct notifier_block *nb);
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
new file mode 100644
index 0000000..b892d05
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -0,0 +1,1430 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <mach/rpm-smd.h>
+#include <mach/rpm-regulator-smd.h>
+#include <mach/socinfo.h>
+
+/* Debug Definitions */
+
+enum {
+ RPM_VREG_DEBUG_REQUEST = BIT(0),
+ RPM_VREG_DEBUG_FULL_REQUEST = BIT(1),
+ RPM_VREG_DEBUG_DUPLICATE = BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+ debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+ pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_smd_type {
+ RPM_REGULATOR_SMD_TYPE_LDO,
+ RPM_REGULATOR_SMD_TYPE_SMPS,
+ RPM_REGULATOR_SMD_TYPE_VS,
+ RPM_REGULATOR_SMD_TYPE_NCP,
+ RPM_REGULATOR_SMD_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+ RPM_REGULATOR_PARAM_ENABLE,
+ RPM_REGULATOR_PARAM_VOLTAGE,
+ RPM_REGULATOR_PARAM_CURRENT,
+ RPM_REGULATOR_PARAM_MODE_LDO,
+ RPM_REGULATOR_PARAM_MODE_SMPS,
+ RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+ RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+ RPM_REGULATOR_PARAM_FREQUENCY,
+ RPM_REGULATOR_PARAM_HEAD_ROOM,
+ RPM_REGULATOR_PARAM_QUIET_MODE,
+ RPM_REGULATOR_PARAM_FREQ_REASON,
+ RPM_REGULATOR_PARAM_MAX,
+};
+
+#define RPM_SET_CONFIG_ACTIVE BIT(0)
+#define RPM_SET_CONFIG_SLEEP BIT(1)
+#define RPM_SET_CONFIG_BOTH (RPM_SET_CONFIG_ACTIVE \
+ | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+ char *name;
+ char *property_name;
+ u32 key;
+ u32 min;
+ u32 max;
+ u32 supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+ _name, _min, _max, _property_name) \
+ [RPM_REGULATOR_PARAM_##_idx] = { \
+ .name = _name, \
+ .property_name = _property_name, \
+ .min = _min, \
+ .max = _max, \
+ .supported_regulator_types = \
+ _support_ldo << RPM_REGULATOR_SMD_TYPE_LDO | \
+ _support_smps << RPM_REGULATOR_SMD_TYPE_SMPS | \
+ _support_vs << RPM_REGULATOR_SMD_TYPE_VS | \
+ _support_ncp << RPM_REGULATOR_SMD_TYPE_NCP, \
+ }
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+ /* ID LDO SMPS VS NCP name min max property-name */
+ PARAM(ENABLE, 1, 1, 1, 1, "swen", 0, 1, "qcom,init-enable"),
+ PARAM(VOLTAGE, 1, 1, 0, 1, "uv", 0, 0x7FFFFFF, "qcom,init-voltage"),
+ PARAM(CURRENT, 1, 1, 0, 0, "ma", 0, 0x1FFF, "qcom,init-current"),
+ PARAM(MODE_LDO, 1, 0, 0, 0, "lsmd", 0, 1, "qcom,init-ldo-mode"),
+ PARAM(MODE_SMPS, 0, 1, 0, 0, "ssmd", 0, 2, "qcom,init-smps-mode"),
+ PARAM(PIN_CTRL_ENABLE, 1, 1, 1, 0, "pcen", 0, 0xF, "qcom,init-pin-ctrl-enable"),
+ PARAM(PIN_CTRL_MODE, 1, 1, 1, 0, "pcmd", 0, 0x1F, "qcom,init-pin-ctrl-mode"),
+ PARAM(FREQUENCY, 0, 1, 0, 1, "freq", 0, 16, "qcom,init-frequency"),
+ PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
+ PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
+ PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
+};
+
+struct rpm_vreg_request {
+ u32 param[RPM_REGULATOR_PARAM_MAX];
+ u32 valid;
+ u32 modified;
+};
+
+struct rpm_vreg {
+	struct rpm_vreg_request	aggr_req_active;
+	struct rpm_vreg_request	aggr_req_sleep;
+	struct list_head	reg_list;
+	const char		*resource_name;
+	u32			resource_id;
+	bool			allow_atomic;
+	int			regulator_type;
+	int			hpm_min_load;
+	int			enable_time;
+	spinlock_t		slock;
+	struct mutex		mlock;
+	unsigned long		flags;
+	bool			sleep_request_sent;
+	struct msm_rpm_request	*handle_active;
+	struct msm_rpm_request	*handle_sleep;
+};
+
+struct rpm_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct rpm_vreg *rpm_vreg;
+ struct list_head list;
+ bool set_active;
+ bool set_sleep;
+ struct rpm_vreg_request req;
+ int system_load;
+ int min_uV;
+ int max_uV;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately. Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse. For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP MSM_RPM_CTX_SLEEP_SET
+
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+ int i, len;
+ u32 output = 0;
+
+ len = strnlen(str, sizeof(u32));
+ for (i = 0; i < len; i++)
+ output |= str[i] << (i * 8);
+
+ return output;
+}
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+ return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+ && (rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)))
+ || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+ && (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator. It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP 1000
+
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load;
+}
+
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
+
+#define MICRO_TO_MILLI(uV) ((uV) / 1000)
+#define MILLI_TO_MICRO(uV) ((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT 0
+#define REQ_PREV 1
+#define REQ_CACHED 2
+#define REQ_TYPES 3
+
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+			bool sent)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+	struct rpm_vreg_request *aggr;
+	bool first;
+	u32 mask[REQ_TYPES] = {0, 0, 0};
+	const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+	int pos = 0;
+	int i, j;
+
+	aggr = (set == RPM_SET_ACTIVE)
+		? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+	if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent
+		   && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+		mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+	} else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+		mask[REQ_SENT] = aggr->modified;
+	}
+
+	if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+		return;
+
+	if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+		mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+		mask[REQ_SENT] = 0;
+		mask[REQ_PREV] = 0;
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+			KERN_INFO, __func__);
+
+	pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+			rpm_vreg->resource_name, rpm_vreg->resource_id,
+			regulator->rdesc.name,
+			(set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+	for (i = 0; i < REQ_TYPES; i++) {
+		if (mask[i])
+			pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+				req_names[i]);
+
+		first = true;
+		for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+			if (mask[i] & BIT(j)) {
+				pos += scnprintf(buf + pos, buflen - pos,
+					"%s%s=%u", (first ? "" : ", "),
+					params[j].name, aggr->param[j]);
+				first = false;
+			}
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk("%s", buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+ (_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+ (_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+} \
+
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+ const u32 *param, int idx, u32 set)
+{
+ struct msm_rpm_request *handle;
+
+ handle = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+
+ if (rpm_vreg->allow_atomic)
+ return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+ (u8 *)¶m[idx], 4);
+ else
+ return msm_rpm_add_kvp_data(handle, params[idx].key,
+ (u8 *)¶m[idx], 4);
+}
+
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+ const u32 *param, u32 prev_valid, u32 *modified)
+{
+ u32 value_changed = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if (param[i] != prev_param[i])
+ value_changed |= BIT(i);
+ }
+
+ /*
+ * Only keep bits that are for changed parameters or previously
+ * invalid parameters.
+ */
+ *modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+ u32 set, const u32 *param, u32 modified)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ /* Only send requests for modified parameters. */
+ if (modified & BIT(i)) {
+ rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+ set);
+ if (rc) {
+ vreg_err(regulator,
+ "add KVP failed: %s %u; %s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id, params[i].name,
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct msm_rpm_request *handle
+ = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+ int rc;
+
+ if (rpm_vreg->allow_atomic)
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+ handle));
+ else
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+
+ if (rc)
+ vreg_err(regulator, "msm rpm send failed: %s %u; set=%s, "
+ "rc=%d\n", rpm_vreg->resource_name,
+ rpm_vreg->resource_id,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+ return rc;
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ |= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * The RPM treats freq=0 as a special value meaning that this consumer does not
+ * care what the SMPS switching frequency is.
+ */
+#define RPM_REGULATOR_FREQ_DONT_CARE 0
+
+static inline void rpm_vreg_freqency_aggr(u32 *freq, u32 consumer_freq)
+{
+ if (consumer_freq != RPM_REGULATOR_FREQ_DONT_CARE
+ && (consumer_freq < *freq
+ || *freq == RPM_REGULATOR_FREQ_DONT_CARE))
+ *freq = consumer_freq;
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+ RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+ RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+ rpm_vreg_freqency_aggr(¶m_aggr[RPM_REGULATOR_PARAM_FREQUENCY],
+ param_reg[RPM_REGULATOR_PARAM_FREQUENCY]);
+ RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+}
+
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ u32 param_active[RPM_REGULATOR_PARAM_MAX];
+ u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+ u32 modified_active, modified_sleep;
+ struct rpm_regulator *reg;
+ bool sleep_set_differs = false;
+ bool send_active = false;
+ bool send_sleep = false;
+ int rc = 0;
+ int i;
+
+ memset(param_active, 0, sizeof(param_active));
+ memset(param_sleep, 0, sizeof(param_sleep));
+ modified_active = rpm_vreg->aggr_req_active.modified;
+ modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+ /*
+ * Aggregate all of the requests for this regulator in both active
+ * and sleep sets.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ if (reg->set_active) {
+ rpm_vreg_aggregate_params(param_active, reg->req.param);
+ modified_active |= reg->req.modified;
+ }
+ if (reg->set_sleep) {
+ rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+ modified_sleep |= reg->req.modified;
+ }
+ }
+
+ /*
+ * Check if the aggregated sleep set parameter values differ from the
+ * aggregated active set parameter values.
+ */
+ if (!rpm_vreg->sleep_request_sent) {
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if ((param_active[i] != param_sleep[i])
+ && (modified_sleep & BIT(i))) {
+ sleep_set_differs = true;
+ break;
+ }
+ }
+ }
+
+ /* Add KVPs to the active set RPM request if they have new values. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+ param_active, rpm_vreg->aggr_req_active.valid,
+ &modified_active);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+ param_active, modified_active);
+ if (rc)
+ return rc;
+ send_active = modified_active;
+
+ /*
+ * Sleep set configurations are only sent if they differ from the
+ * active set values. This is because the active set values will take
+ * effect during rpm assisted power collapse in the absence of sleep set
+ * values.
+ *
+ * However, once a sleep set request is sent for a given regulator,
+ * additional sleep set requests must be sent in the future even if they
+ * match the corresponding active set requests.
+ */
+ if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+ /* Add KVPs to the sleep set RPM request if they are new. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+ param_sleep, rpm_vreg->aggr_req_sleep.valid,
+ &modified_sleep);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+ param_sleep, modified_sleep);
+ if (rc)
+ return rc;
+ send_sleep = modified_sleep;
+ }
+
+ /* Send active set request to the RPM if it contains new KVPs. */
+ if (send_active) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE);
+ if (rc)
+ return rc;
+ rpm_vreg->aggr_req_active.valid |= modified_active;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_active.modified = modified_active;
+ memcpy(rpm_vreg->aggr_req_active.param, param_active,
+ sizeof(param_active));
+
+ /* Handle debug printing of the active set request. */
+ rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+ if (send_active)
+ rpm_vreg->aggr_req_active.modified = 0;
+
+ /* Send sleep set request to the RPM if it contains new KVPs. */
+ if (send_sleep) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP);
+ if (rc)
+ return rc;
+ else
+ rpm_vreg->sleep_request_sent = true;
+ rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+ memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+ sizeof(param_sleep));
+
+ /* Handle debug printing of the sleep set request. */
+ rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+ if (send_sleep)
+ rpm_vreg->aggr_req_sleep.modified = 0;
+
+ /*
+ * Loop over all requests for this regulator to update the valid and
+ * modified values for use in future aggregation.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ reg->req.valid |= reg->req.modified;
+ reg->req.modified = 0;
+ }
+
+ return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "enable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc;
+	u32 prev_enable;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+	RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+	rc = rpm_vreg_aggregate_requests(reg);
+	if (rc) {
+		vreg_err(reg, "disable failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_voltage;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_voltage = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, min_uV);
+
+ /* Only send a new voltage if the regulator is currently enabled. */
+ if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, prev_voltage);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int uV;
+
+ uV = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ if (uV == 0)
+ uV = VOLTAGE_UNKNOWN;
+
+ return uV;
+}
+
+static int rpm_vreg_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int uV = 0;
+
+ if (selector == 0)
+ uV = reg->min_uV;
+ else if (selector == 1)
+ uV = reg->max_uV;
+
+ return uV;
+}
+
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u32 prev_current;
+	int prev_uA;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+
+	prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+	prev_uA = MILLI_TO_MICRO(prev_current);
+
+	if (mode == REGULATOR_MODE_NORMAL) {
+		/* Make sure that request current is in HPM range. */
+		if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+	} else if (mode == REGULATOR_MODE_IDLE) {
+		/* Make sure that request current is in LPM range. */
+		if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+			RPM_VREG_SET_PARAM(reg, CURRENT,
+			    MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+	} else {
+		vreg_err(reg, "invalid mode: %u\n", mode);
+		rpm_vreg_unlock(reg->rpm_vreg);
+		return -EINVAL;
+	}
+
+	/* Only send a new mode value if the regulator is currently enabled. */
+	if (rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg))
+		rc = rpm_vreg_aggregate_requests(reg);
+
+	if (rc) {
+		vreg_err(reg, "set mode failed, rc=%d", rc);
+		RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+	}
+
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+ >= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+			int input_uV, int output_uV, int load_uA)
+{
+	struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+	u32 load_mA;
+
+	load_uA += reg->system_load;
+
+	load_mA = MICRO_TO_MILLI(load_uA);
+	if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+		load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+	rpm_vreg_lock(reg->rpm_vreg);
+	RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+	rpm_vreg_unlock(reg->rpm_vreg);
+
+	return (load_uA >= reg->rpm_vreg->hpm_min_load)
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->rpm_vreg->enable_time;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+	struct rpm_regulator *framework_reg;
+	struct rpm_regulator *priv_reg = NULL;
+	struct regulator *regulator;
+	struct rpm_vreg *rpm_vreg;
+
+	regulator = regulator_get(dev, supply);
+	if (IS_ERR_OR_NULL(regulator)) {
+		pr_err("could not find regulator for: dev=%s, id=%s\n",
+			(dev ? dev_name(dev) : ""), (supply ? supply : ""));
+		return ERR_PTR(-ENODEV);
+	}
+
+	framework_reg = regulator_get_drvdata(regulator);
+	if (framework_reg == NULL) {
+		pr_err("regulator structure not found.\n");
+		regulator_put(regulator);
+		return ERR_PTR(-ENODEV);
+	}
+	regulator_put(regulator);
+
+	rpm_vreg = framework_reg->rpm_vreg;
+
+	priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (priv_reg == NULL) {
+		vreg_err(framework_reg, "could not allocate memory for "
+			"regulator\n");
+		/* no rpm_vreg lock is held at this point; nothing to unlock */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Allocate a regulator_dev struct so that framework callback functions
+	 * can be called from the private API functions.
+	 */
+	priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+	if (priv_reg->rdev == NULL) {
+		vreg_err(framework_reg, "could not allocate memory for "
+			"regulator_dev\n");
+		kfree(priv_reg);
+		/* no rpm_vreg lock is held at this point; nothing to unlock */
+		return ERR_PTR(-ENOMEM);
+	}
+	priv_reg->rdev->reg_data = priv_reg;
+	priv_reg->rpm_vreg	= rpm_vreg;
+	priv_reg->rdesc.name	= framework_reg->rdesc.name;
+	priv_reg->set_active	= framework_reg->set_active;
+	priv_reg->set_sleep	= framework_reg->set_sleep;
+	priv_reg->min_uV	= framework_reg->min_uV;
+	priv_reg->max_uV	= framework_reg->max_uV;
+	priv_reg->system_load	= framework_reg->system_load;
+
+	might_sleep_if(!rpm_vreg->allow_atomic);
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&priv_reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	return priv_reg;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+ if (regulator == NULL || regulator->rpm_vreg == NULL) {
+ pr_err("invalid rpm_regulator pointer\n");
+ return -EINVAL;
+ }
+
+ might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+ return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg;
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return;
+
+ rpm_vreg = regulator->rpm_vreg;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_del(®ulator->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ kfree(regulator->rdev);
+ kfree(regulator);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers. Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled. If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator. This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV)
+{
+ int rc = rpm_regulator_check_input(regulator);
+ int uV = min_uV;
+
+ if (rc)
+ return rc;
+
+ if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_SMD_TYPE_VS) {
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ if (min_uV > max_uV) {
+ vreg_err(regulator, "min_uV=%d must be less than max_uV=%d\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+ uV = regulator->min_uV;
+
+ if (uV < regulator->min_uV || uV > regulator->max_uV) {
+ vreg_err(regulator, "request v=[%d, %d] is outside allowed "
+ "v=[%d, %d]\n", min_uV, max_uV, regulator->min_uV,
+ regulator->max_uV);
+ return -EINVAL;
+ }
+
+ return rpm_vreg_set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_set_voltage);
+
+static struct regulator_ops ldo_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .list_voltage = rpm_vreg_list_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .list_voltage = rpm_vreg_list_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .list_voltage = rpm_vreg_list_voltage,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+ [RPM_REGULATOR_SMD_TYPE_LDO] = &ldo_ops,
+ [RPM_REGULATOR_SMD_TYPE_SMPS] = &smps_ops,
+ [RPM_REGULATOR_SMD_TYPE_VS] = &switch_ops,
+ [RPM_REGULATOR_SMD_TYPE_NCP] = &ncp_ops,
+};
+
+static int __devexit rpm_vreg_device_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg;
+
+ reg = platform_get_drvdata(pdev);
+ if (reg) {
+ rpm_vreg_lock(reg->rpm_vreg);
+ regulator_unregister(reg->rdev);
+ list_del(®->list);
+ kfree(reg);
+ rpm_vreg_unlock(reg->rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __devexit rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg, *reg_temp;
+ struct rpm_vreg *rpm_vreg;
+
+ rpm_vreg = platform_get_drvdata(pdev);
+ if (rpm_vreg) {
+ rpm_vreg_lock(rpm_vreg);
+ list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+ list) {
+ /* Only touch data for private consumers. */
+ if (reg->rdev->desc == NULL) {
+ list_del(®->list);
+ kfree(reg->rdev);
+ kfree(reg);
+ } else {
+ dev_err(dev, "%s: not all child devices have "
+ "been removed\n", __func__);
+ }
+ }
+ rpm_vreg_unlock(rpm_vreg);
+
+ msm_rpm_free_request(rpm_vreg->handle_active);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+ kfree(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices which have
+ * properties which are required to configure individual regulator
+ * framework regulators for a given RPM regulator resource.
+ */
+/*
+ * Probe one child rpm-regulator device node.  Each child node describes a
+ * single regulator framework interface to the parent RPM resource: its
+ * properties are parsed here, the entry is added to the parent's reg_list
+ * and registered with the regulator framework.
+ *
+ * Fixes vs. original: mis-encoded '&reg' tokens (rendered as the
+ * registered-trademark character) are restored, and the "outsided
+ * allowed range" warning typo is corrected.
+ */
+static int __devinit rpm_vreg_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct regulator_init_data *init_data;
+	struct rpm_vreg *rpm_vreg;
+	struct rpm_regulator *reg;
+	int rc = 0;
+	int i, regulator_type;
+	u32 val;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdev->dev.parent == NULL) {
+		dev_err(dev, "%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (reg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for reg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	regulator_type = rpm_vreg->regulator_type;
+	reg->rpm_vreg = rpm_vreg;
+	reg->rdesc.ops = vreg_ops[regulator_type];
+	reg->rdesc.owner = THIS_MODULE;
+	reg->rdesc.type = REGULATOR_VOLTAGE;
+
+	/* Switch (VS) regulators expose no voltage range. */
+	if (regulator_type == RPM_REGULATOR_SMD_TYPE_VS)
+		reg->rdesc.n_voltages = 0;
+	else
+		reg->rdesc.n_voltages = 2;
+
+	rc = of_property_read_u32(node, "qcom,set", &val);
+	if (rc) {
+		dev_err(dev, "%s: sleep set and/or active set must be "
+			"configured via qcom,set property, rc=%d\n", __func__,
+			rc);
+		goto fail_free_reg;
+	} else if (!(val & RPM_SET_CONFIG_BOTH)) {
+		dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+			val);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+	reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+	init_data = of_get_regulator_init_data(dev);
+	if (init_data == NULL) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_free_reg;
+	}
+	if (init_data->constraints.name == NULL) {
+		dev_err(dev, "%s: regulator name not specified\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+	if (of_get_property(node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	/*
+	 * Fill in ops and mode masks based on callbacks specified for
+	 * this type of regulator.
+	 */
+	if (reg->rdesc.ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+	if (reg->rdesc.ops->get_voltage)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+	if (reg->rdesc.ops->get_mode) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	}
+
+	reg->rdesc.name = init_data->constraints.name;
+	reg->min_uV = init_data->constraints.min_uV;
+	reg->max_uV = init_data->constraints.max_uV;
+
+	/* Initialize the param array based on optional properties. */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		rc = of_property_read_u32(node, params[i].property_name, &val);
+		if (rc == 0) {
+			if (params[i].supported_regulator_types
+					& BIT(regulator_type)) {
+				if (val < params[i].min
+						|| val > params[i].max) {
+					pr_warn("%s: device tree property: "
+						"%s=%u is outside allowed "
+						"range [%u, %u]\n",
+						reg->rdesc.name,
+						params[i].property_name, val,
+						params[i].min, params[i].max);
+					continue;
+				}
+				reg->req.param[i] = val;
+				reg->req.modified |= BIT(i);
+			} else {
+				pr_warn("%s: regulator type=%d does not support"
+					" device tree property: %s\n",
+					reg->rdesc.name, regulator_type,
+					params[i].property_name);
+			}
+		}
+	}
+
+	of_property_read_u32(node, "qcom,system_load", &reg->system_load);
+
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	reg->rdev = regulator_register(&reg->rdesc, dev, init_data, reg, node);
+	if (IS_ERR(reg->rdev)) {
+		rc = PTR_ERR(reg->rdev);
+		reg->rdev = NULL;
+		pr_err("regulator_register failed: %s, rc=%d\n",
+			reg->rdesc.name, rc);
+		goto fail_remove_from_list;
+	}
+
+	platform_set_drvdata(pdev, reg);
+
+	pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+	return 0;
+
+fail_remove_from_list:
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&reg->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+	kfree(reg);
+	return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices which have
+ * properties which are required to identify a given RPM resource.
+ */
+/*
+ * Probe a parent rpm-regulator resource node: read the RPM resource
+ * identity, create active- and sleep-set RPM request handles and populate
+ * the child regulator device nodes.
+ *
+ * Fix vs. original: when msm_rpm_create_request() returned NULL, the
+ * error path used PTR_ERR(NULL) == 0, so the probe freed everything yet
+ * reported success.  A NULL handle now yields -ENOMEM.
+ */
+static int __devinit rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct rpm_vreg *rpm_vreg;
+	int val = 0;
+	u32 resource_type;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Create new rpm_vreg entry. */
+	rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for vreg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Required device tree properties: */
+	rc = of_property_read_string(node, "qcom,resource-name",
+			&rpm_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+	resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+	rc = of_property_read_u32(node, "qcom,resource-id",
+			&rpm_vreg->resource_id);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	rc = of_property_read_u32(node, "qcom,regulator-type",
+			&rpm_vreg->regulator_type);
+	if (rc) {
+		dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	if ((rpm_vreg->regulator_type < 0)
+	    || (rpm_vreg->regulator_type >= RPM_REGULATOR_SMD_TYPE_MAX)) {
+		dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+			rpm_vreg->regulator_type);
+		rc = -EINVAL;
+		goto fail_free_vreg;
+	}
+
+	/* Optional device tree properties: */
+	of_property_read_u32(node, "qcom,allow-atomic", &val);
+	rpm_vreg->allow_atomic = !!val;
+	of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+	of_property_read_u32(node, "qcom,hpm-min-load",
+			&rpm_vreg->hpm_min_load);
+
+	rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_active == NULL
+	    || IS_ERR(rpm_vreg->handle_active)) {
+		rc = rpm_vreg->handle_active ?
+			PTR_ERR(rpm_vreg->handle_active) : -ENOMEM;
+		dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_vreg;
+	}
+
+	rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (rpm_vreg->handle_sleep == NULL || IS_ERR(rpm_vreg->handle_sleep)) {
+		rc = rpm_vreg->handle_sleep ?
+			PTR_ERR(rpm_vreg->handle_sleep) : -ENOMEM;
+		dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_handle_active;
+	}
+
+	INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+	/* Atomic consumers get a spinlock; everyone else a mutex. */
+	if (rpm_vreg->allow_atomic)
+		spin_lock_init(&rpm_vreg->slock);
+	else
+		mutex_init(&rpm_vreg->mlock);
+
+	platform_set_drvdata(pdev, rpm_vreg);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+			rc);
+		goto fail_unset_drvdata;
+	}
+
+	pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+		resource_type, rpm_vreg->resource_id);
+
+	return rc;
+
+fail_unset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+	msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+	kfree(rpm_vreg);
+
+	return rc;
+}
+
+static struct of_device_id rpm_vreg_match_table_device[] = {
+	{ .compatible = "qcom,rpm-regulator-smd", },
+	{}
+};
+
+static struct of_device_id rpm_vreg_match_table_resource[] = {
+	{ .compatible = "qcom,rpm-regulator-smd-resource", },
+	{}
+};
+
+/* Driver for second-level (per-regulator-interface) device tree nodes. */
+static struct platform_driver rpm_vreg_device_driver = {
+	.probe = rpm_vreg_device_probe,
+	.remove = __devexit_p(rpm_vreg_device_remove),
+	.driver = {
+		.name = "qcom,rpm-regulator-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_device,
+	},
+};
+
+/* Driver for first-level (per-RPM-resource) device tree nodes. */
+static struct platform_driver rpm_vreg_resource_driver = {
+	.probe = rpm_vreg_resource_probe,
+	.remove = __devexit_p(rpm_vreg_resource_remove),
+	.driver = {
+		.name = "qcom,rpm-regulator-smd-resource",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_vreg_match_table_resource,
+	},
+};
+
+/**
+ * rpm_regulator_smd_driver_init() - initialized SMD RPM regulator driver
+ *
+ * This function registers the SMD RPM regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_regulator_smd_driver_init(void)
+{
+	static bool initialized;
+	int i, rc;
+
+	/* Callable from both an early init hook and module_init. */
+	if (initialized)
+		return 0;
+	else
+		initialized = true;
+
+	/* Store parameter string names as integers */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+		params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+	rc = platform_driver_register(&rpm_vreg_device_driver);
+	if (rc)
+		return rc;
+
+	rc = platform_driver_register(&rpm_vreg_resource_driver);
+	/*
+	 * Fix vs. original: do not leave the device driver registered when
+	 * the resource driver registration fails.
+	 */
+	if (rc)
+		platform_driver_unregister(&rpm_vreg_device_driver);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rpm_regulator_smd_driver_init);
+
+/* Unregister both platform drivers on module unload. */
+static void __exit rpm_vreg_exit(void)
+{
+	platform_driver_unregister(&rpm_vreg_device_driver);
+	platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+/* The init helper guards itself against double registration, so exposing
+ * it both as module_init and as an exported early-init entry is safe. */
+module_init(rpm_regulator_smd_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMD RPM regulator driver");
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
new file mode 100644
index 0000000..75f4d92
--- /dev/null
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -0,0 +1,826 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <mach/socinfo.h>
+#include <mach/msm_smd.h>
+#include <mach/rpm-smd.h>
+#include "rpm-notifier.h"
+
+/* Book-keeping for the single SMD channel used to reach the RPM. */
+struct msm_rpm_driver_data {
+	const char *ch_name;	/* SMD channel name from the device tree */
+	uint32_t ch_type;	/* SMD edge/channel type */
+	smd_channel_t *ch_info;
+	struct work_struct work;	/* ack-draining work item */
+	spinlock_t smd_lock_write;
+	spinlock_t smd_lock_read;
+	struct completion smd_open;	/* completed on SMD_EVENT_OPEN */
+};
+
+/* Initial size of a request's marshalling buffer; grown on demand. */
+#define DEFAULT_BUFFER_SIZE 256
+/* Allocation flags: atomic when the caller cannot sleep. */
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
+/* Error string returned by the RPM for an unknown resource. */
+#define INV_HDR "resource does not exist"
+/* Marker word introducing the error blob in an ack packet. */
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 60
+
+/* Notifier chain invoked for every sleep-set KVP write. */
+static struct atomic_notifier_head msm_rpm_sleep_notifier;
+/* True when no RPM channel exists; requests are then completed locally. */
+static bool standalone;
+
+/* Register to be notified of sleep-set requests (e.g. low power code). */
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+/* Message types understood by the RPM; only requests are sent today. */
+enum {
+	MSM_RPM_MSG_REQUEST_TYPE = 0,
+	MSM_RPM_MSG_TYPE_NR,
+};
+
+/* Wire service-type word for each message type. */
+static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
+	0x716572, /* 'req\0' */
+};
+
+/*the order of fields matter and reflect the order expected by the RPM*/
+struct rpm_request_header {
+	uint32_t service_type;
+	uint32_t request_len;
+};
+
+/* Per-message header, also in RPM wire order. */
+struct rpm_message_header {
+	uint32_t msg_id;
+	enum msm_rpm_set set;
+	uint32_t resource_type;
+	uint32_t resource_id;
+	uint32_t data_len;
+};
+
+struct msm_rpm_kvp_data {
+	uint32_t key;
+	uint32_t nbytes; /* number of bytes */
+	uint8_t *value;
+	bool valid;	/* part of the currently pending request */
+};
+
+/* Monotonically increasing message id; 0 is never handed out. */
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+static struct msm_rpm_driver_data msm_rpm_data;
+
+/* A client request handle: headers, KVP table and marshalling buffer. */
+struct msm_rpm_request {
+	struct rpm_request_header req_hdr;
+	struct rpm_message_header msg_hdr;
+	struct msm_rpm_kvp_data *kvp;
+	uint32_t num_elements;	/* capacity of kvp[] */
+	uint32_t write_idx;	/* number of kvp[] slots in use */
+	uint8_t *buf;		/* marshalling buffer */
+	uint32_t numbytes;	/* size of buf */
+};
+
+/*
+ * Data related to message acknowledgement
+ */
+
+/* Requests written to SMD that are awaiting an RPM ack. */
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+	struct list_head list;
+	uint32_t msg_id;
+	bool ack_recd;	/* set once the matching ack has arrived */
+	int errno;	/* error code decoded from the ack */
+	struct completion ack;
+};
+/* Protects msm_rpm_wait_list and its entries. */
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+/* On-wire layout of an RPM acknowledgement packet. */
+struct msm_rpm_ack_msg {
+	uint32_t req;
+	uint32_t req_len;
+	uint32_t rsc_id;
+	uint32_t msg_len;
+	uint32_t id_ack;	/* echoes the request's msg_id */
+};
+
+/* Nonzero while a noirq waiter is polling SMD; keeps the workqueue off. */
+static int irq_process;
+
+LIST_HEAD(msm_rpm_ack_list);
+
+/*
+ * Forward one sleep-set KVP write to the registered sleep notifiers.
+ *
+ * Fix vs. original: the '&notif' argument was mis-encoded in the source
+ * (rendered as a garbled character); it is restored here.
+ */
+static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
+		struct msm_rpm_kvp_data *kvp)
+{
+	struct msm_rpm_notifier_data notif;
+
+	notif.rsc_type = hdr->resource_type;
+	notif.rsc_id = hdr->resource_id;
+	notif.key = kvp->key;
+	notif.size = kvp->nbytes;
+	notif.value = kvp->value;
+	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+/*
+ * Add or update one key/value pair on a request handle.
+ *
+ * Existing keys are updated in place (a size change reallocates the value
+ * buffer); an identical rewrite is a no-op.  msg_hdr.data_len is kept in
+ * sync with the marshalled size of the pending request.
+ *
+ * NOTE(review): the per-KVP overhead is taken as
+ * sizeof(struct rpm_request_header) (8 bytes, which equals the key +
+ * length words written by msm_rpm_send_data) -- confirm this match is
+ * intentional rather than coincidental.
+ *
+ * Returns 0 on success, -EINVAL for a NULL handle, -ENOMEM when the KVP
+ * table is full or allocation fails.
+ */
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+	int i;
+	int data_size, msg_size;
+
+	if (!handle)
+		return -EINVAL;
+
+	/* Values occupy 4-byte-aligned slots on the wire. */
+	data_size = ALIGN(size, SZ_4);
+	msg_size = data_size + sizeof(struct rpm_request_header);
+
+	/* Look for an existing slot holding this key. */
+	for (i = 0; i < handle->write_idx; i++) {
+		if (handle->kvp[i].key != key)
+			continue;
+		if (handle->kvp[i].nbytes != data_size) {
+			kfree(handle->kvp[i].value);
+			handle->kvp[i].value = NULL;
+		} else {
+			/* Same key, size and bytes: nothing to do. */
+			if (!memcmp(handle->kvp[i].value, data, data_size))
+				return 0;
+		}
+		break;
+	}
+
+	if (i >= handle->num_elements)
+		return -ENOMEM;
+
+	if (i == handle->write_idx)
+		handle->write_idx++;
+
+	if (!handle->kvp[i].value) {
+		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+		if (!handle->kvp[i].value)
+			return -ENOMEM;
+	} else {
+		/* We enter the else case, if a key already exists but the
+		 * data doesn't match. In which case, we should zero the data
+		 * out.
+		 */
+		memset(handle->kvp[i].value, 0, data_size);
+	}
+
+	/* Grow data_len by the full entry, or just by the size delta. */
+	if (!handle->kvp[i].valid)
+		handle->msg_hdr.data_len += msg_size;
+	else
+		handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
+
+	handle->kvp[i].nbytes = data_size;
+	handle->kvp[i].key = key;
+	/* Copy only 'size' bytes; alignment padding stays zeroed. */
+	memcpy(handle->kvp[i].value, data, size);
+	handle->kvp[i].valid = true;
+
+	/* Sleep-set writes are also broadcast to the sleep notifiers. */
+	if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+		msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
+
+	return 0;
+
+}
+
+/*
+ * Allocate and initialize a client request handle for up to num_elements
+ * key/value pairs targeting one resource in one set.
+ *
+ * Improvements vs. original: the KVP table is allocated with kcalloc()
+ * (overflow-checked multiply) and allocation failures are logged at
+ * error severity instead of info/warn.
+ *
+ * Returns the handle, or NULL on allocation failure.
+ */
+static struct msm_rpm_request *msm_rpm_create_request_common(
+		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+		int num_elements, bool noirq)
+{
+	struct msm_rpm_request *cdata;
+
+	cdata = kzalloc(sizeof(struct msm_rpm_request), GFP_FLAG(noirq));
+	if (!cdata) {
+		pr_err("%s():Cannot allocate memory for client data\n",
+				__func__);
+		goto cdata_alloc_fail;
+	}
+
+	cdata->msg_hdr.set = set;
+	cdata->msg_hdr.resource_type = rsc_type;
+	cdata->msg_hdr.resource_id = rsc_id;
+	cdata->msg_hdr.data_len = 0;
+
+	cdata->num_elements = num_elements;
+	cdata->write_idx = 0;
+
+	/* kcalloc checks num_elements * element size for overflow. */
+	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
+				GFP_FLAG(noirq));
+	if (!cdata->kvp) {
+		pr_err("%s(): Cannot allocate memory for key value data\n",
+				__func__);
+		goto kvp_alloc_fail;
+	}
+
+	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+	if (!cdata->buf)
+		goto buf_alloc_fail;
+
+	cdata->numbytes = DEFAULT_BUFFER_SIZE;
+	return cdata;
+
+buf_alloc_fail:
+	kfree(cdata->kvp);
+kvp_alloc_fail:
+	kfree(cdata);
+cdata_alloc_fail:
+	return NULL;
+}
+
+/*
+ * Free a request handle along with its KVP values and marshalling buffer.
+ *
+ * Fix vs. original: handle->buf (allocated in
+ * msm_rpm_create_request_common and possibly reallocated in
+ * msm_rpm_send_data) was never freed, leaking at least
+ * DEFAULT_BUFFER_SIZE bytes per request.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	int i;
+
+	if (!handle)
+		return;
+	for (i = 0; i < handle->write_idx; i++)
+		kfree(handle->kvp[i].value);
+	kfree(handle->kvp);
+	kfree(handle->buf);
+	kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+/* Create a request handle; may sleep (GFP_KERNEL allocations). */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+/* Atomic-context variant of msm_rpm_create_request (GFP_ATOMIC). */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+/* Stage a key/value pair on the handle; may sleep. */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+/* Atomic-context variant of msm_rpm_add_kvp_data. */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+	BUG_ON(!pdata);
+
+	/* Ignore events that race with channel teardown. */
+	if (!(pdata->ch_info))
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		/* Incoming ack: hand off to process context. */
+		queue_work(msm_rpm_smd_wq, &pdata->work);
+		break;
+	case SMD_EVENT_OPEN:
+		complete(&pdata->smd_open);
+		break;
+	case SMD_EVENT_CLOSE:
+	case SMD_EVENT_STATUS:
+	case SMD_EVENT_REOPEN_READY:
+		break;
+	default:
+		pr_info("Unknown SMD event\n");
+
+	}
+}
+
+/*
+ * Look up the wait-list entry for msg_id; returns NULL when absent.
+ *
+ * Fix vs. original: elem was declared uninitialized, so when the wait
+ * list was empty (loop body never executed) the function returned an
+ * indeterminate pointer.  It is now initialized to NULL.
+ */
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+	struct list_head *ptr;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each(ptr, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem && (elem->msg_id == msg_id))
+			break;
+		elem = NULL;
+	}
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	return elem;
+}
+
+/*
+ * Hand out the next usable message id: never 0 and never one that is
+ * still waiting for an acknowledgement.
+ */
+static int msm_rpm_get_next_msg_id(void)
+{
+	int id = 0;
+
+	while (id == 0 || msm_rpm_get_entry_from_msg_id(id))
+		id = atomic_inc_return(&msm_rpm_msg_id);
+
+	return id;
+}
+
+/*
+ * Allocate a wait-list entry for msg_id and queue it so the ack handler
+ * can complete it later.  Returns 0 on success or -ENOMEM.
+ */
+static int msm_rpm_add_wait_list(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *entry;
+	unsigned long flags;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (entry == NULL)
+		return -ENOMEM;
+
+	init_completion(&entry->ack);
+	entry->ack_recd = false;
+	entry->msg_id = msg_id;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_add(&entry->list, &msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return 0;
+}
+
+/* Unlink a wait-list entry under the list lock, then release it. */
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_del(&elem->list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	kfree(elem);
+}
+
+/*
+ * Complete the waiter registered for msg_id, recording the errno carried
+ * by the ack.
+ *
+ * Fix vs. original: elem was read uninitialized by WARN_ON(!elem) when
+ * the wait list was empty (loop body never ran); it is now initialized to
+ * NULL so an unexpected ack triggers the WARN instead of undefined
+ * behavior.
+ */
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+	struct list_head *ptr;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each(ptr, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem && (elem->msg_id == msg_id)) {
+			elem->errno = errno;
+			elem->ack_recd = true;
+			complete(&elem->ack);
+			break;
+		}
+		elem = NULL;
+	}
+	/* An ack with no matching waiter indicates a protocol problem. */
+	WARN_ON(!elem);
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+/* On-wire layout of a single KVP as sent to the RPM. */
+struct msm_rpm_kvp_packet {
+	uint32_t id;
+	uint32_t len;
+	uint32_t val;
+};
+
+/* The ack echoes the originating message id in its id_ack field. */
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+	return ((struct msm_rpm_ack_msg *)buf)->id_ack;
+}
+
+/*
+ * Decode the error portion of an RPM ack packet.
+ *
+ * Returns 0 when the ack carries no error payload, -EINVAL when the RPM
+ * reported "resource does not exist", and -ENODEV for any other error
+ * string.
+ *
+ * Fix vs. original: min() on a uint32_t and a size_t trips the kernel's
+ * min() type check; min_t(uint32_t, ...) is used instead.
+ *
+ * NOTE(review): the req_len adjustment assumes the ack's req_len field
+ * counts the error blob minus two words -- confirm against the RPM wire
+ * format; a short or corrupt packet could underflow this arithmetic.
+ */
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+	uint8_t *tmp;
+	uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
+
+	int rc = -ENODEV;
+
+	req_len -= sizeof(struct msm_rpm_ack_msg);
+	req_len += 2 * sizeof(uint32_t);
+	if (!req_len)
+		return 0;
+
+	tmp = buf + sizeof(struct msm_rpm_ack_msg);
+
+	/* The error blob must start with the "err" marker. */
+	BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
+
+	tmp += 2 * sizeof(uint32_t);
+
+	if (!(memcmp(tmp, INV_HDR,
+			min_t(uint32_t, req_len, sizeof(INV_HDR)) - 1)))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+/*
+ * Drain one complete packet from the RPM SMD channel into buf.
+ *
+ * The caller must provide at least MAX_ERR_BUFFER_SIZE bytes and
+ * serialize readers (smd_lock_read).  Returns silently if the packet is
+ * not yet fully available.
+ */
+static void msm_rpm_read_smd_data(char *buf)
+{
+	int pkt_sz;
+	int bytes_read = 0;
+
+	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+	/* Acks are small; anything larger indicates a protocol bug. */
+	BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
+
+	/* Wait until the whole packet has arrived before consuming it. */
+	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+		return;
+
+	BUG_ON(pkt_sz == 0);
+
+	do {
+		int len;
+
+		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+		pkt_sz -= len;
+		bytes_read += len;
+
+	} while (pkt_sz > 0);
+
+	BUG_ON(pkt_sz < 0);
+}
+
+/*
+ * Workqueue handler: drain every complete ack packet from SMD and wake
+ * its waiter.  Backs off while a noirq waiter (irq_process) is polling
+ * the channel directly.
+ *
+ * NOTE(review): irq_process is tested outside smd_lock_read -- presumably
+ * benign since the noirq path re-reads the channel under the lock, but
+ * worth confirming.
+ */
+static void msm_rpm_smd_work(struct work_struct *work)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+	unsigned long flags;
+
+	while (smd_is_pkt_avail(msm_rpm_data.ch_info) && !irq_process) {
+		spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+		msm_rpm_read_smd_data(buf);
+		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+		msg_id = msm_rpm_get_msg_id_from_ack(buf);
+		errno = msm_rpm_get_error_from_ack(buf);
+		msm_rpm_process_ack(msg_id, errno);
+	}
+}
+
+/*
+ * Marshal a request into wire format and write it over SMD.
+ *
+ * Returns the RPM message id on success, or 0 when there is nothing to
+ * send, allocation fails, or the SMD write fails.
+ *
+ * Fix vs. original: the "Failed to write data" message printed ret AFTER
+ * it had been overwritten with 0, hiding the actual smd_write() result;
+ * it is now logged before ret is cleared.
+ */
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+		int msg_type, bool noirq)
+{
+	uint8_t *tmpbuff;
+	int i, ret, msg_size;
+	unsigned long flags;
+
+	int req_hdr_sz, msg_hdr_sz;
+
+	/* An empty request is silently treated as "nothing to do". */
+	if (!cdata->msg_hdr.data_len)
+		return 0;
+	req_hdr_sz = sizeof(cdata->req_hdr);
+	msg_hdr_sz = sizeof(cdata->msg_hdr);
+
+	cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
+
+	cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+	cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
+	msg_size = cdata->req_hdr.request_len + req_hdr_sz;
+
+	/* Grow the marshalling buffer if this request no longer fits. */
+	if (msg_size > cdata->numbytes) {
+		kfree(cdata->buf);
+		cdata->numbytes = msg_size;
+		cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+	}
+
+	/* NOTE(review): on alloc failure numbytes is left stale while buf
+	 * is NULL; a later retry reallocates, so this is currently benign. */
+	if (!cdata->buf)
+		return 0;
+
+	tmpbuff = cdata->buf;
+
+	/* req_hdr and msg_hdr are adjacent struct members; copy both. */
+	memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
+	tmpbuff += req_hdr_sz + msg_hdr_sz;
+
+	for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check */
+		BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+		if (!cdata->kvp[i].valid)
+			continue;
+
+		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+		tmpbuff += cdata->kvp[i].nbytes;
+	}
+
+	/* In standalone mode pretend the write succeeded. */
+	if (standalone) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+
+		cdata->msg_hdr.data_len = 0;
+		ret = cdata->msg_hdr.msg_id;
+		return ret;
+	}
+
+	msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+	ret = smd_write_avail(msm_rpm_data.ch_info);
+
+	if (ret < 0) {
+		pr_warn("%s(): SMD not initialized\n", __func__);
+		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+		return 0;
+	}
+
+	/* Busy-wait (spin or udelay) until the fifo can take the message. */
+	while ((ret < msg_size)) {
+		if (!noirq) {
+			spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+					flags);
+			cpu_relax();
+			spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+		} else
+			udelay(5);
+		ret = smd_write_avail(msm_rpm_data.ch_info);
+	}
+
+	ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+
+	if (ret == msg_size) {
+		/* Mark all KVPs consumed and hand the msg id back. */
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+		cdata->msg_hdr.data_len = 0;
+		ret = cdata->msg_hdr.msg_id;
+	} else if (ret < msg_size) {
+		struct msm_rpm_wait_data *rc;
+
+		/* Log the real smd_write() result before clearing ret. */
+		pr_info("Failed to write data msg_size:%d ret:%d\n",
+				msg_size, ret);
+		rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
+		if (rc)
+			msm_rpm_free_list_entry(rc);
+		ret = 0;
+	}
+	return ret;
+}
+
+/* Send a staged request; returns the message id to pass to
+ * msm_rpm_wait_for_ack, or 0 on failure or an empty request. */
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+/* As msm_rpm_send_request, but safe in atomic context (busy-waits). */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+/*
+ * Sleep until the ack for msg_id arrives.  A msg_id of 0 (failed send)
+ * returns -EINVAL; in standalone mode success is simulated.
+ *
+ * NOTE(review): the 1 ms timeout is very tight for an inter-processor
+ * round trip, and on timeout the wait-list entry is deliberately left in
+ * place for the late ack -- confirm that is the intended ownership.
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	int rc = 0;
+
+	if (!msg_id)
+		return -EINVAL;
+
+	if (standalone)
+		return 0;
+
+	/* No entry means the ack was already processed. */
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		return 0;
+
+	rc = wait_for_completion_timeout(&elem->ack, msecs_to_jiffies(1));
+	if (!rc) {
+		pr_warn("%s(): Timed out after 1 ms\n", __func__);
+		rc = -ETIMEDOUT;
+	} else {
+		rc = elem->errno;
+		msm_rpm_free_list_entry(elem);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+/*
+ * Poll SMD directly (under the read spinlock, workqueue fenced off via
+ * irq_process) for the ack of msg_id.  Used where sleeping is forbidden;
+ * up to 10 polls of 100 us each (~1 ms) are attempted.
+ *
+ * Fix vs. original: the timeout test compared count against 10, but on
+ * timeout the post-increment in the loop condition leaves count at 11, so
+ * a timeout was never reported and stale errno was read from a
+ * never-completed entry.  Timeout is now detected by checking whether the
+ * expected ack actually arrived.
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	unsigned long flags;
+	int rc = 0;
+	uint32_t id = 0;
+	int count = 0;
+
+	if (!msg_id)
+		return -EINVAL;
+
+	if (standalone)
+		return 0;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+	irq_process = true;
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		/* Should this be a bug
+		 * Is it ok for another thread to read the msg?
+		 */
+		goto wait_ack_cleanup;
+
+	while ((id != msg_id) && (count++ < 10)) {
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			int errno;
+			char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+			msm_rpm_read_smd_data(buf);
+			id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			msm_rpm_process_ack(id, errno);
+		} else
+			udelay(100);
+	}
+
+	if (id != msg_id) {
+		rc = -ETIMEDOUT;
+		pr_warn("%s(): Timed out after 1ms\n", __func__);
+	} else {
+		rc = elem->errno;
+		msm_rpm_free_list_entry(elem);
+	}
+wait_ack_cleanup:
+	irq_process = false;
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+/*
+ * Convenience helper: build, send and await the ack of a one-shot request
+ * composed of the given KVP array.  May sleep; see
+ * msm_rpm_send_message_noirq for the atomic-context variant.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	struct msm_rpm_request *req;
+	int idx;
+	int rc = 0;
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (req == NULL)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nelems; idx++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[idx].key,
+				kvp[idx].data, kvp[idx].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+/*
+ * Atomic-context variant of msm_rpm_send_message: allocations use
+ * GFP_ATOMIC and the ack is polled with interrupts masked.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	struct msm_rpm_request *req;
+	int idx;
+	int rc = 0;
+
+	req = msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+	if (req == NULL)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nelems; idx++) {
+		rc = msm_rpm_add_kvp_data_noirq(req, kvp[idx].key,
+				kvp[idx].data, kvp[idx].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+/*
+ * Enter standalone (no-RPM) mode on targets where the RPM transport is
+ * unavailable; requests are then completed locally without being sent.
+ * Only copper targets are allowed to fall back.  Returns the resulting
+ * standalone state.
+ */
+static bool msm_rpm_set_standalone(void)
+{
+	if (machine_is_copper()) {
+		pr_warn("%s(): Running in standalone mode, requests "
+			"will not be sent to RPM\n", __func__);
+		standalone = true;
+	}
+	return standalone;
+}
+
+/*
+ * Probe the rpm-smd device: read the SMD channel name/type from the
+ * device tree, open the channel and set up the ack workqueue.
+ *
+ * If the channel cannot be opened the driver falls back to standalone
+ * mode (only tolerated on copper targets; BUG otherwise).
+ *
+ * NOTE(review): in standalone mode ch_info was never opened (it stays
+ * NULL as static data), so the unconditional smd_disable_read_intr()
+ * call presumably relies on the SMD API tolerating that -- verify.
+ */
+static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret;
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+			&msm_rpm_data.ch_name);
+	if (ret)
+		goto fail;
+
+	key = "rpm-channel-type";
+	ret = of_property_read_u32(pdev->dev.of_node, key,
+			&msm_rpm_data.ch_type);
+	if (ret)
+		goto fail;
+
+	init_completion(&msm_rpm_data.smd_open);
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
+
+	if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type,
+				&msm_rpm_data.ch_info, &msm_rpm_data,
+				msm_rpm_notify)) {
+		pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type);
+
+		/* No RPM: only bring-up/simulator targets may continue. */
+		msm_rpm_set_standalone();
+		BUG_ON(!standalone);
+		complete(&msm_rpm_data.smd_open);
+	}
+
+	ret = wait_for_completion_timeout(&msm_rpm_data.smd_open,
+			msecs_to_jiffies(5));
+
+	/* Channel must open promptly; the system cannot run without it. */
+	BUG_ON(!ret);
+
+	smd_disable_read_intr(msm_rpm_data.ch_info);
+
+	if (!standalone) {
+		msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
+		if (!msm_rpm_smd_wq)
+			return -EINVAL;
+	}
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	return 0;
+fail:
+	pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+	return -EINVAL;
+}
+
+static struct of_device_id msm_rpm_match_table[] = {
+	{.compatible = "qcom,rpm-smd"},
+	{},
+};
+
+/* No .remove: the RPM transport lives for the lifetime of the system. */
+static struct platform_driver msm_rpm_device_driver = {
+	.probe = msm_rpm_dev_probe,
+	.driver = {
+		.name = "rpm-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_rpm_match_table,
+	},
+};
+
+/*
+ * Register the rpm-smd platform driver.  Guarded so that an early
+ * explicit call and the late_initcall below cannot double-register.
+ */
+int __init msm_rpm_driver_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+late_initcall(msm_rpm_driver_init);
diff --git a/arch/arm/mach-msm/scm-boot.h b/arch/arm/mach-msm/scm-boot.h
index b14c968..221ffca 100644
--- a/arch/arm/mach-msm/scm-boot.h
+++ b/arch/arm/mach-msm/scm-boot.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010, 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,10 @@
#define SCM_FLAG_WARMBOOT_CPU2 0x10
#define SCM_FLAG_WARMBOOT_CPU3 0x40
+#ifdef CONFIG_MSM_SCM
int scm_set_boot_addr(void *addr, int flags);
+#else
+static inline int scm_set_boot_addr(void *addr, int flags) { return 0; }
+#endif
#endif
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index dac0a37..ddc3a8d 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -3483,8 +3483,14 @@
},
};
-static int __init msm_smd_init(void)
+int __init msm_smd_init(void)
{
+ static bool registered;
+
+ if (registered)
+ return 0;
+
+ registered = true;
return platform_driver_register(&msm_smd_driver);
}
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index abcd336..b9cba8c 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -40,7 +40,7 @@
#ifdef CONFIG_ARCH_FSM9XXX
#define NUM_SMD_PKT_PORTS 4
#else
-#define NUM_SMD_PKT_PORTS 14
+#define NUM_SMD_PKT_PORTS 15
#endif
#define LOOPBACK_INX (NUM_SMD_PKT_PORTS - 1)
@@ -677,6 +677,7 @@
"apr_apps2",
"smdcntl8",
"smd_sns_adsp",
+ "smd_cxm_qmi",
"smd_pkt_loopback",
};
@@ -694,6 +695,7 @@
"apr_apps2",
"DATA40_CNTL",
"SENSOR",
+ "CXM_QMI_PORT_8064",
"LOOPBACK",
};
@@ -711,6 +713,7 @@
SMD_APPS_QDSP,
SMD_APPS_MODEM,
SMD_APPS_QDSP,
+ SMD_APPS_WCNSS,
SMD_APPS_MODEM,
};
#endif
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 00748e4..b047cf4 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -218,7 +218,6 @@
/* 8064 IDs */
[109] = MSM_CPU_8064,
- [130] = MSM_CPU_8064,
/* 8930 IDs */
[116] = MSM_CPU_8930,
@@ -247,11 +246,17 @@
[128] = MSM_CPU_8625,
[129] = MSM_CPU_8625,
+ /* 8064 MPQ ID */
+ [130] = MSM_CPU_8064,
+
/* 7x25AB IDs */
[131] = MSM_CPU_7X25AB,
[132] = MSM_CPU_7X25AB,
[133] = MSM_CPU_7X25AB,
+ /* 9625 IDs */
+ [134] = MSM_CPU_9625,
+
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
considered as unknown CPU. */
@@ -621,6 +626,10 @@
dummy_socinfo.id = 126;
strlcpy(dummy_socinfo.build_id, "copper - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_msm9625()) {
+ dummy_socinfo.id = 134;
+ strlcpy(dummy_socinfo.build_id, "msm9625 - ",
+ sizeof(dummy_socinfo.build_id));
} else if (machine_is_msm8625_rumi3())
dummy_socinfo.id = 127;
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 4a1285b..916686f 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -375,12 +375,13 @@
partition_no = msm_subsystem_get_partition_no(
subsys_ids[i]);
- iova_start = msm_allocate_iova_address(domain_no,
+ ret = msm_allocate_iova_address(domain_no,
partition_no,
map_size,
- max(min_align, SZ_4K));
+ max(min_align, SZ_4K),
+ &iova_start);
- if (!iova_start) {
+ if (ret) {
pr_err("%s: could not allocate iova address\n",
__func__);
continue;
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 404538a..bacba58 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -159,6 +159,22 @@
tst r5, #FPSCR_IXE
bne process_exception
+#ifdef CONFIG_ARCH_MSM_KRAIT
+ @ Krait does not set FPEXC.DEX for unsupported short vector instructions
+ mrc p15, 0, r2, c0, c0, 0
+ ldr r4, =0xff00fc00
+ and r4, r2, r4
+ ldr r2, =0x51000400
+ cmp r2, r4
+ bne skip
+
+ tst r5, #FPSCR_LENGTH_MASK
+ beq skip
+ orr r1, r1, #FPEXC_DEX
+ b process_exception
+skip:
+#endif
+
@ Fall into hand on to next handler - appropriate coproc instr
@ not recognised by VFP
diff --git a/block/blk-core.c b/block/blk-core.c
index 35ae52d..a6a8ccb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1467,7 +1467,7 @@
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1521,6 +1521,14 @@
goto end_io;
}
+ if ((bio->bi_rw & REQ_SANITIZE) &&
+ (!blk_queue_sanitize(q))) {
+ pr_info("%s - got a SANITIZE request but the queue "
+ "doesn't support sanitize requests", __func__);
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
if (blk_throtl_bio(q, &bio))
goto end_io;
@@ -1611,7 +1619,8 @@
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio) &&
+ (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1657,7 +1666,7 @@
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))
return 0;
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 78e627e..39a7f25 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -112,6 +112,57 @@
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_sanitize - queue a sanitize request
+ * @bdev: blockdev to issue sanitize for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a sanitize request for the specified block device
+ */
+int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = REQ_WRITE | REQ_SANITIZE;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_sanitize(q)) {
+ pr_err("%s - card doesn't support sanitize", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_sanitize);
+
+/**
* blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37c..f3ed15b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -383,6 +383,12 @@
return 0;
/*
+ * Don't merge file system requests and sanitize requests
+ */
+ if ((req->cmd_flags & REQ_SANITIZE) != (next->cmd_flags & REQ_SANITIZE))
+ return 0;
+
+ /*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/elevator.c b/block/elevator.c
index b0b38ce..78a14b5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -89,6 +89,12 @@
return 0;
/*
+ * Don't merge sanitize requests
+ */
+ if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
+ return 0;
+
+ /*
* different data direction or already started, don't merge
*/
if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -657,7 +663,7 @@
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 1124cd2..dbc103b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -131,6 +131,11 @@
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_sanitize(struct block_device *bdev)
+{
+ return blkdev_issue_sanitize(bdev, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -215,6 +220,10 @@
set_device_ro(bdev, n);
return 0;
+ case BLKSANITIZE:
+ ret = blk_ioctl_sanitize(bdev);
+ break;
+
case BLKDISCARD:
case BLKSECDISCARD: {
uint64_t range[2];
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 34640c3..7b7549a 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -194,6 +194,7 @@
return -ENOMEM;
}
}
+ driver->data_ready[i] = 0x0;
driver->data_ready[i] |= MSG_MASKS_TYPE;
driver->data_ready[i] |= EVENT_MASKS_TYPE;
driver->data_ready[i] |= LOG_MASKS_TYPE;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a7a4a2a..4ac2643 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1139,8 +1139,17 @@
}
}
#if defined(CONFIG_DIAG_OVER_USB)
+ /* Check for the command/respond msg for the maximum packet length */
+ if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+ (*(uint16_t *)(buf+2) == 0x0055)) {
+ for (i = 0; i < 4; i++)
+ *(driver->apps_rsp_buf+i) = *(buf+i);
+ *(uint32_t *)(driver->apps_rsp_buf+4) = PKT_SIZE;
+ ENCODE_RSP_AND_SEND(7);
+ return 0;
+ }
/* Check for Apps Only & get event mask request */
- if (!(driver->ch) && chk_apps_only() && *buf == 0x81) {
+ else if (!(driver->ch) && chk_apps_only() && *buf == 0x81) {
driver->apps_rsp_buf[0] = 0x81;
driver->apps_rsp_buf[1] = 0x0;
*(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0;
diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c
index b457a00..6cd1806 100644
--- a/drivers/char/msm_rotator.c
+++ b/drivers/char/msm_rotator.c
@@ -45,6 +45,7 @@
#define MSM_ROTATOR_START (MSM_ROTATOR_BASE+0x0030)
#define MSM_ROTATOR_MAX_BURST_SIZE (MSM_ROTATOR_BASE+0x0050)
#define MSM_ROTATOR_HW_VERSION (MSM_ROTATOR_BASE+0x0070)
+#define MSM_ROTATOR_SW_RESET (MSM_ROTATOR_BASE+0x0074)
#define MSM_ROTATOR_SRC_SIZE (MSM_ROTATOR_BASE+0x1108)
#define MSM_ROTATOR_SRCP0_ADDR (MSM_ROTATOR_BASE+0x110c)
#define MSM_ROTATOR_SRCP1_ADDR (MSM_ROTATOR_BASE+0x1110)
@@ -393,6 +394,8 @@
case MDP_RGB_565:
case MDP_BGR_565:
case MDP_YCRYCB_H2V1:
+ case MDP_YCBCR_H1V1:
+ case MDP_YCRCB_H1V1:
p->num_planes = 1;
p->plane_size[0] = w * h * get_bpp(format);
break;
@@ -904,8 +907,7 @@
break;
if (s == MAX_SESSIONS) {
- dev_dbg(msm_rotator_dev->device,
- "%s() : Attempt to use invalid session_id %d\n",
+ pr_err("%s() : Attempt to use invalid session_id %d\n",
__func__, s);
rc = -EINVAL;
goto do_rotate_unlock_mutex;
@@ -1127,11 +1129,13 @@
break;
default:
rc = -EINVAL;
+ pr_err("%s(): Unsupported format %u\n", __func__, format);
goto do_rotate_exit;
}
if (rc != 0) {
msm_rotator_dev->last_session_idx = INVALID_SESSION;
+ pr_err("%s(): Invalid session error\n", __func__);
goto do_rotate_exit;
}
@@ -1143,8 +1147,11 @@
wait_event(msm_rotator_dev->wq,
(msm_rotator_dev->processing == 0));
status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS);
- if ((status & 0x03) != 0x01)
+ if ((status & 0x03) != 0x01) {
+ pr_err("%s(): AXI Bus Error, issuing SW_RESET\n", __func__);
+ iowrite32(0x1, MSM_ROTATOR_SW_RESET);
rc = -EFAULT;
+ }
iowrite32(0, MSM_ROTATOR_INTR_ENABLE);
iowrite32(3, MSM_ROTATOR_INTR_CLEAR);
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index 111bc49..c203fc5 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2101,32 +2101,33 @@
uint32_t authsize = q_req->authsize;
uint32_t totallen_in, totallen_out, out_len;
uint32_t pad_len_in, pad_len_out;
- uint32_t pad_mac_len_out, pad_ptx_len_out;
int rc = 0;
int ce_block_size;
ce_block_size = pce_dev->ce_dm.ce_block_size;
if (q_req->dir == QCE_ENCRYPT) {
+ uint32_t pad_mac_len_out;
+
q_req->cryptlen = areq->cryptlen;
totallen_in = q_req->cryptlen + areq->assoclen;
- totallen_out = q_req->cryptlen + authsize + areq->assoclen;
- out_len = areq->cryptlen + authsize;
pad_len_in = ALIGN(totallen_in, ce_block_size) - totallen_in;
- pad_mac_len_out = ALIGN(authsize, ce_block_size) -
- authsize;
- pad_ptx_len_out = ALIGN(q_req->cryptlen, ce_block_size) -
- q_req->cryptlen;
- pad_len_out = pad_ptx_len_out + pad_mac_len_out;
- totallen_out += pad_len_out;
+
+ out_len = areq->cryptlen + authsize;
+ totallen_out = q_req->cryptlen + authsize + areq->assoclen;
+ pad_mac_len_out = ALIGN(authsize, ce_block_size) - authsize;
+ totallen_out += pad_mac_len_out;
+ pad_len_out = ALIGN(totallen_out, ce_block_size) -
+ totallen_out + pad_mac_len_out;
+
} else {
q_req->cryptlen = areq->cryptlen - authsize;
totallen_in = areq->cryptlen + areq->assoclen;
- totallen_out = q_req->cryptlen + areq->assoclen;
- out_len = areq->cryptlen - authsize;
- pad_len_in = ALIGN(areq->cryptlen, ce_block_size) -
- areq->cryptlen;
- pad_len_out = pad_len_in + authsize;
- totallen_out += pad_len_out;
+ pad_len_in = ALIGN(totallen_in, ce_block_size) - totallen_in;
+
+ out_len = q_req->cryptlen;
+ totallen_out = totallen_in;
+ pad_len_out = ALIGN(totallen_out, ce_block_size) - totallen_out;
+ pad_len_out += authsize;
}
_chain_buffer_in_init(pce_dev);
@@ -2605,4 +2606,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("2.16");
+MODULE_VERSION("2.17");
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index bd7729b..56f986d 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -996,19 +996,41 @@
{
struct ion_client *client = s->private;
struct rb_node *n;
+ struct rb_node *n2;
- seq_printf(s, "%16.16s: %16.16s : %16.16s : %16.16s\n", "heap_name",
- "size_in_bytes", "handle refcount", "buffer");
+ seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
+ "heap_name", "size_in_bytes", "handle refcount",
+ "buffer", "physical", "[domain,partition] - virt");
+
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
+ enum ion_heap_type type = handle->buffer->heap->type;
- seq_printf(s, "%16.16s: %16x : %16d : %16p\n",
+ seq_printf(s, "%16.16s: %16x : %16d : %12p",
handle->buffer->heap->name,
handle->buffer->size,
atomic_read(&handle->ref.refcount),
handle->buffer);
+
+ if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
+ type == ION_HEAP_TYPE_CARVEOUT ||
+ type == ION_HEAP_TYPE_CP)
+ seq_printf(s, " : %12lx", handle->buffer->priv_phys);
+ else
+ seq_printf(s, " : %12s", "N/A");
+
+ for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
+ n2 = rb_next(n2)) {
+ struct ion_iommu_map *imap =
+ rb_entry(n2, struct ion_iommu_map, node);
+ seq_printf(s, " : [%d,%d] - %8lx",
+ imap->domain_info[DI_DOMAIN_NUM],
+ imap->domain_info[DI_PARTITION_NUM],
+ imap->iova_addr);
+ }
+ seq_printf(s, "\n");
}
seq_printf(s, "%16.16s %d\n", "client refcount:",
@@ -1063,7 +1085,13 @@
struct rb_node *parent = NULL;
struct ion_client *entry;
pid_t pid;
- unsigned int name_len = strnlen(name, 64);
+ unsigned int name_len;
+
+ if (!name) {
+ pr_err("%s: Name cannot be null\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ name_len = strnlen(name, 64);
get_task_struct(current->group_leader);
task_lock(current->group_leader);
@@ -1493,21 +1521,28 @@
mutex_unlock(&client->lock);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
+ if (data.fd < 0)
+ return data.fd;
break;
}
case ION_IOC_IMPORT:
{
struct ion_fd_data data;
+ int ret = 0;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
data.handle = ion_import_fd(client, data.fd);
- if (IS_ERR(data.handle))
+ if (IS_ERR(data.handle)) {
+ ret = PTR_ERR(data.handle);
data.handle = NULL;
+ }
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
+ if (ret < 0)
+ return ret;
break;
}
case ION_IOC_CUSTOM:
@@ -1561,6 +1596,8 @@
if (!data.handle)
ion_free(client, handle);
+ if (ret < 0)
+ return ret;
break;
}
@@ -1637,12 +1674,158 @@
return size;
}
+/**
+ * Searches through a clients handles to find if the buffer is owned
+ * by this client. Used for debug output.
+ * @param client pointer to candidate owner of buffer
+ * @param buf pointer to buffer that we are trying to find the owner of
+ * @return 1 if found, 0 otherwise
+ */
+static int ion_debug_find_buffer_owner(const struct ion_client *client,
+ const struct ion_buffer *buf)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ const struct ion_handle *handle = rb_entry(n,
+ const struct ion_handle,
+ node);
+ if (handle->buffer == buf)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Adds mem_map_data pointer to the tree of mem_map
+ * Used for debug output.
+ * @param mem_map The mem_map tree
+ * @param data The new data to add to the tree
+ */
+static void ion_debug_mem_map_add(struct rb_root *mem_map,
+ struct mem_map_data *data)
+{
+ struct rb_node **p = &mem_map->rb_node;
+ struct rb_node *parent = NULL;
+ struct mem_map_data *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct mem_map_data, node);
+
+ if (data->addr < entry->addr) {
+ p = &(*p)->rb_left;
+ } else if (data->addr > entry->addr) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: mem_map_data already found.", __func__);
+ BUG();
+ }
+ }
+ rb_link_node(&data->node, parent, p);
+ rb_insert_color(&data->node, mem_map);
+}
+
+/**
+ * Search for an owner of a buffer by iterating over all ION clients.
+ * @param dev ion device containing pointers to all the clients.
+ * @param buffer pointer to buffer we are trying to find the owner of.
+ * @return name of owner.
+ */
+const char *ion_debug_locate_owner(const struct ion_device *dev,
+ const struct ion_buffer *buffer)
+{
+ struct rb_node *j;
+ const char *client_name = NULL;
+
+ for (j = rb_first(&dev->user_clients); j && !client_name;
+ j = rb_next(j)) {
+ struct ion_client *client = rb_entry(j, struct ion_client,
+ node);
+ if (ion_debug_find_buffer_owner(client, buffer))
+ client_name = client->name;
+ }
+ for (j = rb_first(&dev->kernel_clients); j && !client_name;
+ j = rb_next(j)) {
+ struct ion_client *client = rb_entry(j, struct ion_client,
+ node);
+ if (ion_debug_find_buffer_owner(client, buffer))
+ client_name = client->name;
+ }
+ return client_name;
+}
+
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+ struct rb_root *mem_map)
+{
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer =
+ rb_entry(n, struct ion_buffer, node);
+ if (buffer->heap->id == heap->id) {
+ struct mem_map_data *data =
+ kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ seq_printf(s, "ERROR: out of memory. "
+ "Part of memory map will not be logged\n");
+ break;
+ }
+ data->addr = buffer->priv_phys;
+ data->addr_end = buffer->priv_phys + buffer->size-1;
+ data->size = buffer->size;
+ data->client_name = ion_debug_locate_owner(dev, buffer);
+ ion_debug_mem_map_add(mem_map, data);
+ }
+ }
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
+{
+ if (mem_map) {
+ struct rb_node *n;
+ while ((n = rb_first(mem_map)) != 0) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ rb_erase(&data->node, mem_map);
+ kfree(data);
+ }
+ }
+}
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+ if (heap->ops->print_debug) {
+ struct rb_root mem_map = RB_ROOT;
+ ion_debug_mem_map_create(s, heap, &mem_map);
+ heap->ops->print_debug(heap, s, &mem_map);
+ ion_debug_mem_map_destroy(&mem_map);
+ }
+}
+
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
struct ion_heap *heap = s->private;
struct ion_device *dev = heap->dev;
struct rb_node *n;
+ mutex_lock(&dev->lock);
seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
@@ -1666,8 +1849,8 @@
seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid,
size);
}
- if (heap->ops->print_debug)
- heap->ops->print_debug(heap, s);
+ ion_heap_print_debug(s, heap);
+ mutex_unlock(&dev->lock);
return 0;
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index ca2380b..710583b 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -251,7 +251,8 @@
return 0;
}
-static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -260,6 +261,44 @@
carveout_heap->allocated_bytes);
seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);
+ if (mem_map) {
+ unsigned long base = carveout_heap->base;
+ unsigned long size = carveout_heap->total_size;
+ unsigned long end = base+size;
+ unsigned long last_end = base;
+ struct rb_node *n;
+
+ seq_printf(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size (hex)");
+
+ for (n = rb_first(mem_map); n; n = rb_next(n)) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ const char *client_name = "(null)";
+
+ if (last_end < data->addr) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ "FREE", last_end, data->addr-1,
+ data->addr-last_end,
+ data->addr-last_end);
+ }
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ client_name, data->addr,
+ data->addr_end,
+ data->size, data->size);
+ last_end = data->addr_end+1;
+ }
+ if (last_end < end) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
+ last_end, end-1, end-last_end, end-last_end);
+ }
+ }
return 0;
}
@@ -287,13 +326,12 @@
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 7f57fe6..fcbf1d4 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -243,7 +243,8 @@
cp_heap->total_size -
cp_heap->allocated_bytes, size);
- if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+ if (cp_heap->reusable && !cp_heap->allocated_bytes &&
+ cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
if (fmem_set_state(FMEM_T_STATE) != 0)
pr_err("%s: unable to transition heap to T-state\n",
__func__);
@@ -293,7 +294,8 @@
mutex_lock(&cp_heap->lock);
cp_heap->allocated_bytes -= size;
- if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+ if (cp_heap->reusable && !cp_heap->allocated_bytes &&
+ cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
if (fmem_set_state(FMEM_T_STATE) != 0)
pr_err("%s: unable to transition heap to T-state\n",
__func__);
@@ -561,7 +563,8 @@
return 0;
}
-static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map)
{
unsigned long total_alloc;
unsigned long total_size;
@@ -586,6 +589,45 @@
seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");
+ if (mem_map) {
+ unsigned long base = cp_heap->base;
+ unsigned long size = cp_heap->total_size;
+ unsigned long end = base+size;
+ unsigned long last_end = base;
+ struct rb_node *n;
+
+ seq_printf(s, "\nMemory Map\n");
+ seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+ "client", "start address", "end address",
+ "size (hex)");
+
+ for (n = rb_first(mem_map); n; n = rb_next(n)) {
+ struct mem_map_data *data =
+ rb_entry(n, struct mem_map_data, node);
+ const char *client_name = "(null)";
+
+ if (last_end < data->addr) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ "FREE", last_end, data->addr-1,
+ data->addr-last_end,
+ data->addr-last_end);
+ }
+
+ if (data->client_name)
+ client_name = data->client_name;
+
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
+ client_name, data->addr,
+ data->addr_end,
+ data->size, data->size);
+ last_end = data->addr_end+1;
+ }
+ if (last_end < end) {
+ seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
+ last_end, end-1, end-last_end, end-last_end);
+ }
+ }
+
return 0;
}
@@ -645,13 +687,15 @@
}
if (!ret_value && domain) {
unsigned long temp_phys = cp_heap->base;
- unsigned long temp_iova =
- msm_allocate_iova_address(domain_num, partition,
- virt_addr_len, SZ_64K);
- if (!temp_iova) {
+ unsigned long temp_iova;
+
+ ret_value = msm_allocate_iova_address(domain_num, partition,
+ virt_addr_len, SZ_64K,
+ &temp_iova);
+
+ if (ret_value) {
pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
__func__, domain_num, partition);
- ret_value = -ENOMEM;
goto out;
}
cp_heap->iommu_iova[domain_num] = temp_iova;
@@ -740,13 +784,12 @@
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
@@ -916,6 +959,14 @@
cp_heap = NULL;
}
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size)
+{
+ struct ion_cp_heap *cp_heap =
+ container_of(heap, struct ion_cp_heap, heap);
+ *base = cp_heap->base;
+ *size = cp_heap->total_size;
+}
/* SCM related code for locking down memory for content protection */
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 312ca42..70bdc7f 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -190,13 +190,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 98e11cf..00ce33f 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -155,7 +155,8 @@
unsigned long iova_length,
unsigned long flags);
void (*unmap_iommu)(struct ion_iommu_map *data);
- int (*print_debug)(struct ion_heap *heap, struct seq_file *s);
+ int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *mem_map);
int (*secure_heap)(struct ion_heap *heap);
int (*unsecure_heap)(struct ion_heap *heap);
};
@@ -185,7 +186,22 @@
const char *name;
};
-
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node: rb node used to store in the tree of mem_map_data
+ * @addr: start address of memory region.
+ * @addr: end address of memory region.
+ * @size: size of memory region
+ * @client_name: name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+ struct rb_node node;
+ unsigned long addr;
+ unsigned long addr_end;
+ unsigned long size;
+ const char *client_name;
+};
#define iommu_map_domain(__m) ((__m)->domain_info[1])
#define iommu_map_partition(__m) ((__m)->domain_info[0])
@@ -298,4 +314,9 @@
void *uaddr, unsigned long offset, unsigned long len,
unsigned int cmd);
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+ unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ed9ae27..26c6632 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -204,7 +204,8 @@
return 0;
}
-static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s)
+static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
+ const struct rb_root *unused)
{
seq_printf(s, "total bytes currently allocated: %lx\n",
(unsigned long) atomic_read(&system_heap_allocated));
@@ -240,13 +241,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
@@ -423,7 +423,8 @@
}
static int ion_system_contig_print_debug(struct ion_heap *heap,
- struct seq_file *s)
+ struct seq_file *s,
+ const struct rb_root *unused)
{
seq_printf(s, "total bytes currently allocated: %lx\n",
(unsigned long) atomic_read(&system_contig_heap_allocated));
@@ -458,13 +459,12 @@
data->mapped_size = iova_length;
extra = iova_length - buffer->size;
- data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align);
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
- if (!data->iova_addr) {
- ret = -ENOMEM;
+ if (ret)
goto out;
- }
domain = msm_get_iommu_domain(domain_num);
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 15c0ec5..c8bfce3 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -213,6 +213,45 @@
}
}
+static int is_heap_overlapping(const struct ion_platform_heap *heap1,
+ const struct ion_platform_heap *heap2)
+{
+ unsigned long heap1_base = heap1->base;
+ unsigned long heap2_base = heap2->base;
+ unsigned long heap1_end = heap1->base + heap1->size - 1;
+ unsigned long heap2_end = heap2->base + heap2->size - 1;
+
+ if (heap1_base == heap2_base)
+ return 1;
+ if (heap1_base < heap2_base && heap1_end >= heap2_base)
+ return 1;
+ if (heap2_base < heap1_base && heap2_end >= heap1_base)
+ return 1;
+ return 0;
+}
+
+static void check_for_heap_overlap(const struct ion_platform_heap heap_list[],
+ unsigned long nheaps)
+{
+ unsigned long i;
+ unsigned long j;
+
+ for (i = 0; i < nheaps; ++i) {
+ const struct ion_platform_heap *heap1 = &heap_list[i];
+ if (!heap1->base)
+ continue;
+ for (j = i + 1; j < nheaps; ++j) {
+ const struct ion_platform_heap *heap2 = &heap_list[j];
+ if (!heap2->base)
+ continue;
+ if (is_heap_overlapping(heap1, heap2)) {
+ panic("Memory in heap %s overlaps with heap %s\n",
+ heap1->name, heap2->name);
+ }
+ }
+ }
+}
+
static int msm_ion_probe(struct platform_device *pdev)
{
struct ion_platform_data *pdata = pdev->dev.platform_data;
@@ -258,6 +297,8 @@
ion_device_add_heap(idev, heaps[i]);
}
+
+ check_for_heap_overlap(pdata->heaps, num_heaps);
platform_set_drvdata(pdev, idev);
return 0;
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 0a71982..35af06e 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -248,6 +248,10 @@
#define A3XX_VBIF_ARB_CTL 0x303C
#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
+#define A3XX_VBIF_ERR_PENDING 0x3064
+#define A3XX_VBIF_ERR_MASK 0x3066
+#define A3XX_VBIF_ERR_CLEAR 0x3067
+#define A3XX_VBIF_ERR_INFO 0x3068
/* Bit flags for RBBM_CTL */
#define RBBM_RBBM_CTL_RESET_PWR_CTR1 (1 << 1)
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9479302..2f503ae 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -21,6 +21,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -69,10 +70,9 @@
static struct adreno_device device_3d0 = {
.dev = {
+ KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
.name = DEVICE_3D0_NAME,
.id = KGSL_DEVICE_3D0,
- .ver_major = DRIVER_VERSION_MAJOR,
- .ver_minor = DRIVER_VERSION_MINOR,
.mh = {
.mharb = ADRENO_CFG_MHARB,
/* Remove 1k boundary check in z470 to avoid a GPU
@@ -92,9 +92,6 @@
.pwrctrl = {
.irq_name = KGSL_3D0_IRQ,
},
- .mutex = __MUTEX_INITIALIZER(device_3d0.dev.mutex),
- .state = KGSL_STATE_INIT,
- .active_cnt = 0,
.iomemname = KGSL_3D0_REG_MEMORY,
.ftbl = &adreno_functable,
#ifdef CONFIG_HAS_EARLYSUSPEND
@@ -162,7 +159,7 @@
"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
512, 0, 2, SZ_256K },
/* A3XX doesn't use the pix_shader_start */
- { ADRENO_REV_A320, 3, 2, 0, 0,
+ { ADRENO_REV_A320, 3, 2, 0, ANY_ID,
"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
512, 0, 2, SZ_512K },
@@ -247,7 +244,144 @@
return result;
}
-static void adreno_setstate(struct kgsl_device *device,
+static void adreno_iommu_setstate(struct kgsl_device *device,
+ uint32_t flags)
+{
+ unsigned int pt_val, reg_pt_val;
+ unsigned int link[200];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_memdesc **reg_map_desc;
+ void *reg_map_array;
+ int num_iommu_units, i;
+
+ if (!adreno_dev->drawctxt_active)
+ return kgsl_mmu_device_setstate(&device->mmu, flags);
+ num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+ ®_map_array);
+ reg_map_desc = reg_map_array;
+
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ goto done;
+
+ if (adreno_is_a225(adreno_dev))
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_USER,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
+ /*
+		 * We need to perform the following operations for all
+ * IOMMU units
+ */
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val &
+ (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+ kgsl_mmu_get_pt_lsb(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+ /*
+			 * Set address of the new pagetable by writing to IOMMU
+ * TTBR0 register
+ */
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0;
+ *cmds++ = reg_pt_val;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /*
+ * Read back the ttbr0 register as a barrier to ensure
+ * above writes have completed
+ */
+ cmds += adreno_add_read_cmds(device, cmds,
+ reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+ reg_pt_val,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ /* set the asid */
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR;
+ *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /* Read back asid to ensure above write completes */
+ cmds += adreno_add_read_cmds(device, cmds,
+ reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR,
+ kgsl_mmu_get_hwpagetable_asid(&device->mmu),
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ /* invalidate all base pointers */
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff;
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH)
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ /*
+ * tlb flush based on asid, no need to flush entire tlb
+ */
+ for (i = 0; i < num_iommu_units; i++) {
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = (reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_CTX_TLBIASID);
+ *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+ cmds += adreno_add_read_cmds(device, cmds,
+ reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_CONTEXTIDR,
+ kgsl_mmu_get_hwpagetable_asid(&device->mmu),
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ }
+
+ if (adreno_is_a225(adreno_dev))
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
+ reg_map_desc[num_iommu_units - 1]->gpuaddr - PAGE_SIZE,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_PRIV,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ sizedwords += (cmds - &link[0]);
+ if (sizedwords)
+ adreno_ringbuffer_issuecmds(device,
+ KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+done:
+ if (num_iommu_units)
+ kfree(reg_map_array);
+}
+
+static void adreno_gpummu_setstate(struct kgsl_device *device,
uint32_t flags)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -257,16 +391,6 @@
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
/*
- * A3XX doesn't support the fast path (the registers don't even exist)
- * so just bail out early
- */
-
- if (adreno_is_a3xx(adreno_dev)) {
- kgsl_mmu_device_setstate(&device->mmu, flags);
- return;
- }
-
- /*
* If possible, then set the state via the command stream to avoid
* a CPU idle. Otherwise, use the default setstate which uses register
* writes For CFF dump we must idle and use the registers so that it is
@@ -280,7 +404,7 @@
/* set page table base */
*cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
- *cmds++ = kgsl_pt_get_base_addr(
+ *cmds++ = kgsl_mmu_pt_get_base_addr(
device->mmu.hwpagetable);
sizedwords += 4;
}
@@ -352,6 +476,16 @@
}
}
+static void adreno_setstate(struct kgsl_device *device,
+ uint32_t flags)
+{
+ /* call the mmu specific handler */
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+ return adreno_gpummu_setstate(device, flags);
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ return adreno_iommu_setstate(device, flags);
+}
+
static unsigned int
a3xx_getchipid(struct kgsl_device *device)
{
@@ -366,10 +500,22 @@
*/
if (cpu_is_apq8064()) {
+ unsigned int version = socinfo_get_version();
+
/* A320 */
majorid = 2;
minorid = 0;
- patchid = 0;
+
+ /*
+	 * V1.1 has some GPU workarounds that we need to communicate
+ * up to user space via the patchid
+ */
+
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 1))
+ patchid = 1;
+ else
+ patchid = 0;
} else if (cpu_is_msm8930()) {
/* A305 */
majorid = 0;
@@ -482,8 +628,6 @@
adreno_dev = ADRENO_DEVICE(device);
device->parentdev = &pdev->dev;
- init_completion(&device->recovery_gate);
-
status = adreno_ringbuffer_init(device);
if (status != 0)
goto error;
@@ -1135,16 +1279,12 @@
static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
{
unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
-
if (k_ctxt != NULL) {
struct adreno_context *a_ctxt = k_ctxt->devctxt;
- /*
- * if the context was not created with per context timestamp
- * support, we must use the global timestamp since issueibcmds
- * will be returning that one.
- */
- if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
- context_id = a_ctxt->id;
+ if (k_ctxt->id == KGSL_CONTEXT_INVALID || a_ctxt == NULL)
+ context_id = KGSL_CONTEXT_INVALID;
+ else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ context_id = k_ctxt->id;
}
return context_id;
@@ -1155,11 +1295,22 @@
{
int status;
unsigned int ref_ts, enableflag;
- unsigned int context_id = _get_context_id(context);
+ unsigned int context_id;
+
+ mutex_lock(&device->mutex);
+ context_id = _get_context_id(context);
+ /*
+ * If the context ID is invalid, we are in a race with
+ * the context being destroyed by userspace so bail.
+ */
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ status = -EINVAL;
+ goto unlock;
+ }
status = kgsl_check_timestamp(device, context, timestamp);
if (!status) {
- mutex_lock(&device->mutex);
kgsl_sharedmem_readl(&device->memstore, &enableflag,
KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
mb();
@@ -1193,8 +1344,9 @@
adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
&cmds[0], 2);
}
- mutex_unlock(&device->mutex);
}
+unlock:
+ mutex_unlock(&device->mutex);
return status;
}
@@ -1253,6 +1405,15 @@
msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
msecs_part = (msecs - msecs_first + 3) / 4;
for (retries = 0; retries < 5; retries++) {
+ /*
+ * If the context ID is invalid, we are in a race with
+ * the context being destroyed by userspace so bail.
+ */
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ status = -EINVAL;
+ goto done;
+ }
if (kgsl_check_timestamp(device, context, timestamp)) {
/* if the timestamp happens while we're not
* waiting, there's a chance that an interrupt
@@ -1314,6 +1475,14 @@
unsigned int timestamp = 0;
unsigned int context_id = _get_context_id(context);
+ /*
+ * If the context ID is invalid, we are in a race with
+ * the context being destroyed by userspace so bail.
+ */
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ return timestamp;
+ }
switch (type) {
case KGSL_TIMESTAMP_QUEUED: {
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a7ea20c..4ce56a4 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -16,6 +16,7 @@
#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
+#include "kgsl_iommu.h"
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
@@ -223,4 +224,70 @@
return (ilog2(size) - 5) << 29;
}
+static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
+ unsigned int nop_gpuaddr)
+{
+ /* Adding an indirect buffer ensures that the prefetch stalls until
+ * the commands in indirect buffer have completed. We need to stall
+ * prefetch with a nop indirect buffer when updating pagetables
+ * because it provides stabler synchronization */
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = nop_gpuaddr;
+ *cmds++ = 2;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ return 5;
+}
+
+static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
+ unsigned int new_phys_limit,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
+ *cmds++ = new_phys_limit;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
+ int cur_ctx_bank,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
+ *cmds++ = (cur_ctx_bank ? 0 : 0x20);
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+/*
+ * adreno_add_read_cmds - Add pm4 packets to perform read
+ * @device - Pointer to device structure
+ * @cmds - Pointer to memory where read commands need to be added
+ * @addr - gpu address of the read
+ * @val - The GPU will wait until the data at address addr becomes
+ * equal to value
+ */
+static inline int adreno_add_read_cmds(struct kgsl_device *device,
+ unsigned int *cmds, unsigned int addr,
+ unsigned int val, unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+ /* MEM SPACE = memory, FUNCTION = equals */
+ *cmds++ = 0x13;
+ *cmds++ = addr;
+ *cmds++ = val;
+ *cmds++ = 0xFFFFFFFF;
+ *cmds++ = 0xFFFFFFFF;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index eb936f8..8a132df 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -830,9 +830,6 @@
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
- *cmds++ = 0;
-
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
@@ -1052,9 +1049,6 @@
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
- *cmds++ = 0;
-
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
@@ -1848,8 +1842,13 @@
/* NQ and External Memory Swap */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
- /* Protected mode error checking */
- GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
+ /* Protected mode error checking
+ * If iommu is used then protection needs to be turned off
+ * to enable context bank switching */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ GSL_RB_WRITE(cmds, cmds_gpu, 0);
+ else
+ GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
/* Disable header dumping and Header dump address */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
/* Header dump size */
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 5187eb1..8362b65 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2335,6 +2335,39 @@
adreno_ringbuffer_submit(rb);
}
+#define VBIF_MAX_CLIENTS 6
+
+static void a3xx_vbif_callback(struct adreno_device *adreno_dev,
+ unsigned int status)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ int i;
+ char str[80], *ptr = str;
+ int slen = sizeof(str) - 1;
+
+ KGSL_DRV_INFO(device, "VBIF error | status=%X\n",
+ status);
+
+ for (i = 0; i < VBIF_MAX_CLIENTS; i++) {
+ if (status & (1 << i)) {
+ unsigned int err;
+ int ret;
+
+ adreno_regwrite(device, A3XX_VBIF_ERR_INFO, i);
+ adreno_regread(device, A3XX_VBIF_ERR_INFO, &err);
+
+ ret = snprintf(ptr, slen, "%d:%8.8X ", i, err);
+ ptr += ret;
+ slen -= ret;
+ }
+ }
+
+ KGSL_DRV_INFO(device, "%s\n", str);
+
+ /* Clear the errors */
+ adreno_regwrite(device, A3XX_VBIF_ERR_CLEAR, status);
+}
+
static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -2511,6 +2544,15 @@
if (status)
adreno_regwrite(&adreno_dev->dev, A3XX_RBBM_INT_CLEAR_CMD,
status);
+
+ /* Check for VBIF errors */
+ adreno_regread(&adreno_dev->dev, A3XX_VBIF_ERR_PENDING, &status);
+
+ if (status) {
+ a3xx_vbif_callback(adreno_dev, status);
+ ret = IRQ_HANDLED;
+ }
+
return ret;
}
@@ -2518,10 +2560,17 @@
{
struct kgsl_device *device = &adreno_dev->dev;
- if (state)
+ if (state) {
adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
- else
+
+ /* Enable VBIF interrupts - write 0 to enable them all */
+ adreno_regwrite(device, A3XX_VBIF_ERR_MASK, 0);
+ /* Clear outstanding VBIF errors */
+ adreno_regwrite(device, A3XX_VBIF_ERR_CLEAR, 0x3F);
+ } else {
adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
+ adreno_regwrite(device, A3XX_VBIF_ERR_MASK, 0xFFFFFFFF);
+ }
}
static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index ec38f75..7bb65ca 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -725,8 +725,8 @@
current_context));
context = idr_find(&device->context_idr, context_id);
if (context) {
- ts_processed = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
context->id, ts_processed);
} else
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 8d900b0..3d46221 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -928,8 +928,8 @@
" context id is invalid.\n");
return -EINVAL;
}
- retired_timestamp = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ retired_timestamp = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
retired_timestamp);
/*
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2ce2f2b..1a34e80 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -25,6 +25,8 @@
#include <linux/ashmem.h>
#include <linux/major.h>
#include <linux/ion.h>
+#include <linux/io.h>
+#include <mach/socinfo.h>
#include "kgsl.h"
#include "kgsl_debugfs.h"
@@ -76,8 +78,7 @@
if (context == NULL)
return -EINVAL;
}
- cur_ts = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
/* Check to see if the requested timestamp has already fired */
@@ -133,8 +134,7 @@
struct kgsl_event *event, *event_tmp;
unsigned int id, cur;
- cur = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
id = context->id;
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
@@ -171,8 +171,8 @@
if (event->owner != owner)
continue;
- cur = device->ftbl->readtimestamp(device, event->context,
- KGSL_TIMESTAMP_RETIRED);
+ cur = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
/*
@@ -361,31 +361,58 @@
return NULL;
}
+ kref_init(&context->refcount);
context->id = id;
context->dev_priv = dev_priv;
return context;
}
-static void
-kgsl_destroy_context(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context)
+/**
+ * kgsl_context_detach - Release the "master" context reference
+ * @context - The context that will be detached
+ *
+ * This is called when a context becomes unusable, because userspace
+ * has requested for it to be destroyed. The context itself may
+ * exist a bit longer until its reference count goes to zero.
+ * Other code referencing the context can detect that it has been
+ * detached because the context id will be set to KGSL_CONTEXT_INVALID.
+ */
+void
+kgsl_context_detach(struct kgsl_context *context)
{
int id;
-
+ struct kgsl_device *device;
if (context == NULL)
return;
-
- /* Fire a bug if the devctxt hasn't been freed */
- BUG_ON(context->devctxt);
-
+ device = context->dev_priv->device;
+ trace_kgsl_context_detach(device, context);
id = context->id;
- kfree(context);
- idr_remove(&dev_priv->device->context_idr, id);
+ if (device->ftbl->drawctxt_destroy)
+ device->ftbl->drawctxt_destroy(device, context);
+ /*device specific drawctxt_destroy MUST clean up devctxt */
+ BUG_ON(context->devctxt);
+ /*
+ * Cancel events after the device-specific context is
+ * destroyed, to avoid possibly freeing memory while
+ * it is still in use by the GPU.
+ */
+ kgsl_cancel_events_ctxt(device, context);
+ idr_remove(&device->context_idr, id);
+ context->id = KGSL_CONTEXT_INVALID;
+ kgsl_context_put(context);
}
-static void kgsl_timestamp_expired(struct work_struct *work)
+void
+kgsl_context_destroy(struct kref *kref)
+{
+ struct kgsl_context *context = container_of(kref, struct kgsl_context,
+ refcount);
+ kfree(context);
+}
+
+void kgsl_timestamp_expired(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
ts_expired_ws);
@@ -397,8 +424,8 @@
/* Process expired events */
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
- ts_processed = device->ftbl->readtimestamp(device,
- event->context, KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
if (timestamp_cmp(ts_processed, event->timestamp) < 0)
continue;
@@ -415,6 +442,7 @@
mutex_unlock(&device->mutex);
}
+EXPORT_SYMBOL(kgsl_timestamp_expired);
static void kgsl_check_idle_locked(struct kgsl_device *device)
{
@@ -492,8 +520,8 @@
{
unsigned int ts_processed;
- ts_processed = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
@@ -755,23 +783,23 @@
mutex_lock(&device->mutex);
kgsl_check_suspended(device);
- /* clean up any to-be-freed entries that belong to this
- * process and this device
- */
- kgsl_cancel_events(device, dev_priv);
-
while (1) {
context = idr_get_next(&device->context_idr, &next);
if (context == NULL)
break;
- if (context->dev_priv == dev_priv) {
- device->ftbl->drawctxt_destroy(device, context);
- kgsl_destroy_context(dev_priv, context);
- }
+ if (context->dev_priv == dev_priv)
+ kgsl_context_detach(context);
next = next + 1;
}
+ /*
+ * Clean up any to-be-freed entries that belong to this
+ * process and this device. This is done after the context
+ * are destroyed to avoid possibly freeing memory while
+ * it is still in use by the GPU.
+ */
+ kgsl_cancel_events(device, dev_priv);
device->open_count--;
if (device->open_count == 0) {
@@ -992,19 +1020,25 @@
unsigned int timeout)
{
int result = 0;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
/* Set the active count so that suspend doesn't do the wrong thing */
- dev_priv->device->active_cnt++;
+ device->active_cnt++;
- trace_kgsl_waittimestamp_entry(dev_priv->device,
- context ? context->id : KGSL_MEMSTORE_GLOBAL,
- timestamp, timeout);
+ trace_kgsl_waittimestamp_entry(device, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp, timeout);
- result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
+ result = device->ftbl->waittimestamp(dev_priv->device,
context, timestamp, timeout);
- trace_kgsl_waittimestamp_exit(dev_priv->device, result);
+ trace_kgsl_waittimestamp_exit(device,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ result);
/* Fire off any pending suspend operations that are in flight */
@@ -1021,7 +1055,7 @@
{
struct kgsl_device_waittimestamp *param = data;
- return _device_waittimestamp(dev_priv, KGSL_MEMSTORE_GLOBAL,
+ return _device_waittimestamp(dev_priv, NULL,
param->timestamp, param->timeout);
}
@@ -1031,6 +1065,7 @@
{
struct kgsl_device_waittimestamp_ctxtid *param = data;
struct kgsl_context *context;
+ int result;
context = kgsl_find_context(dev_priv, param->context_id);
if (context == NULL) {
@@ -1038,9 +1073,16 @@
param->context_id);
return -EINVAL;
}
-
- return _device_waittimestamp(dev_priv, context,
+ /*
+ * A reference count is needed here, because waittimestamp may
+ * block with the device mutex unlocked and userspace could
+ * request for the context to be destroyed during that time.
+ */
+ kgsl_context_get(context);
+ result = _device_waittimestamp(dev_priv, context,
param->timestamp, param->timeout);
+ kgsl_context_put(context);
+ return result;
}
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
@@ -1114,7 +1156,7 @@
¶m->timestamp,
param->flags);
- trace_kgsl_issueibcmds(dev_priv->device, param, result);
+ trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);
free_ibdesc:
kfree(ibdesc);
@@ -1127,8 +1169,7 @@
struct kgsl_context *context, unsigned int type,
unsigned int *timestamp)
{
- *timestamp = dev_priv->device->ftbl->readtimestamp(dev_priv->device,
- context, type);
+ *timestamp = kgsl_readtimestamp(dev_priv->device, context, type);
trace_kgsl_readtimestamp(dev_priv->device,
context ? context->id : KGSL_MEMSTORE_GLOBAL,
@@ -1172,7 +1213,7 @@
spin_lock(&entry->priv->mem_lock);
rb_erase(&entry->node, &entry->priv->mem_rb);
spin_unlock(&entry->priv->mem_lock);
- trace_kgsl_mem_timestamp_free(entry, id, timestamp);
+ trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
kgsl_mem_entry_detach_process(entry);
}
@@ -1183,27 +1224,25 @@
int result = 0;
struct kgsl_mem_entry *entry = NULL;
struct kgsl_device *device = dev_priv->device;
- unsigned int cur;
unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
spin_lock(&dev_priv->process_priv->mem_lock);
entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
spin_unlock(&dev_priv->process_priv->mem_lock);
- if (entry) {
- cur = device->ftbl->readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
-
- trace_kgsl_mem_timestamp_queue(entry, context_id, cur);
- result = kgsl_add_event(dev_priv->device, context_id,
- timestamp, kgsl_freemem_event_cb,
- entry, dev_priv);
- } else {
+ if (!entry) {
KGSL_DRV_ERR(dev_priv->device,
- "invalid gpuaddr %08x\n", gpuaddr);
+ "invalid gpuaddr %08x\n", gpuaddr);
result = -EINVAL;
+ goto done;
}
-
+ trace_kgsl_mem_timestamp_queue(device, entry, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp);
+ result = kgsl_add_event(dev_priv->device, context_id, timestamp,
+ kgsl_freemem_event_cb, entry, dev_priv);
+done:
return result;
}
@@ -1250,15 +1289,18 @@
goto done;
}
- if (dev_priv->device->ftbl->drawctxt_create)
+ if (dev_priv->device->ftbl->drawctxt_create) {
result = dev_priv->device->ftbl->drawctxt_create(
dev_priv->device, dev_priv->process_priv->pagetable,
context, param->flags);
-
+ if (result)
+ goto done;
+ }
+ trace_kgsl_context_create(dev_priv->device, context, param->flags);
param->drawctxt_id = context->id;
done:
if (result && context)
- kgsl_destroy_context(dev_priv, context);
+ kgsl_context_detach(context);
return result;
}
@@ -1277,14 +1319,7 @@
goto done;
}
- kgsl_cancel_events_ctxt(dev_priv->device, context);
-
- if (dev_priv->device->ftbl->drawctxt_destroy)
- dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
- context);
-
- kgsl_destroy_context(dev_priv, context);
-
+ kgsl_context_detach(context);
done:
return result;
}
@@ -2334,7 +2369,7 @@
};
EXPORT_SYMBOL(kgsl_driver);
-void kgsl_unregister_device(struct kgsl_device *device)
+static void _unregister_device(struct kgsl_device *device)
{
int minor;
@@ -2343,43 +2378,15 @@
if (device == kgsl_driver.devp[minor])
break;
}
-
- mutex_unlock(&kgsl_driver.devlock);
-
- if (minor == KGSL_DEVICE_MAX)
- return;
-
- kgsl_device_snapshot_close(device);
-
- kgsl_cffdump_close(device->id);
- kgsl_pwrctrl_uninit_sysfs(device);
-
- wake_lock_destroy(&device->idle_wakelock);
- pm_qos_remove_request(&device->pm_qos_req_dma);
-
- idr_destroy(&device->context_idr);
-
- if (device->memstore.hostptr)
- kgsl_sharedmem_free(&device->memstore);
-
- kgsl_mmu_close(device);
-
- if (device->work_queue) {
- destroy_workqueue(device->work_queue);
- device->work_queue = NULL;
+ if (minor != KGSL_DEVICE_MAX) {
+ device_destroy(kgsl_driver.class,
+ MKDEV(MAJOR(kgsl_driver.major), minor));
+ kgsl_driver.devp[minor] = NULL;
}
-
- device_destroy(kgsl_driver.class,
- MKDEV(MAJOR(kgsl_driver.major), minor));
-
- mutex_lock(&kgsl_driver.devlock);
- kgsl_driver.devp[minor] = NULL;
mutex_unlock(&kgsl_driver.devlock);
}
-EXPORT_SYMBOL(kgsl_unregister_device);
-int
-kgsl_register_device(struct kgsl_device *device)
+static int _register_device(struct kgsl_device *device)
{
int minor, ret;
dev_t dev;
@@ -2393,7 +2400,6 @@
break;
}
}
-
mutex_unlock(&kgsl_driver.devlock);
if (minor == KGSL_DEVICE_MAX) {
@@ -2409,75 +2415,17 @@
device->name);
if (IS_ERR(device->dev)) {
+ mutex_lock(&kgsl_driver.devlock);
+ kgsl_driver.devp[minor] = NULL;
+ mutex_unlock(&kgsl_driver.devlock);
ret = PTR_ERR(device->dev);
KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
- goto err_devlist;
+ return ret;
}
dev_set_drvdata(device->parentdev, device);
-
- /* Generic device initialization */
- init_waitqueue_head(&device->wait_queue);
-
- kgsl_cffdump_open(device->id);
-
- init_completion(&device->hwaccess_gate);
- init_completion(&device->suspend_gate);
-
- ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
-
- setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
- ret = kgsl_create_device_workqueue(device);
- if (ret)
- goto err_devlist;
-
- INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
- INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);
-
- INIT_LIST_HEAD(&device->events);
-
- device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID;
-
- ret = kgsl_mmu_init(device);
- if (ret != 0)
- goto err_dest_work_q;
-
- ret = kgsl_allocate_contiguous(&device->memstore, KGSL_MEMSTORE_SIZE);
- if (ret != 0)
- goto err_close_mmu;
-
- wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name);
- pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
- idr_init(&device->context_idr);
-
- /* Initalize the snapshot engine */
- kgsl_device_snapshot_init(device);
-
- /* sysfs and debugfs initalization - failure here is non fatal */
-
- /* Initialize logging */
- kgsl_device_debugfs_init(device);
-
- /* Initialize common sysfs entries */
- kgsl_pwrctrl_init_sysfs(device);
-
return 0;
-
-err_close_mmu:
- kgsl_mmu_close(device);
-err_dest_work_q:
- destroy_workqueue(device->work_queue);
- device->work_queue = NULL;
-err_devlist:
- mutex_lock(&kgsl_driver.devlock);
- kgsl_driver.devp[minor] = NULL;
- mutex_unlock(&kgsl_driver.devlock);
-
- return ret;
}
-EXPORT_SYMBOL(kgsl_register_device);
int kgsl_device_platform_probe(struct kgsl_device *device)
{
@@ -2487,7 +2435,12 @@
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
- pm_runtime_enable(device->parentdev);
+ status = _register_device(device);
+ if (status)
+ return status;
+
+ /* Initialize logging first, so that failures below actually print. */
+ kgsl_device_debugfs_init(device);
status = kgsl_pwrctrl_init(device);
if (status)
@@ -2512,29 +2465,40 @@
device->reg_phys = res->start;
device->reg_len = resource_size(res);
- if (!request_mem_region(device->reg_phys, device->reg_len,
- device->name)) {
+ if (!devm_request_mem_region(device->dev, device->reg_phys,
+ device->reg_len, device->name)) {
KGSL_DRV_ERR(device, "request_mem_region failed\n");
status = -ENODEV;
goto error_pwrctrl_close;
}
- device->reg_virt = ioremap(device->reg_phys, device->reg_len);
+ device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
+ device->reg_len);
if (device->reg_virt == NULL) {
KGSL_DRV_ERR(device, "ioremap failed\n");
status = -ENODEV;
- goto error_release_mem;
+ goto error_pwrctrl_close;
+ }
+	/* acquire interrupt */
+ device->pwrctrl.interrupt_num =
+ platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
+
+ if (device->pwrctrl.interrupt_num <= 0) {
+ KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
+ device->pwrctrl.interrupt_num);
+ status = -EINVAL;
+ goto error_pwrctrl_close;
}
- status = request_irq(device->pwrctrl.interrupt_num, kgsl_irq_handler,
- IRQF_TRIGGER_HIGH, device->name, device);
+ status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
+ kgsl_irq_handler, IRQF_TRIGGER_HIGH,
+ device->name, device);
if (status) {
KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
device->pwrctrl.interrupt_num, status);
- goto error_iounmap;
+ goto error_pwrctrl_close;
}
- device->pwrctrl.have_irq = 1;
disable_irq(device->pwrctrl.interrupt_num);
KGSL_DRV_INFO(device,
@@ -2544,38 +2508,78 @@
result = kgsl_drm_init(pdev);
if (result)
- goto error_iounmap;
+ goto error_pwrctrl_close;
- status = kgsl_register_device(device);
- if (!status)
- return status;
+ kgsl_cffdump_open(device->id);
- free_irq(device->pwrctrl.interrupt_num, NULL);
- device->pwrctrl.have_irq = 0;
-error_iounmap:
- iounmap(device->reg_virt);
- device->reg_virt = NULL;
-error_release_mem:
- release_mem_region(device->reg_phys, device->reg_len);
+ setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+ status = kgsl_create_device_workqueue(device);
+ if (status)
+ goto error_pwrctrl_close;
+
+ status = kgsl_mmu_init(device);
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
+ goto error_dest_work_q;
+ }
+
+ status = kgsl_allocate_contiguous(&device->memstore,
+ sizeof(struct kgsl_devmemstore));
+
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
+ status);
+ goto error_close_mmu;
+ }
+
+ wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name);
+ pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+	/* Initialize the snapshot engine */
+ kgsl_device_snapshot_init(device);
+
+ /* Initialize common sysfs entries */
+ kgsl_pwrctrl_init_sysfs(device);
+
+ return 0;
+
+error_close_mmu:
+ kgsl_mmu_close(device);
+error_dest_work_q:
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
error_pwrctrl_close:
kgsl_pwrctrl_close(device);
error:
+ _unregister_device(device);
return status;
}
EXPORT_SYMBOL(kgsl_device_platform_probe);
void kgsl_device_platform_remove(struct kgsl_device *device)
{
- kgsl_unregister_device(device);
+ kgsl_device_snapshot_close(device);
- if (device->reg_virt != NULL) {
- iounmap(device->reg_virt);
- device->reg_virt = NULL;
- release_mem_region(device->reg_phys, device->reg_len);
+ kgsl_cffdump_close(device->id);
+ kgsl_pwrctrl_uninit_sysfs(device);
+
+ wake_lock_destroy(&device->idle_wakelock);
+ pm_qos_remove_request(&device->pm_qos_req_dma);
+
+ idr_destroy(&device->context_idr);
+
+ kgsl_sharedmem_free(&device->memstore);
+
+ kgsl_mmu_close(device);
+
+ if (device->work_queue) {
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
}
kgsl_pwrctrl_close(device);
- pm_runtime_disable(device->parentdev);
+ _unregister_device(device);
}
EXPORT_SYMBOL(kgsl_device_platform_remove);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index da3e4b2..b67f460 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -122,6 +122,8 @@
int (*map_kernel_mem)(struct kgsl_memdesc *);
};
+#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
+
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
@@ -133,6 +135,7 @@
struct scatterlist *sg;
unsigned int sglen;
struct kgsl_memdesc_ops *ops;
+ int flags;
};
/* List of different memory entry types */
@@ -155,7 +158,6 @@
int flags;
void *priv_data;
struct rb_node node;
- uint32_t free_timestamp;
unsigned int context_id;
/* back pointer to private structure under whose context this
* allocation is made */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b42e606..5b2fd31 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -199,7 +199,28 @@
s64 on_time;
};
+void kgsl_timestamp_expired(struct work_struct *work);
+
+#define KGSL_DEVICE_COMMON_INIT(_dev) \
+ .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
+ .suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
+ .recovery_gate = COMPLETION_INITIALIZER((_dev).recovery_gate),\
+ .ts_notifier_list = ATOMIC_NOTIFIER_INIT((_dev).ts_notifier_list),\
+ .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
+ kgsl_idle_check),\
+ .ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
+ kgsl_timestamp_expired),\
+ .context_idr = IDR_INIT((_dev).context_idr),\
+ .events = LIST_HEAD_INIT((_dev).events),\
+ .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
+ .mutex = __MUTEX_INITIALIZER((_dev).mutex),\
+ .state = KGSL_STATE_INIT,\
+ .ver_major = DRIVER_VERSION_MAJOR,\
+ .ver_minor = DRIVER_VERSION_MINOR,\
+ .last_expired_ctxt_id = KGSL_CONTEXT_INVALID
+
struct kgsl_context {
+ struct kref refcount;
uint32_t id;
/* Pointer to the owning device instance */
@@ -273,6 +294,13 @@
return device->ftbl->gpuid(device);
}
+static inline unsigned int kgsl_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ enum kgsl_timestamp_type type)
+{
+ return device->ftbl->readtimestamp(device, context, type);
+}
+
static inline int kgsl_create_device_sysfs_files(struct device *root,
const struct device_attribute **list)
{
@@ -360,4 +388,32 @@
return pdev->dev.platform_data;
}
+/**
+ * kgsl_context_get - Get context reference count
+ * @context
+ *
+ * Asynchronous code that holds a pointer to a context
+ * must hold a reference count on it. The kgsl device
+ * mutex must be held while the context reference count
+ * is changed.
+ */
+static inline void
+kgsl_context_get(struct kgsl_context *context)
+{
+ kref_get(&context->refcount);
+}
+
+void kgsl_context_destroy(struct kref *kref);
+
+/**
+ * kgsl_context_put - Release context reference count
+ * @context
+ *
+ */
+static inline void
+kgsl_context_put(struct kgsl_context *context)
+{
+ kref_put(&context->refcount, kgsl_context_destroy);
+}
+
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 880fde1..429d035 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -21,6 +21,7 @@
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
@@ -410,6 +411,9 @@
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(ptbase),
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+ trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(ptbase),
+ reg & 0x02 ? "WRITE" : "READ");
}
static void *kgsl_gpummu_create_pagetable(void)
@@ -591,7 +595,7 @@
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
- unsigned int range = memdesc->size;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
@@ -708,6 +712,13 @@
return ptbase;
}
+static unsigned int
+kgsl_gpummu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+ return gpummu_pt->base.gpuaddr;
+}
+
struct kgsl_mmu_ops gpummu_ops = {
.mmu_init = kgsl_gpummu_init,
.mmu_close = kgsl_gpummu_close,
@@ -719,6 +730,9 @@
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
.mmu_enable_clk = NULL,
.mmu_disable_clk = NULL,
+ .mmu_get_hwpagetable_asid = NULL,
+ .mmu_get_pt_lsb = NULL,
+ .mmu_get_reg_map_desc = NULL,
};
struct kgsl_mmu_pt_ops gpummu_pt_ops = {
@@ -727,4 +741,5 @@
.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
.mmu_pt_equal = kgsl_gpummu_pt_equal,
+ .mmu_pt_get_base_addr = kgsl_gpummu_pt_get_base_addr,
};
diff --git a/drivers/gpu/msm/kgsl_gpummu.h b/drivers/gpu/msm/kgsl_gpummu.h
index c61a8b2..caa5df1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.h
+++ b/drivers/gpu/msm/kgsl_gpummu.h
@@ -75,9 +75,4 @@
void *kgsl_gpummu_ptpool_init(int entries);
void kgsl_gpummu_ptpool_destroy(void *ptpool);
-static inline unsigned int kgsl_pt_get_base_addr(struct kgsl_pagetable *pt)
-{
- struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
- return gpummu_pt->base.gpuaddr;
-}
#endif /* __KGSL_GPUMMU_H */
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 4f27e6c..2050827 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -23,6 +23,81 @@
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
+#include "adreno_pm4types.h"
+#include "adreno.h"
+#include "kgsl_trace.h"
+
+static struct kgsl_iommu_unit *get_iommu_unit(struct device *dev)
+{
+ int i, j, k;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_mmu *mmu;
+ struct kgsl_iommu *iommu;
+
+ if (kgsl_driver.devp[i] == NULL)
+ continue;
+
+ mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
+ if (mmu == NULL || mmu->priv == NULL)
+ continue;
+
+ iommu = mmu->priv;
+
+ for (j = 0; j < iommu->unit_count; j++) {
+ struct kgsl_iommu_unit *iommu_unit =
+ &iommu->iommu_units[j];
+ for (k = 0; k < iommu_unit->dev_count; k++) {
+ if (iommu_unit->dev[k].dev == dev)
+ return iommu_unit;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
+ struct device *dev)
+{
+ int k;
+
+ for (k = 0; unit && k < unit->dev_count; k++) {
+ if (unit->dev[k].dev == dev)
+ return &(unit->dev[k]);
+ }
+
+ return NULL;
+}
+
+static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long addr, int flags)
+{
+ struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
+ struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
+ unsigned int ptbase, fsr;
+
+ if (!iommu_dev) {
+ KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
+ return -ENOSYS;
+ }
+
+ ptbase = iommu_get_pt_base_addr(domain);
+
+ fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
+ iommu_dev->ctx_id, FSR);
+
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "GPU PAGE FAULT: addr = %lX pid = %d\n",
+ addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
+ KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+ iommu_dev->ctx_id, fsr);
+
+ trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
+ kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);
+
+ return 0;
+}
/*
* kgsl_iommu_disable_clk - Disable iommu clocks
@@ -96,27 +171,81 @@
return ret;
}
+/*
+ * kgsl_iommu_pt_equal - Check if pagetables are equal
+ * @pt - Pointer to pagetable
+ * @pt_base - Address of a pagetable that the IOMMU register is
+ * programmed with
+ *
+ * Checks whether the pt_base is equal to the base address of
+ * the pagetable which is contained in the pt structure
+ * Return - Non-zero if the pagetable addresses are equal else 0
+ */
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
- struct iommu_domain *domain = pt ? pt->priv : NULL;
- return domain && pt_base && ((unsigned int)domain == pt_base);
+ struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+ unsigned int domain_ptbase = iommu_pt ?
+ iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+ /* Only compare the valid address bits of the pt_base */
+ domain_ptbase &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT);
+ pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT);
+ return domain_ptbase && pt_base &&
+ (domain_ptbase == pt_base);
}
+/*
+ * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
+ * @mmu_specific_pt - Pointer to pagetable which is to be freed
+ *
+ * Return - void
+ */
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
- struct iommu_domain *domain = mmu_specific_pt;
- if (domain)
- iommu_domain_free(domain);
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+ if (iommu_pt->domain)
+ iommu_domain_free(iommu_pt->domain);
+ if (iommu_pt->iommu) {
+ if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
+ iommu_pt->iommu->asid_reuse)
+ iommu_pt->iommu->asid_reuse--;
+ if (!iommu_pt->iommu->asid_reuse ||
+ (KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
+ clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
+ }
+ kfree(iommu_pt);
}
+/*
+ * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
+ *
+ * Allocate memory to hold a pagetable and allocate the IOMMU
+ * domain which is the actual IOMMU pagetable
+ * Return - pointer to the new pagetable or NULL on failure
+ */
void *kgsl_iommu_create_pagetable(void)
{
- struct iommu_domain *domain = iommu_domain_alloc(0);
- if (!domain)
- KGSL_CORE_ERR("Failed to create iommu domain\n");
+ struct kgsl_iommu_pt *iommu_pt;
- return domain;
+ iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
+ if (!iommu_pt) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu_pt));
+ return NULL;
+ }
+ iommu_pt->domain = iommu_domain_alloc(MSM_IOMMU_DOMAIN_PT_CACHEABLE);
+ if (!iommu_pt->domain) {
+ KGSL_CORE_ERR("Failed to create iommu domain\n");
+ kfree(iommu_pt);
+ return NULL;
+ } else {
+ iommu_set_fault_handler(iommu_pt->domain,
+ kgsl_iommu_fault_handler);
+ }
+
+ return iommu_pt;
}
/*
@@ -133,25 +262,25 @@
*/
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
- struct iommu_domain *domain;
+ struct kgsl_iommu_pt *iommu_pt;
struct kgsl_iommu *iommu = mmu->priv;
int i, j;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
- domain = mmu->hwpagetable->priv;
+ iommu_pt = mmu->hwpagetable->priv;
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (iommu_unit->dev[j].attached) {
- iommu_detach_device(domain,
+ iommu_detach_device(iommu_pt->domain,
iommu_unit->dev[j].dev);
iommu_unit->dev[j].attached = false;
KGSL_MEM_INFO(mmu->device, "iommu %p detached "
"from user dev of MMU: %p\n",
- domain, mmu);
+ iommu_pt->domain, mmu);
}
}
}
@@ -172,14 +301,14 @@
*/
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
- struct iommu_domain *domain;
+ struct kgsl_iommu_pt *iommu_pt;
struct kgsl_iommu *iommu = mmu->priv;
int i, j, ret = 0;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
- domain = mmu->hwpagetable->priv;
+ iommu_pt = mmu->hwpagetable->priv;
/*
* Loop through all the iommu devcies under all iommu units and
@@ -189,7 +318,7 @@
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (!iommu_unit->dev[j].attached) {
- ret = iommu_attach_device(domain,
+ ret = iommu_attach_device(iommu_pt->domain,
iommu_unit->dev[j].dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
@@ -200,8 +329,8 @@
iommu_unit->dev[j].attached = true;
KGSL_MEM_INFO(mmu->device,
"iommu pt %p attached to dev %p, ctx_id %d\n",
- domain, iommu_unit->dev[j].dev,
- iommu_unit->dev[j].ctx_id);
+ iommu_pt->domain, iommu_unit->dev[j].dev,
+ iommu_unit->dev[j].ctx_id);
}
}
}
@@ -252,6 +381,8 @@
}
iommu_unit->dev[iommu_unit->dev_count].ctx_id =
data->iommu_ctxs[i].ctx_id;
+ iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
+
KGSL_DRV_INFO(mmu->device,
"Obtained dev handle %p for iommu context %s\n",
iommu_unit->dev[iommu_unit->dev_count].dev,
@@ -352,19 +483,66 @@
return ret;
}
+/*
+ * kgsl_iommu_pt_get_base_addr - Get the address of the pagetable that the
+ * IOMMU ttbr0 register is programmed with
+ * @pt - kgsl pagetable pointer that contains the IOMMU domain pointer
+ *
+ * Return - actual pagetable address that the ttbr0 register is programmed
+ * with
+ */
+static unsigned int kgsl_iommu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+ return iommu_get_pt_base_addr(iommu_pt->domain);
+}
+
+/*
+ * kgsl_iommu_get_pt_lsb - Return the lsb of the ttbr0 IOMMU register
+ * @mmu - Pointer to mmu structure
+ * @hostptr - Pointer to the IOMMU register map. This is used to match
+ * the iommu device whose lsb value is to be returned
+ * @ctx_id - The context bank whose lsb value is to be returned
+ * Return - returns the lsb which is the last 14 bits of the ttbr0 IOMMU
+ * register. ttbr0 is the actual PTBR of the IOMMU. The last 14 bits
+ * are programmed only once, when a domain is first attached, and
+ * do not change.
+ */
+static int kgsl_iommu_get_pt_lsb(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++)
+ if (unit_id == i &&
+ ctx_id == iommu_unit->dev[j].ctx_id)
+ return iommu_unit->dev[j].pt_lsb;
+ }
+ return 0;
+}
+
static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable)
{
if (mmu->flags & KGSL_FLAGS_STARTED) {
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_pt *iommu_pt = pagetable->priv;
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
- kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
- kgsl_detach_pagetable_iommu_domain(mmu);
+ unsigned int flags = 0;
mmu->hwpagetable = pagetable;
- if (mmu->hwpagetable)
- kgsl_attach_pagetable_iommu_domain(mmu);
+ /* force tlb flush if asid is reused */
+ if (iommu->asid_reuse &&
+ (KGSL_IOMMU_ASID_REUSE == iommu_pt->asid))
+ flags |= KGSL_MMUFLAGS_TLBFLUSH;
+ flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
+ mmu->device->id);
+ kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
}
}
}
@@ -385,6 +563,14 @@
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
+ iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!iommu->asids) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu));
+ status = -ENOMEM;
+ goto done;
+ }
mmu->priv = iommu;
status = kgsl_get_iommu_ctxt(mmu);
@@ -394,10 +580,17 @@
if (status)
goto done;
+ /* A nop is required in an indirect buffer when switching
+ * pagetables in-stream */
+ kgsl_sharedmem_writel(&mmu->setstate_memory,
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+ cp_nop_packet(1));
+
dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
__func__);
done:
if (status) {
+ kfree(iommu->asids);
kfree(iommu);
mmu->priv = NULL;
}
@@ -418,6 +611,7 @@
int status = 0;
int i = 0;
struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_pt *iommu_pt;
mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
@@ -437,6 +631,14 @@
goto err;
}
}
+ /*
+ * The default pagetable always has asid 0 assigned by the iommu driver
+ * and asid 1 is assigned to the private context.
+ */
+ iommu_pt = mmu->defaultpagetable->priv;
+ iommu_pt->asid = 0;
+ set_bit(0, iommu->asids);
+ set_bit(1, iommu->asids);
return status;
err:
for (i--; i >= 0; i--) {
@@ -454,6 +656,8 @@
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
int status;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
@@ -463,18 +667,64 @@
if (status)
return -ENOMEM;
}
- kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+ /* We use the GPU MMU to control access to IOMMU registers on a225,
+ * hence we still keep the MMU active on a225 */
+ if (adreno_is_a225(ADRENO_DEVICE(mmu->device))) {
+ struct kgsl_mh *mh = &(mmu->device->mh);
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+ kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
+ mh->mpu_base +
+ iommu->iommu_units
+ [iommu->unit_count - 1].reg_map.gpuaddr -
+ PAGE_SIZE);
+ } else {
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+ }
mmu->hwpagetable = mmu->defaultpagetable;
status = kgsl_attach_pagetable_iommu_domain(mmu);
- if (!status) {
- mmu->flags |= KGSL_FLAGS_STARTED;
- } else {
- kgsl_detach_pagetable_iommu_domain(mmu);
+ if (status) {
mmu->hwpagetable = NULL;
+ goto done;
}
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
+ /* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
+ * that value should not change when we change pagetables, so while
+ * changing pagetables we can use this lsb value of the pagetable w/o
+ * having to read it again
+ */
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++)
+ iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
+ KGSL_IOMMU_GET_IOMMU_REG(
+ iommu_unit->reg_map.hostptr,
+ iommu_unit->dev[j].ctx_id,
+ TTBR0));
+ }
+ iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
+ iommu->iommu_units[0].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER,
+ CONTEXTIDR);
+ kgsl_iommu_disable_clk(mmu);
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+done:
+ if (status) {
+ kgsl_iommu_disable_clk(mmu);
+ kgsl_detach_pagetable_iommu_domain(mmu);
+ }
return status;
}
@@ -483,9 +733,8 @@
struct kgsl_memdesc *memdesc)
{
int ret;
- unsigned int range = memdesc->size;
- struct iommu_domain *domain = (struct iommu_domain *)
- mmu_specific_pt;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions purturb the gpuaddr with an offset, so apply the
@@ -496,10 +745,10 @@
if (range == 0 || gpuaddr == 0)
return 0;
- ret = iommu_unmap_range(domain, gpuaddr, range);
+ ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
if (ret)
KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
- "with err: %d\n", domain, gpuaddr,
+ "with err: %d\n", iommu_pt->domain, gpuaddr,
range, ret);
return 0;
@@ -513,20 +762,21 @@
{
int ret;
unsigned int iommu_virt_addr;
- struct iommu_domain *domain = mmu_specific_pt;
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+ int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
- BUG_ON(NULL == domain);
+ BUG_ON(NULL == iommu_pt);
iommu_virt_addr = memdesc->gpuaddr;
- ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
- memdesc->size, (IOMMU_READ | IOMMU_WRITE));
+ ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
+ size, (IOMMU_READ | IOMMU_WRITE));
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
- "failed with err: %d\n", domain,
- iommu_virt_addr, memdesc->sg, memdesc->size,
- 0, ret);
+ "failed with err: %d\n", iommu_pt->domain,
+ iommu_virt_addr, memdesc->sg, size,
+ (IOMMU_READ | IOMMU_WRITE), ret);
return ret;
}
@@ -550,6 +800,7 @@
*/
if (mmu->flags & KGSL_FLAGS_STARTED) {
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
/* detach iommu attachment */
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->hwpagetable = NULL;
@@ -573,6 +824,8 @@
}
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ kfree(iommu->asids);
+ kfree(iommu);
return 0;
}
@@ -592,17 +845,168 @@
KGSL_IOMMU_TTBR0_PA_SHIFT);
}
+/*
+ * kgsl_iommu_get_hwpagetable_asid - Returns asid(application space ID) for a
+ * pagetable
+ * @mmu - Pointer to mmu structure
+ *
+ * Allocates an asid to an IOMMU domain if it does not already have one. asids
+ * are unique identifiers for pagetable that can be used to selectively flush
+ * tlb entries of the IOMMU unit.
+ * Return - asid to be used with the IOMMU domain
+ */
+static int kgsl_iommu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_pt *iommu_pt = mmu->hwpagetable->priv;
+
+ /*
+ * If the iommu pagetable does not have any asid assigned and is not the
+ * default pagetable then assign asid.
+ */
+ if (!iommu_pt->asid && iommu_pt != mmu->defaultpagetable->priv) {
+ iommu_pt->asid = find_first_zero_bit(iommu->asids,
+ KGSL_IOMMU_MAX_ASIDS);
+ /* No free bits means reuse asid */
+ if (iommu_pt->asid >= KGSL_IOMMU_MAX_ASIDS) {
+ iommu_pt->asid = KGSL_IOMMU_ASID_REUSE;
+ iommu->asid_reuse++;
+ }
+ set_bit(iommu_pt->asid, iommu->asids);
+ /*
+ * Store pointer to asids list so that during pagetable destroy
+ * the asid assigned to this pagetable may be cleared
+ */
+ iommu_pt->iommu = iommu;
+ }
+ /* Return the asid + the constant part of asid that never changes */
+ return (iommu_pt->asid & (KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
+ KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT)) +
+ (iommu->asid & ~(KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
+ KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT));
+}
+
+/*
+ * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
+ * of the primary context bank
+ * @mmu - Pointer to mmu structure
+ * @flags - Flags indicating whether the pagetable has to change or the tlb
+ * is to be flushed, or both
+ *
+ * Based on flags, set the new pagetable for the IOMMU unit or flush its tlb
+ * or do both by doing direct register writes to the IOMMU registers through
+ * the cpu
+ * Return - void
+ */
+static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int temp;
+ int i;
+ unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
+ mmu->hwpagetable);
+ unsigned int pt_val;
+
+ if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+ KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
+ return;
+ }
+ /* Mask off the lsb of the pt base address since lsb will not change */
+ pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
+ for (i = 0; i < iommu->unit_count; i++) {
+ /* get the lsb value which should not change when
+ * changing ttbr0 */
+ pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+ pt_val += pt_base;
+
+ KGSL_IOMMU_SET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
+
+ mb();
+ temp = KGSL_IOMMU_GET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, TTBR0);
+ /* Set asid */
+ KGSL_IOMMU_SET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
+ kgsl_iommu_get_hwpagetable_asid(mmu));
+ mb();
+ temp = KGSL_IOMMU_GET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
+ }
+ }
+ /* Flush tlb */
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ for (i = 0; i < iommu->unit_count; i++) {
+ KGSL_IOMMU_SET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
+ kgsl_iommu_get_hwpagetable_asid(mmu));
+ mb();
+ }
+ }
+ /* Disable smmu clock */
+ kgsl_iommu_disable_clk(mmu);
+}
+
+/*
+ * kgsl_iommu_get_reg_map_desc - Returns an array of pointers that contain
+ * the address of memory descriptors which map the IOMMU registers
+ * @mmu - Pointer to mmu structure
+ * @reg_map_desc - Out parameter in which the address of the array containing
+ * pointers to register map descriptors is returned. The caller is supposed
+ * to free this array
+ *
+ * Return - The number of iommu units which is also the number of register
+ * mapped descriptor arrays which the out parameter will have
+ */
+static int kgsl_iommu_get_reg_map_desc(struct kgsl_mmu *mmu,
+ void **reg_map_desc)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ void **reg_desc_ptr;
+ int i;
+
+ /*
+ * Alocate array of pointers that will hold address of the register map
+ * descriptors
+ */
+ reg_desc_ptr = kmalloc(iommu->unit_count *
+ sizeof(struct kgsl_memdesc *), GFP_KERNEL);
+ if (!reg_desc_ptr) {
+ KGSL_CORE_ERR("Failed to kmalloc(%d)\n",
+ iommu->unit_count * sizeof(struct kgsl_memdesc *));
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < iommu->unit_count; i++)
+ reg_desc_ptr[i] = &(iommu->iommu_units[i].reg_map);
+
+ *reg_map_desc = reg_desc_ptr;
+ return i;
+}
+
struct kgsl_mmu_ops iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
.mmu_start = kgsl_iommu_start,
.mmu_stop = kgsl_iommu_stop,
.mmu_setstate = kgsl_iommu_setstate,
- .mmu_device_setstate = NULL,
+ .mmu_device_setstate = kgsl_iommu_default_setstate,
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk = kgsl_iommu_disable_clk,
+ .mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
+ .mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
+ .mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
@@ -611,4 +1015,5 @@
.mmu_create_pagetable = kgsl_iommu_create_pagetable,
.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
.mmu_pt_equal = kgsl_iommu_pt_equal,
+ .mmu_pt_get_base_addr = kgsl_iommu_pt_get_base_addr,
};
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 5a92f513..efc3d9c 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -18,6 +18,8 @@
/* IOMMU registers and masks */
#define KGSL_IOMMU_TTBR0 0x10
#define KGSL_IOMMU_TTBR1 0x14
+#define KGSL_IOMMU_FSR 0x20
+
#define KGSL_IOMMU_TTBR0_PA_MASK 0x0003FFFF
#define KGSL_IOMMU_TTBR0_PA_SHIFT 14
#define KGSL_IOMMU_CTX_TLBIALL 0x800
@@ -26,6 +28,10 @@
#define KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT 0
#define KGSL_IOMMU_CTX_TLBIASID 0x804
#define KGSL_IOMMU_CTX_SHIFT 12
+
+#define KGSL_IOMMU_MAX_ASIDS 256
+#define KGSL_IOMMU_ASID_REUSE 2
+
/*
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -35,6 +41,25 @@
/* Max number of iommu contexts per IOMMU unit */
#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
+/* Macros to read/write IOMMU registers */
+#define KGSL_IOMMU_SET_IOMMU_REG(base_addr, ctx, REG, val) \
+ writel_relaxed(val, base_addr + \
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ KGSL_IOMMU_##REG)
+
+#define KGSL_IOMMU_GET_IOMMU_REG(base_addr, ctx, REG) \
+ readl_relaxed(base_addr + \
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ KGSL_IOMMU_##REG)
+
+/* Gets the lsb value of pagetable */
+#define KGSL_IOMMMU_PT_LSB(pt_val) \
+ (pt_val & ~(KGSL_IOMMU_TTBR0_PA_MASK << \
+ KGSL_IOMMU_TTBR0_PA_SHIFT))
+
+/* offset at which a nop command is placed in setstate_memory */
+#define KGSL_IOMMU_SETSTATE_NOP_OFFSET 1024
+
/*
* struct kgsl_iommu_device - Structure holding data about iommu contexts
* @dev: Device pointer to iommu context
@@ -52,6 +77,7 @@
unsigned int pt_lsb;
enum kgsl_iommu_context_id ctx_id;
bool clk_enabled;
+ struct kgsl_device *kgsldev;
};
/*
@@ -81,6 +107,7 @@
* @asids: A bit structure indicating which id's are presently used
* @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
* is first attached
+ * asid_reuse: Holds the number of times the reuse asid is reused
*/
struct kgsl_iommu {
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
@@ -89,7 +116,19 @@
struct kgsl_device *device;
unsigned long *asids;
unsigned int asid;
- unsigned int active_ctx;
+ unsigned int asid_reuse;
+};
+
+/*
+ * struct kgsl_iommu_pt - Iommu pagetable structure private to kgsl driver
+ * @domain: Pointer to the iommu domain that contains the iommu pagetable
+ * @iommu: Pointer to iommu structure
+ * @asid: The asid assigned to this domain
+ */
+struct kgsl_iommu_pt {
+ struct iommu_domain *domain;
+ struct kgsl_iommu *iommu;
+ unsigned int asid;
};
#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 5fdc182..9092b96 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -453,10 +453,11 @@
*/
if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
(KGSL_MMU_GLOBAL_PT == name)) {
- pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
+ pagetable->kgsl_pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT,
+ -1);
if (pagetable->kgsl_pool == NULL) {
KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
- PAGE_SHIFT);
+ KGSL_MMU_ALIGN_SHIFT);
goto err_alloc;
}
if (gen_pool_add(pagetable->kgsl_pool,
@@ -467,9 +468,10 @@
}
}
- pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
+ pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
if (pagetable->pool == NULL) {
- KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
+ KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
+ KGSL_MMU_ALIGN_SHIFT);
goto err_kgsl_pool;
}
@@ -521,11 +523,8 @@
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return (void *)(-1);
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
- name = KGSL_MMU_GLOBAL_PT;
-#else
- name = KGSL_MMU_GLOBAL_PT;
+#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+ name = KGSL_MMU_GLOBAL_PT;
#endif
pt = kgsl_get_pagetable(name);
@@ -580,12 +579,23 @@
*/
}
+static inline struct gen_pool *
+_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
+{
+ if (pagetable->kgsl_pool &&
+ (KGSL_MEMFLAGS_GLOBAL & flags))
+ return pagetable->kgsl_pool;
+ return pagetable->pool;
+}
+
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
int ret;
+ struct gen_pool *pool;
+ int size;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
if (memdesc->sglen == 1) {
@@ -605,21 +615,17 @@
}
}
- /* Allocate from kgsl pool if it exists for global mappings */
- if (pagetable->kgsl_pool &&
- (KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
- memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->kgsl_pool,
- memdesc->size, KGSL_MMU_ALIGN_SHIFT);
- else
- memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
- memdesc->size, KGSL_MMU_ALIGN_SHIFT);
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+ /* Allocate from kgsl pool if it exists for global mappings */
+ pool = _get_pool(pagetable, memdesc->priv);
+
+ memdesc->gpuaddr = gen_pool_alloc(pool, size);
if (memdesc->gpuaddr == 0) {
KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
- memdesc->size,
- ((pagetable->kgsl_pool &&
- (KGSL_MEMFLAGS_GLOBAL & memdesc->priv)) ?
- "kgsl_pool" : "general_pool"));
+ size,
+ (pool == pagetable->kgsl_pool) ?
+ "kgsl_pool" : "general_pool");
KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
pagetable->name, pagetable->stats.mapped,
pagetable->stats.entries);
@@ -641,7 +647,7 @@
KGSL_STATS_ADD(1, pagetable->stats.entries,
pagetable->stats.max_entries);
- KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
+ KGSL_STATS_ADD(size, pagetable->stats.mapped,
pagetable->stats.max_mapped);
spin_unlock(&pagetable->lock);
@@ -650,7 +656,7 @@
err_free_gpuaddr:
spin_unlock(&pagetable->lock);
- gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
memdesc->gpuaddr = 0;
return ret;
}
@@ -660,6 +666,9 @@
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
+ struct gen_pool *pool;
+ int size;
+
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return 0;
@@ -667,6 +676,9 @@
memdesc->gpuaddr = 0;
return 0;
}
+
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
spin_lock(&pagetable->lock);
pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
@@ -674,19 +686,12 @@
spin_lock(&pagetable->lock);
/* Remove the statistics */
pagetable->stats.entries--;
- pagetable->stats.mapped -= memdesc->size;
+ pagetable->stats.mapped -= size;
spin_unlock(&pagetable->lock);
- if (pagetable->kgsl_pool &&
- (KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
- gen_pool_free(pagetable->kgsl_pool,
- memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
- memdesc->size);
- else
- gen_pool_free(pagetable->pool,
- memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
- memdesc->size);
+ pool = _get_pool(pagetable, memdesc->priv);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
/*
* Don't clear the gpuaddr on global mappings because they
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index fc64629..2db327b 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -134,6 +134,12 @@
(struct kgsl_mmu *mmu);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
+ int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
+ int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id);
+ int (*mmu_get_reg_map_desc)(struct kgsl_mmu *mmu,
+ void **reg_map_desc);
};
struct kgsl_mmu_pt_ops {
@@ -147,6 +153,8 @@
void (*mmu_destroy_pagetable) (void *pt);
int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
unsigned int pt_base);
+ unsigned int (*mmu_pt_get_base_addr)
+ (struct kgsl_pagetable *pt);
};
struct kgsl_mmu {
@@ -236,4 +244,54 @@
return pt->pt_ops->mmu_pt_equal(pt, pt_base);
}
+static inline unsigned int kgsl_mmu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_get_mmutype())
+ return 0;
+ else
+ return pt->pt_ops->mmu_pt_get_base_addr(pt);
+}
+
+static inline int kgsl_mmu_get_reg_map_desc(struct kgsl_mmu *mmu,
+ void **reg_map_desc)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_map_desc)
+ return mmu->mmu_ops->mmu_get_reg_map_desc(mmu, reg_map_desc);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_get_pt_lsb(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_lsb)
+ return mmu->mmu_ops->mmu_get_pt_lsb(mmu, unit_id, ctx_id);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_hwpagetable_asid)
+ return mmu->mmu_ops->mmu_get_hwpagetable_asid(mmu);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
+ int ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
+ return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
+ else
+ return 0;
+}
+
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
+ mmu->mmu_ops->mmu_disable_clk(mmu);
+}
+
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 15a0252..2aaefba 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -11,6 +11,7 @@
*
*/
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
@@ -403,7 +404,7 @@
trace_kgsl_bus(device, state);
if (pwr->ebi1_clk) {
clk_set_rate(pwr->ebi1_clk, 0);
- clk_disable(pwr->ebi1_clk);
+ clk_disable_unprepare(pwr->ebi1_clk);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
@@ -414,7 +415,7 @@
&pwr->power_flags)) {
trace_kgsl_bus(device, state);
if (pwr->ebi1_clk) {
- clk_enable(pwr->ebi1_clk);
+ clk_prepare_enable(pwr->ebi1_clk);
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
@@ -552,17 +553,8 @@
}
}
- /*acquire interrupt */
- pwr->interrupt_num =
- platform_get_irq_byname(pdev, pwr->irq_name);
- if (pwr->interrupt_num <= 0) {
- KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
- pwr->interrupt_num);
- result = -EINVAL;
- goto done;
- }
-
+ pm_runtime_enable(device->parentdev);
register_early_suspend(&device->display_off);
return result;
@@ -582,16 +574,9 @@
KGSL_PWR_INFO(device, "close device %d\n", device->id);
+ pm_runtime_disable(device->parentdev);
unregister_early_suspend(&device->display_off);
- if (pwr->interrupt_num > 0) {
- if (pwr->have_irq) {
- free_irq(pwr->interrupt_num, NULL);
- pwr->have_irq = 0;
- }
- pwr->interrupt_num = 0;
- }
-
clk_put(pwr->ebi1_clk);
if (pwr->pcl)
@@ -721,6 +706,7 @@
}
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
+ kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
if (device->idle_wakelock.name)
wake_unlock(&device->idle_wakelock);
@@ -764,6 +750,7 @@
gpu_freq);
_sleep_accounting(device);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
wake_unlock(&device->idle_wakelock);
pm_qos_update_request(&device->pm_qos_req_dma,
@@ -903,6 +890,7 @@
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_mmu_disable_clk(&device->mmu);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 6325b66..1e5c21c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -39,7 +39,6 @@
struct kgsl_pwrctrl {
int interrupt_num;
- int have_irq;
struct clk *ebi1_clk;
struct clk *grp_clks[KGSL_MAX_CLKS];
unsigned long power_flags;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a51f29f..8829102 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -63,6 +63,13 @@
}
+/*
+ * One page allocation for a guard region to protect against over-zealous
+ * GPU pre-fetch
+ */
+
+static struct page *kgsl_guard_page;
+
/**
* Given a kobj, find the process structure attached to it
*/
@@ -333,13 +340,20 @@
{
int i = 0;
struct scatterlist *sg;
+ int sglen = memdesc->sglen;
+
+ /* Don't free the guard page if it was used */
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
kgsl_driver.stats.page_alloc -= memdesc->size;
+
if (memdesc->hostptr) {
vunmap(memdesc->hostptr);
kgsl_driver.stats.vmalloc -= memdesc->size;
}
if (memdesc->sg)
- for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
+ for_each_sg(memdesc->sg, sg, sglen, i)
__free_page(sg_page(sg));
}
@@ -362,17 +376,23 @@
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
struct page **pages = NULL;
struct scatterlist *sg;
+ int sglen = memdesc->sglen;
int i;
+
+ /* Don't map the guard page if it exists */
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
/* create a list of pages to call vmap */
- pages = vmalloc(memdesc->sglen * sizeof(struct page *));
+ pages = vmalloc(sglen * sizeof(struct page *));
if (!pages) {
KGSL_CORE_ERR("vmalloc(%d) failed\n",
- memdesc->sglen * sizeof(struct page *));
+ sglen * sizeof(struct page *));
return -ENOMEM;
}
- for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
+ for_each_sg(memdesc->sg, sg, sglen, i)
pages[i] = sg_page(sg);
- memdesc->hostptr = vmap(pages, memdesc->sglen,
+ memdesc->hostptr = vmap(pages, sglen,
VM_IOREMAP, page_prot);
KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
@@ -471,6 +491,14 @@
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
int i;
+ /*
+ * Add guard page to the end of the allocation when the
+ * IOMMU is in use.
+ */
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ sglen++;
+
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->priv = KGSL_MEMFLAGS_CACHED;
@@ -490,7 +518,7 @@
memdesc->sglen = sglen;
sg_init_table(memdesc->sg, sglen);
- for (i = 0; i < memdesc->sglen; i++) {
+ for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
__GFP_HIGHMEM);
if (!page) {
@@ -501,6 +529,22 @@
flush_dcache_page(page);
sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
}
+
+ /* Add the guard page to the end of the sglist */
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+ if (kgsl_guard_page == NULL)
+ kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
+ __GFP_HIGHMEM);
+
+ if (kgsl_guard_page != NULL) {
+ sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
+ PAGE_SIZE, 0);
+ memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
+ } else
+ memdesc->sglen--;
+ }
+
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
KGSL_CACHE_OP_FLUSH);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index fb8dd95..034ade4 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -157,4 +157,15 @@
return ret;
}
+static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
+{
+ int i, size = 0;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, sglen, i) {
+ size += s->length;
+ }
+
+ return size;
+}
#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index f61c74f..080cb15 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -63,10 +63,10 @@
* return the global timestamp for all contexts
*/
- header->timestamp_queued = device->ftbl->readtimestamp(device,
- context, KGSL_TIMESTAMP_QUEUED);
- header->timestamp_retired = device->ftbl->readtimestamp(device,
- context, KGSL_TIMESTAMP_RETIRED);
+ header->timestamp_queued = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_QUEUED);
+ header->timestamp_retired = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
_ctxtptr += sizeof(struct kgsl_snapshot_linux_context);
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 84d7f94..60231f6 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -22,6 +22,7 @@
#define TRACE_INCLUDE_FILE kgsl_trace
#include <linux/tracepoint.h>
+#include "kgsl_device.h"
struct kgsl_device;
struct kgsl_ringbuffer_issueibcmds;
@@ -33,9 +34,11 @@
TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
- struct kgsl_ringbuffer_issueibcmds *cmd, int result),
+ struct kgsl_ringbuffer_issueibcmds *cmd,
+ struct kgsl_ibdesc *ibdesc,
+ int result),
- TP_ARGS(device, cmd, result),
+ TP_ARGS(device, cmd, ibdesc, result),
TP_STRUCT__entry(
__string(device_name, device->name)
@@ -50,7 +53,7 @@
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->drawctxt_id = cmd->drawctxt_id;
- __entry->ibdesc_addr = cmd->ibdesc_addr;
+ __entry->ibdesc_addr = ibdesc[0].gpuaddr;
__entry->numibs = cmd->numibs;
__entry->timestamp = cmd->timestamp;
__entry->flags = cmd->flags;
@@ -58,14 +61,19 @@
),
TP_printk(
- "d_name=%s ctx=%u ib=%u numibs=%u timestamp=%u "
- "flags=%u result=%d",
+ "d_name=%s ctx=%u ib=0x%x numibs=%u timestamp=0x%x "
+ "flags=0x%x(%s) result=%d",
__get_str(device_name),
__entry->drawctxt_id,
__entry->ibdesc_addr,
__entry->numibs,
__entry->timestamp,
__entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" },
+ { KGSL_CONTEXT_SUBMIT_IB_LIST, "IB_LIST" },
+ { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" })
+ : "None",
__entry->result
)
);
@@ -97,7 +105,7 @@
),
TP_printk(
- "d_name=%s context_id=%u type=%u timestamp=%u",
+ "d_name=%s context_id=%u type=%u timestamp=0x%x",
__get_str(device_name),
__entry->context_id,
__entry->type,
@@ -112,30 +120,34 @@
TP_PROTO(struct kgsl_device *device,
unsigned int context_id,
- unsigned int timestamp,
+ unsigned int curr_ts,
+ unsigned int wait_ts,
unsigned int timeout),
- TP_ARGS(device, context_id, timestamp, timeout),
+ TP_ARGS(device, context_id, curr_ts, wait_ts, timeout),
TP_STRUCT__entry(
__string(device_name, device->name)
__field(unsigned int, context_id)
- __field(unsigned int, timestamp)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, wait_ts)
__field(unsigned int, timeout)
),
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->context_id = context_id;
- __entry->timestamp = timestamp;
+ __entry->curr_ts = curr_ts;
+ __entry->wait_ts = wait_ts;
__entry->timeout = timeout;
),
TP_printk(
- "d_name=%s context_id=%u timestamp=%u timeout=%u",
+ "d_name=%s context_id=%u curr_ts=%u timestamp=0x%x timeout=%u",
__get_str(device_name),
__entry->context_id,
- __entry->timestamp,
+ __entry->curr_ts,
+ __entry->wait_ts,
__entry->timeout
)
);
@@ -145,23 +157,27 @@
*/
TRACE_EVENT(kgsl_waittimestamp_exit,
- TP_PROTO(struct kgsl_device *device, int result),
+ TP_PROTO(struct kgsl_device *device, unsigned int curr_ts,
+ int result),
- TP_ARGS(device, result),
+ TP_ARGS(device, curr_ts, result),
TP_STRUCT__entry(
__string(device_name, device->name)
+ __field(unsigned int, curr_ts)
__field(int, result)
),
TP_fast_assign(
__assign_str(device_name, device->name);
+ __entry->curr_ts = curr_ts;
__entry->result = result;
),
TP_printk(
- "d_name=%s result=%d",
+ "d_name=%s curr_ts=%u result=%d",
__get_str(device_name),
+ __entry->curr_ts,
__entry->result
)
);
@@ -343,12 +359,13 @@
DECLARE_EVENT_CLASS(kgsl_mem_timestamp_template,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
- TP_ARGS(mem_entry, id, curr_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts),
TP_STRUCT__entry(
+ __string(device_name, device->name)
__field(unsigned int, gpuaddr)
__field(unsigned int, size)
__field(int, type)
@@ -358,33 +375,120 @@
),
TP_fast_assign(
+ __assign_str(device_name, device->name);
__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
__entry->size = mem_entry->memdesc.size;
__entry->drawctxt_id = id;
__entry->type = mem_entry->memtype;
__entry->curr_ts = curr_ts;
- __entry->free_ts = mem_entry->free_timestamp;
+ __entry->free_ts = free_ts;
),
TP_printk(
- "gpuaddr=0x%08x size=%d type=%d ctx=%u curr_ts=0x%08x free_ts=0x%08x",
- __entry->gpuaddr, __entry->size, __entry->type,
- __entry->drawctxt_id, __entry->curr_ts, __entry->free_ts
+ "d_name=%s gpuaddr=0x%08x size=%d type=%d ctx=%u"
+ " curr_ts=0x%08x free_ts=0x%08x",
+ __get_str(device_name),
+ __entry->gpuaddr,
+ __entry->size,
+ __entry->type,
+ __entry->drawctxt_id,
+ __entry->curr_ts,
+ __entry->free_ts
)
);
DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_queue,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
- TP_ARGS(mem_entry, id, curr_ts)
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
);
DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_free,
- TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int id,
- unsigned int curr_ts),
- TP_ARGS(mem_entry, id, curr_ts)
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
);
+TRACE_EVENT(kgsl_context_create,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int flags),
+
+ TP_ARGS(device, context, flags),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ __entry->flags = flags;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u flags=0x%x %s",
+ __get_str(device_name), __entry->id, __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_NO_GMEM_ALLOC , "NO_GMEM_ALLOC" },
+ { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" },
+ { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" },
+ { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" })
+ : "None"
+ )
+);
+
+TRACE_EVENT(kgsl_context_detach,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context),
+
+ TP_ARGS(device, context),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u",
+ __get_str(device_name), __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mmu_pagefault,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int page,
+ unsigned int pt, const char *op),
+
+ TP_ARGS(device, page, pt, op),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, page)
+ __field(unsigned int, pt)
+ __string(op, op)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->page = page;
+ __entry->pt = pt;
+ __assign_str(op, op);
+ ),
+
+ TP_printk(
+ "d_name=%s page=0x%08x pt=%d op=%s\n",
+ __get_str(device_name), __entry->page, __entry->pt,
+ __get_str(op)
+ )
+);
#endif /* _KGSL_TRACE_H */
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 240de9a..846a9a1 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -132,10 +132,9 @@
static struct z180_device device_2d0 = {
.dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d0.dev),
.name = DEVICE_2D0_NAME,
.id = KGSL_DEVICE_2D0,
- .ver_major = DRIVER_VERSION_MAJOR,
- .ver_minor = DRIVER_VERSION_MINOR,
.mh = {
.mharb = Z180_CFG_MHARB,
.mh_intf_cfg1 = 0x00032f07,
@@ -152,20 +151,17 @@
.pwrctrl = {
.irq_name = KGSL_2D0_IRQ,
},
- .mutex = __MUTEX_INITIALIZER(device_2d0.dev.mutex),
- .state = KGSL_STATE_INIT,
- .active_cnt = 0,
.iomemname = KGSL_2D0_REG_MEMORY,
.ftbl = &z180_functable,
},
+ .cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d0.cmdwin_lock),
};
static struct z180_device device_2d1 = {
.dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d1.dev),
.name = DEVICE_2D1_NAME,
.id = KGSL_DEVICE_2D1,
- .ver_major = DRIVER_VERSION_MAJOR,
- .ver_minor = DRIVER_VERSION_MINOR,
.mh = {
.mharb = Z180_CFG_MHARB,
.mh_intf_cfg1 = 0x00032f07,
@@ -182,12 +178,10 @@
.pwrctrl = {
.irq_name = KGSL_2D1_IRQ,
},
- .mutex = __MUTEX_INITIALIZER(device_2d1.dev.mutex),
- .state = KGSL_STATE_INIT,
- .active_cnt = 0,
.iomemname = KGSL_2D1_REG_MEMORY,
.ftbl = &z180_functable,
},
+ .cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d1.cmdwin_lock),
};
static irqreturn_t z180_irq_handler(struct kgsl_device *device)
@@ -524,7 +518,6 @@
device->parentdev = &pdev->dev;
z180_dev = Z180_DEVICE(device);
- spin_lock_init(&z180_dev->cmdwin_lock);
status = z180_ringbuffer_init(device);
if (status != 0)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 3238d33..701160c 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -777,7 +777,7 @@
input_report_abs(input_dev, ABS_MT_POSITION_Y,
finger[id].y);
input_report_abs(input_dev, ABS_MT_PRESSURE,
- finger[id].area);
+ finger[id].pressure);
} else {
finger[id].status = 0;
}
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index fa42c2c..ece7b0f 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -57,6 +57,7 @@
#define WLED_OVP_VAL_BIT_SHFT 0x04
#define WLED_BOOST_LIMIT_MASK 0xE0
#define WLED_BOOST_LIMIT_BIT_SHFT 0x05
+#define WLED_BOOST_OFF 0x00
#define WLED_EN_MASK 0x01
#define WLED_CP_SELECT_MAX 0x03
#define WLED_CP_SELECT_MASK 0x03
@@ -155,6 +156,7 @@
struct led_classdev cdev;
int id;
u8 reg;
+ u8 wled_mod_ctrl_val;
struct device *dev;
struct work_struct work;
struct mutex lock;
@@ -237,6 +239,24 @@
if (value > WLED_MAX_LEVEL)
value = WLED_MAX_LEVEL;
+ if (value == 0) {
+ rc = pm8xxx_writeb(led->dev->parent, WLED_MOD_CTRL_REG,
+ WLED_BOOST_OFF);
+ if (rc) {
+ dev_err(led->dev->parent, "can't write wled ctrl config"
+ " register rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = pm8xxx_writeb(led->dev->parent, WLED_MOD_CTRL_REG,
+ led->wled_mod_ctrl_val);
+ if (rc) {
+ dev_err(led->dev->parent, "can't write wled ctrl config"
+ " register rc=%d\n", rc);
+ return rc;
+ }
+ }
+
duty = (WLED_MAX_DUTY_CYCLE * value) / WLED_MAX_LEVEL;
num_wled_strings = led->wled_cfg->num_strings;
@@ -629,6 +649,7 @@
" register rc=%d\n", rc);
return rc;
}
+ led->wled_mod_ctrl_val = val;
/* dump wled registers */
wled_dump_regs(led);
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index ff0c9d8..a57ad44 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -136,6 +136,8 @@
struct timespec timeout);
int (*start_filtering) (struct dmx_ts_feed* feed);
int (*stop_filtering) (struct dmx_ts_feed* feed);
+ int (*set_indexing_params) (struct dmx_ts_feed *feed,
+ struct dmx_indexing_video_params *params);
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index ed3f731..1d310f2 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -1186,6 +1186,24 @@
return ret;
}
+ /* Support indexing for video PES */
+ if ((para->pes_type == DMX_PES_VIDEO0) ||
+ (para->pes_type == DMX_PES_VIDEO1) ||
+ (para->pes_type == DMX_PES_VIDEO2) ||
+ (para->pes_type == DMX_PES_VIDEO3)) {
+
+ if (tsfeed->set_indexing_params) {
+ ret = tsfeed->set_indexing_params(tsfeed,
+ &para->video_params);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+ }
+
ret = tsfeed->start_filtering(tsfeed);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -1464,6 +1482,23 @@
if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
return -EINVAL;
+ if (params->flags & DMX_ENABLE_INDEXING) {
+ if (!(dmxdev->capabilities & DMXDEV_CAP_INDEXING))
+ return -EINVAL;
+
+ /* can do indexing only on video PES */
+ if ((params->pes_type != DMX_PES_VIDEO0) &&
+ (params->pes_type != DMX_PES_VIDEO1) &&
+ (params->pes_type != DMX_PES_VIDEO2) &&
+ (params->pes_type != DMX_PES_VIDEO3))
+ return -EINVAL;
+
+ /* can do indexing only when recording */
+ if ((params->output != DMX_OUT_TS_TAP) &&
+ (params->output != DMX_OUT_TSDEMUX_TAP))
+ return -EINVAL;
+ }
+
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 82f8f6d..4c52e84 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -108,6 +108,7 @@
#define DMXDEV_CAP_DUPLEX 0x1
#define DMXDEV_CAP_PULL_MODE 0x2
#define DMXDEV_CAP_PCR_EXTRACTION 0x4
+#define DMXDEV_CAP_INDEXING 0x8
enum dmx_playback_mode_t playback_mode;
dmx_source_t source;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 966b48d..0ff2a55 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -1027,6 +1027,18 @@
return ret;
}
+static int dmx_ts_set_indexing_params(
+ struct dmx_ts_feed *ts_feed,
+ struct dmx_indexing_video_params *params)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ memcpy(&feed->indexing_params, params,
+ sizeof(struct dmx_indexing_video_params));
+
+ return 0;
+}
+
static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
struct dmx_ts_feed **ts_feed,
dmx_ts_cb callback)
@@ -1048,6 +1060,8 @@
feed->pid = 0xffff;
feed->peslen = 0xfffa;
feed->buffer = NULL;
+ memset(&feed->indexing_params, 0,
+ sizeof(struct dmx_indexing_video_params));
/* default behaviour - pass first PES data even if it is
* partial PES data from previous PES that we didn't receive its header.
@@ -1063,6 +1077,7 @@
(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
+ (*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 297f3df..17f4960 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -95,10 +95,12 @@
int cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
- u16 peslen;
+ u32 peslen;
struct list_head list_head;
unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+
+ struct dmx_indexing_video_params indexing_params;
};
struct dvb_demux {
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index e7bbfcb..979f5d3 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -34,16 +34,6 @@
sizeof(struct mpq_adapter_video_meta_data)))
/*
- * The following threshold defines gap from end of ring-buffer
- * from which new PES payload will not be written to make
- * sure that the PES payload does not wrap-around at end of the
- * buffer. Instead, padding will be inserted and the new PES will
- * be written from the beginning of the buffer.
- * Setting this to 0 means no padding will be added.
- */
-#define VIDEO_WRAP_AROUND_THRESHOLD (1024*1024+512*1024)
-
-/*
* PCR/STC information length saved in ring-buffer.
* PCR / STC are saved in ring-buffer in the following form:
* <8 bit flags><64 bits of STC> <64bits of PCR>
@@ -51,13 +41,85 @@
* The current flags that are defined:
* 0x00000001: discontinuity_indicator
*/
-#define PCR_STC_LEN 17
+#define PCR_STC_LEN 17
/* Number of demux devices, has default of linux configuration */
static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
module_param(mpq_demux_device_num, int, S_IRUGO);
+/**
+ * Maximum allowed framing pattern size
+ */
+#define MPQ_MAX_PATTERN_SIZE 6
+
+/**
+ * Number of patterns to look for when doing framing, per video standard
+ */
+#define MPQ_MPEG2_PATTERN_NUM 5
+#define MPQ_H264_PATTERN_NUM 5
+#define MPQ_VC1_PATTERN_NUM 3
+
+/*
+ * mpq_framing_pattern_lookup_params - framing pattern lookup parameters.
+ *
+ * @pattern: the byte pattern to look for.
+ * @mask: the byte mask to use (same length as pattern).
+ * @size: the length of the pattern, in bytes.
+ * @type: the type of the pattern.
+ */
+struct mpq_framing_pattern_lookup_params {
+ u8 pattern[MPQ_MAX_PATTERN_SIZE];
+ u8 mask[MPQ_MAX_PATTERN_SIZE];
+ size_t size;
+ enum dmx_framing_pattern_type type;
+};
+
+/*
+ * Pre-defined video framing lookup pattern information.
+ * Note: the first pattern in each patterns database must
+ * be the Sequence Header (or equivalent SPS in H.264).
+ * The code assumes this is the case when prepending
+ * Sequence Header data in case it is required.
+ */
+static const struct mpq_framing_pattern_lookup_params
+ mpeg2_patterns[MPQ_MPEG2_PATTERN_NUM] = {
+ {{0x00, 0x00, 0x01, 0xB3}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+ DMX_FRM_MPEG2_SEQUENCE_HEADER},
+ {{0x00, 0x00, 0x01, 0xB8}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+ DMX_FRM_MPEG2_GOP_HEADER},
+ {{0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+ DMX_FRM_MPEG2_I_PIC},
+ {{0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+ DMX_FRM_MPEG2_P_PIC},
+ {{0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, 6,
+ DMX_FRM_MPEG2_B_PIC}
+};
+
+static const struct mpq_framing_pattern_lookup_params
+ h264_patterns[MPQ_H264_PATTERN_NUM] = {
+ {{0x00, 0x00, 0x01, 0x07}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
+ DMX_FRM_H264_SPS},
+ {{0x00, 0x00, 0x01, 0x08}, {0xFF, 0xFF, 0xFF, 0x1F}, 4,
+ DMX_FRM_H264_PPS},
+ {{0x00, 0x00, 0x01, 0x05, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
+ DMX_FRM_H264_IDR_PIC},
+ {{0x00, 0x00, 0x01, 0x01, 0x80}, {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, 5,
+ DMX_FRM_H264_NON_IDR_PIC}
+};
+
+static const struct mpq_framing_pattern_lookup_params
+ vc1_patterns[MPQ_VC1_PATTERN_NUM] = {
+ {{0x00, 0x00, 0x01, 0x0F}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+ DMX_FRM_VC1_SEQUENCE_HEADER},
+ {{0x00, 0x00, 0x01, 0x0E}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+ DMX_FRM_VC1_ENTRY_POINT_HEADER},
+ {{0x00, 0x00, 0x01, 0x0D}, {0xFF, 0xFF, 0xFF, 0xFF}, 4,
+ DMX_FRM_VC1_FRAME_START_CODE}
+};
/* Global data-structure for managing demux devices */
static struct
@@ -73,14 +135,13 @@
decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
/*
- * Indicates whether we allow decoder's data to
- * wrap-around in the output buffer or padding is
- * inserted in such case.
+ * Indicates whether the video decoder handles framing
+ * or we are required to provide framing information
+ * in the meta-data passed to the decoder.
*/
- int decoder_data_wrap;
+ int decoder_framing;
} mpq_dmx_info;
-
/* Check that PES header is valid and that it is a video PES */
static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
{
@@ -97,6 +158,321 @@
return 0;
}
+/* Check if a framing pattern is a video frame pattern or a header pattern */
+static inline int mpq_dmx_is_video_frame(
+ enum dmx_indexing_video_standard standard,
+ enum dmx_framing_pattern_type pattern_type)
+{
+ switch (standard) {
+ case DMX_INDEXING_MPEG2:
+ if ((pattern_type == DMX_FRM_MPEG2_I_PIC) ||
+ (pattern_type == DMX_FRM_MPEG2_P_PIC) ||
+ (pattern_type == DMX_FRM_MPEG2_B_PIC))
+ return 1;
+ return 0;
+ case DMX_INDEXING_H264:
+ if ((pattern_type == DMX_FRM_H264_IDR_PIC) ||
+ (pattern_type == DMX_FRM_H264_NON_IDR_PIC))
+ return 1;
+ return 0;
+ case DMX_INDEXING_VC1:
+ if (pattern_type == DMX_FRM_VC1_FRAME_START_CODE)
+ return 1;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * mpq_framing_pattern_lookup_results - framing lookup results
+ *
+ * @offset: The offset in the buffer where the pattern was found.
+ * If a pattern is found using a prefix (i.e. started on the
+ * previous buffer), offset is zero.
+ * @type: the type of the pattern found.
+ * @used_prefix_size: the prefix size that was used to find this pattern
+ */
+struct mpq_framing_pattern_lookup_results {
+ struct {
+ u32 offset;
+ enum dmx_framing_pattern_type type;
+ u32 used_prefix_size;
+ } info[MPQ_MAX_FOUND_PATTERNS];
+};
+
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int mpq_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+ const u8 *mask, size_t pattern_size)
+{
+ int i;
+
+ /*
+ * Assumption: it is OK to access pattern1, pattern2 and mask.
+ * This function performs no sanity checks to keep things fast.
+ */
+
+ for (i = 0; i < pattern_size; i++)
+ if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * mpq_dmx_framing_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full patterns information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search. we search the entire buffer.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started at the last buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to MPQ_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ * Number of patterns found (up to MPQ_MAX_FOUND_PATTERNS).
+ * 0 if pattern was not found.
+ * Negative error value on failure.
+ */
+static int mpq_dmx_framing_pattern_search(
+ const struct mpq_framing_pattern_lookup_params *patterns,
+ int patterns_num,
+ const u8 *buf,
+ size_t buf_size,
+ struct mpq_framing_prefix_size_masks *prefix_size_masks,
+ struct mpq_framing_pattern_lookup_results *results)
+{
+ int i, j;
+ unsigned int current_size;
+ u32 prefix;
+ int found = 0;
+ int start_offset = 0;
+ /* the starting common substring to look for */
+ u8 string[] = {0x00, 0x00, 0x01};
+ /* the mask for the starting string */
+ u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+ /* the size of the starting string (in bytes) */
+ size_t string_size = 3;
+
+ /* sanity checks - can be commented out for optimization purposes */
+ if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(results, 0, sizeof(struct mpq_framing_pattern_lookup_results));
+
+ /*
+ * handle prefix - disregard string, simply check all patterns,
+ * looking for a matching suffix at the very beginning of the buffer.
+ */
+ for (j = 0; (j < patterns_num) && !found; j++) {
+ prefix = prefix_size_masks->size_mask[j];
+ current_size = 32;
+ while (prefix) {
+ if (prefix & (0x1 << (current_size - 1))) {
+ /*
+ * check that we don't look further
+ * than buf_size boundary
+ */
+ if ((int)(patterns[j].size - current_size) >
+ buf_size)
+ break;
+
+ if (mpq_dmx_patterns_match(
+ (patterns[j].pattern + current_size),
+ buf, (patterns[j].mask + current_size),
+ (patterns[j].size - current_size))) {
+
+ MPQ_DVB_DBG_PRINT(
+					"%s: Found matching pattern "
+ "using prefix of size %d\n",
+ __func__, current_size);
+ /*
+ * pattern found using prefix at the
+ * very beginning of the buffer, so
+ * offset is 0, but we already zeroed
+ * everything in the beginning of the
+ * function. that's why the next line
+ * is commented.
+ */
+ /* results->info[found].offset = 0; */
+ results->info[found].type =
+ patterns[j].type;
+ results->info[found].used_prefix_size =
+ current_size;
+ found++;
+ /*
+ * save offset to start looking from
+ * in the buffer, to avoid reusing the
+ * data of a pattern we already found.
+ */
+ start_offset = (patterns[j].size -
+ current_size);
+
+ if (found >= MPQ_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * we don't want to search for the same
+ * pattern with several possible prefix
+ * sizes if we have already found it,
+ * so we break from the inner loop.
+ * since we incremented 'found', we
+ * will not search for additional
+ * patterns using a prefix - that would
+ * imply ambiguous patterns where one
+ * pattern can be included in another.
+ * the for loop will exit.
+ */
+ break;
+ }
+ }
+ current_size--;
+ prefix &= ~(0x1 << (current_size - 1));
+ }
+ }
+
+ /*
+ * Search buffer for entire pattern, starting with the string.
+ * Note the external for loop does not execute if buf_size is
+ * smaller than string_size (the cast to int is required, since
+ * size_t is unsigned).
+ */
+ for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+ if (mpq_dmx_patterns_match(string, (buf + i), string_mask,
+ string_size)) {
+ /* now search for patterns: */
+ for (j = 0; j < patterns_num; j++) {
+ /* avoid overflow to next buffer */
+ if ((i + patterns[j].size) > buf_size)
+ continue;
+
+ if (mpq_dmx_patterns_match(
+ (patterns[j].pattern + string_size),
+ (buf + i + string_size),
+ (patterns[j].mask + string_size),
+ (patterns[j].size - string_size))) {
+
+ results->info[found].offset = i;
+ results->info[found].type =
+ patterns[j].type;
+ /*
+ * save offset to start next prefix
+ * lookup, to avoid reusing the data
+ * of any pattern we already found.
+ */
+ if ((i + patterns[j].size) >
+ start_offset)
+ start_offset = (i +
+ patterns[j].size);
+ /*
+ * did not use a prefix to find this
+ * pattern, but we zeroed everything
+ * in the beginning of the function.
+ * So no need to zero used_prefix_size
+ * for results->info[found]
+ */
+
+ found++;
+ if (found >= MPQ_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * theoretically we don't have to break
+ * here, but we don't want to search
+ * for the other matching patterns on
+				 * the very same place in the
+ * buffer. That would mean the
+ * (pattern & mask) combinations are
+ * not unique. So we break from inner
+ * loop and move on to the next place
+ * in the buffer.
+ */
+ break;
+ }
+ }
+ }
+ }
+
+next_prefix_lookup:
+ /* check for possible prefix sizes for the next buffer */
+ for (j = 0; j < patterns_num; j++) {
+ prefix_size_masks->size_mask[j] = 0;
+ for (i = 1; i < patterns[j].size; i++) {
+ /*
+ * avoid looking outside of the buffer
+ * or reusing previously used data.
+ */
+ if (i > (buf_size - start_offset))
+ break;
+
+ if (mpq_dmx_patterns_match(patterns[j].pattern,
+ (buf + buf_size - i),
+ patterns[j].mask, i)) {
+ prefix_size_masks->size_mask[j] |=
+ (1 << (i - 1));
+ }
+ }
+ }
+
+ return found;
+}
+
+/*
+ * mpq_dmx_get_pattern_params -
+ * get a pointer to the relevant pattern parameters structure,
+ * based on the video parameters.
+ *
+ * @video_params: the video parameters (e.g. video standard).
+ * @patterns: a pointer to a pointer to the pattern parameters,
+ * updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
+ */
+static inline int mpq_dmx_get_pattern_params(
+ struct dmx_indexing_video_params *video_params,
+ const struct mpq_framing_pattern_lookup_params **patterns,
+ int *patterns_num)
+{
+ switch (video_params->standard) {
+ case DMX_INDEXING_MPEG2:
+ *patterns = mpeg2_patterns;
+ *patterns_num = MPQ_MPEG2_PATTERN_NUM;
+ break;
+ case DMX_INDEXING_H264:
+ *patterns = h264_patterns;
+ *patterns_num = MPQ_H264_PATTERN_NUM;
+ break;
+ case DMX_INDEXING_VC1:
+ *patterns = vc1_patterns;
+ *patterns_num = MPQ_VC1_PATTERN_NUM;
+ break;
+ default:
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ *patterns = NULL;
+ *patterns_num = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
/* Extend dvb-demux debugfs with HW statistics */
void mpq_dmx_init_hw_statistics(struct mpq_demux *mpq_demux)
@@ -199,8 +575,12 @@
mpq_dmx_info.devices = NULL;
mpq_dmx_info.ion_client = NULL;
- /* TODO: the following should be set based on the decoder */
- mpq_dmx_info.decoder_data_wrap = 0;
+ /*
+ * TODO: the following should be set based on the decoder:
+ * 0 means the decoder doesn't handle framing, so framing
+ * is done by demux. 1 means the decoder handles framing.
+ */
+ mpq_dmx_info.decoder_framing = 0;
/* Allocate memory for all MPQ devices */
mpq_dmx_info.devices =
@@ -404,13 +784,27 @@
if (feed_data == NULL) {
MPQ_DVB_ERR_PRINT(
- "%s: FAILED to private video feed data\n",
+ "%s: FAILED to allocate private video feed data\n",
__func__);
ret = -ENOMEM;
goto init_failed;
}
+ /* get and store framing information if required */
+ if (!mpq_dmx_info.decoder_framing) {
+ mpq_dmx_get_pattern_params(&feed->indexing_params,
+ &feed_data->patterns, &feed_data->patterns_num);
+ if (feed_data->patterns == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to get framing pattern parameters\n",
+ __func__);
+
+ ret = -EINVAL;
+ goto init_failed_free_priv_data;
+ }
+ }
+
/* Allocate packet buffer holding the meta-data */
packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
@@ -430,12 +824,7 @@
* flag set.
*/
- if (mpq_dmx_info.decoder_data_wrap)
- actual_buffer_size =
- feed->buffer_size;
- else
- actual_buffer_size =
- feed->buffer_size + VIDEO_WRAP_AROUND_THRESHOLD;
+ actual_buffer_size = feed->buffer_size;
actual_buffer_size += (SZ_4K - 1);
actual_buffer_size &= ~(SZ_4K - 1);
@@ -551,6 +940,14 @@
feed->pusi_seen = 0;
feed->peslen = 0;
feed_data->fullness_wait_cancel = 0;
+ feed_data->last_framing_match_address = 0;
+ feed_data->last_framing_match_type = DMX_FRM_UNKNOWN;
+ feed_data->found_sequence_header_pattern = 0;
+ memset(&feed_data->prefix_size, 0,
+ sizeof(struct mpq_framing_prefix_size_masks));
+ feed_data->first_pattern_offset = 0;
+ feed_data->first_prefix_size = 0;
+ feed_data->write_pts_dts = 0;
spin_lock(&mpq_demux->feed_lock);
feed->priv = (void *)feed_data;
@@ -669,7 +1066,6 @@
if (mpq_dmx_is_video_feed(feed)) {
int ret;
- int gap;
struct mpq_video_feed_info *feed_data;
struct dvb_ringbuffer *video_buff;
@@ -686,16 +1082,6 @@
video_buff =
&feed_data->video_buffer->raw_data;
- /*
- * If we are now starting new PES and the
- * PES payload may wrap-around, extra padding
- * needs to be pushed into the buffer.
- */
- gap = video_buff->size - video_buff->pwrite;
- if ((!mpq_dmx_info.decoder_data_wrap) &&
- (gap < VIDEO_WRAP_AROUND_THRESHOLD))
- required_space += gap;
-
ret = 0;
if ((feed_data != NULL) &&
(!feed_data->fullness_wait_cancel) &&
@@ -795,13 +1181,206 @@
}
EXPORT_SYMBOL(mpq_dmx_decoder_fullness_abort);
-int mpq_dmx_process_video_packet(
+
+static inline int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
+ if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+ left_size =
+ PES_MANDATORY_FIELDS_LEN -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have beginning of PES header */
+ *bytes_avail -= left_size;
+ *ts_payload_offset += left_size;
+
+ /* Make sure the PES packet is valid */
+ if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+ /*
+ * Since the new PES header parsing
+ * failed, reset pusi_seen to drop all
+ * data until next PUSI
+ */
+ feed->pusi_seen = 0;
+ feed_data->pes_header_offset = 0;
+
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid packet\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ feed_data->pes_header_left_bytes =
+ pes_header->pes_header_data_length;
+ }
+
+ return 0;
+}
+
+static inline int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
+	/* Remaining header bytes that need to be processed? */
+ if (!feed_data->pes_header_left_bytes)
+ return 0;
+
+ /* Did we capture the PTS value (if exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+5)) &&
+ ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3))) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 5 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the PTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ feed_data->write_pts_dts = 1;
+ }
+
+	/* Did we capture the DTS value (if exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+10)) &&
+ (pes_header->pts_dts_flag == 3)) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 10 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the DTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ feed_data->write_pts_dts = 1;
+ }
+
+ /* Any more header bytes?! */
+ if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+ feed_data->pes_header_left_bytes -= *bytes_avail;
+ return -EINVAL;
+ }
+
+ /* Got PES header, process payload */
+ *bytes_avail -= feed_data->pes_header_left_bytes;
+ *ts_payload_offset += feed_data->pes_header_left_bytes;
+ feed_data->pes_header_left_bytes = 0;
+
+ return 0;
+}
+
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ struct mpq_adapter_video_meta_data *meta_data,
+ enum dmx_packet_type packet_type)
+{
+ struct dmx_pts_dts_info *info;
+
+ if (packet_type == DMX_PES_PACKET)
+ info = &(meta_data->info.pes.pts_dts_info);
+ else
+ info = &(meta_data->info.framing.pts_dts_info);
+
+ if (feed_data->write_pts_dts) {
+ if ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3)) {
+ info->pts_exist = 1;
+
+ info->pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ info->pts_exist = 0;
+ info->pts = 0;
+ }
+
+ if (pes_header->pts_dts_flag == 3) {
+ info->dts_exist = 1;
+
+ info->dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ info->dts_exist = 0;
+ info->dts = 0;
+ }
+ } else {
+ info->pts_exist = 0;
+ info->dts_exist = 0;
+ }
+}
+
+static int mpq_dmx_process_video_packet_framing(
struct dvb_demux_feed *feed,
const u8 *buf)
{
int bytes_avail;
- int left_size;
- int copy_len;
u32 ts_payload_offset;
struct mpq_video_feed_info *feed_data;
const struct ts_packet_header *ts_header;
@@ -809,13 +1388,18 @@
struct pes_packet_header *pes_header;
struct mpq_demux *mpq_demux;
- mpq_demux =
- (struct mpq_demux *)feed->demux->priv;
+ struct mpq_framing_pattern_lookup_results framing_res;
+ int found_patterns = 0;
+ int first_pattern = 0;
+ int i;
+ u32 pattern_addr = 0;
+ int is_video_frame = 0;
+
+ mpq_demux = (struct mpq_demux *)feed->demux->priv;
spin_lock(&mpq_demux->feed_lock);
- feed_data =
- (struct mpq_video_feed_info *)feed->priv;
+ feed_data = (struct mpq_video_feed_info *)feed->priv;
if (unlikely(feed_data == NULL)) {
spin_unlock(&mpq_demux->feed_lock);
@@ -824,13 +1408,11 @@
ts_header = (const struct ts_packet_header *)buf;
- stream_buffer =
- feed_data->video_buffer;
+ stream_buffer = feed_data->video_buffer;
- pes_header =
- &feed_data->pes_header;
+ pes_header = &feed_data->pes_header;
-/* printk("TS packet: %X %X %X %X %X%X %X %X %X\n",
+ /* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
ts_header->sync_byte,
ts_header->transport_error_indicator,
ts_header->payload_unit_start_indicator,
@@ -839,7 +1421,318 @@
ts_header->pid_lsb,
ts_header->transport_scrambling_control,
ts_header->adaptation_field_control,
- ts_header->continuity_counter);*/
+ ts_header->continuity_counter); */
+
+ /* Make sure this TS packet has a payload and not scrambled */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 2) ||
+ (ts_header->transport_scrambling_control)) {
+ /* continue to next packet */
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+ if (feed->pusi_seen) { /* Did we see PUSI before? */
+ /*
+ * Double check that we are not in middle of
+ * previous PES header parsing.
+ */
+ if (feed_data->pes_header_left_bytes != 0) {
+ MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI "
+					"while handling PES header "
+ "of previous PES\n",
+ __func__);
+ }
+
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes =
+ PES_MANDATORY_FIELDS_LEN;
+ feed_data->write_pts_dts = 0;
+ } else {
+ feed->pusi_seen = 1;
+ }
+ }
+
+ /*
+ * Parse PES data only if PUSI was encountered,
+ * otherwise the data is dropped
+ */
+ if (!feed->pusi_seen) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0; /* drop and wait for next packets */
+ }
+
+ ts_payload_offset = sizeof(struct ts_packet_header);
+
+ /* Skip adaptation field if exists */
+ if (ts_header->adaptation_field_control == 3)
+ ts_payload_offset += buf[ts_payload_offset] + 1;
+
+ /* 188 bytes: the size of a TS packet including the TS packet header */
+ bytes_avail = 188 - ts_payload_offset;
+
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ /*
+ * If we reached here,
+ * then we are now at the PES payload data
+ */
+ if (bytes_avail == 0) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ /*
+ * the decoder requires demux to do framing,
+ * so search for the patterns now.
+ */
+ found_patterns = mpq_dmx_framing_pattern_search(
+ feed_data->patterns,
+ feed_data->patterns_num,
+ (buf + ts_payload_offset),
+ bytes_avail,
+ &feed_data->prefix_size,
+ &framing_res);
+
+ if (!(feed_data->found_sequence_header_pattern)) {
+ for (i = 0; i < found_patterns; i++) {
+ if ((framing_res.info[i].type ==
+ DMX_FRM_MPEG2_SEQUENCE_HEADER) ||
+ (framing_res.info[i].type ==
+ DMX_FRM_H264_SPS) ||
+ (framing_res.info[i].type ==
+ DMX_FRM_VC1_SEQUENCE_HEADER)) {
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Found Sequence Pattern, buf %p, "
+ "i = %d, offset = %d, type = %d\n",
+ __func__, buf, i,
+ framing_res.info[i].offset,
+ framing_res.info[i].type);
+
+ first_pattern = i;
+ feed_data->found_sequence_header_pattern = 1;
+ ts_payload_offset +=
+ framing_res.info[i].offset;
+ bytes_avail -= framing_res.info[i].offset;
+
+ if (framing_res.info[i].used_prefix_size) {
+ feed_data->first_prefix_size =
+ framing_res.info[i].
+ used_prefix_size;
+ }
+ /*
+ * if this is the first pattern we write,
+ * no need to take offset into account since we
+ * dropped all data before it (so effectively
+ * offset is 0).
+ * we save the first pattern offset and take
+ * it into consideration for the rest of the
+ * patterns found in this buffer.
+ */
+ feed_data->first_pattern_offset =
+ framing_res.info[i].offset;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If decoder requires demux to do framing,
+ * pass data to decoder only after sequence header
+ * or equivalent is found. Otherwise the data is dropped.
+ */
+ if (!(feed_data->found_sequence_header_pattern)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ /*
+ * write prefix used to find first Sequence pattern, if needed.
+ * feed_data->patterns[0].pattern always contains the Sequence
+ * pattern.
+ */
+ if (feed_data->first_prefix_size) {
+ if (mpq_streambuffer_data_write(stream_buffer,
+ (feed_data->patterns[0].pattern),
+ feed_data->first_prefix_size) < 0) {
+ mpq_demux->decoder_tsp_drop_count++;
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+ feed_data->first_prefix_size = 0;
+ }
+ /* write data to payload buffer */
+ if (mpq_streambuffer_data_write(stream_buffer,
+ (buf + ts_payload_offset),
+ bytes_avail) < 0) {
+ mpq_demux->decoder_tsp_drop_count++;
+ } else {
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+
+ feed->peslen += bytes_avail;
+
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+
+ for (i = first_pattern; i < found_patterns; i++) {
+ if (feed_data->last_framing_match_address) {
+ is_video_frame = mpq_dmx_is_video_frame(
+ feed->indexing_params.standard,
+ feed_data->last_framing_match_type);
+ if (is_video_frame == 1) {
+ mpq_dmx_get_pts_dts(feed_data,
+ pes_header,
+ &meta_data,
+ DMX_FRAMING_INFO_PACKET);
+ } else {
+ meta_data.info.framing.
+ pts_dts_info.pts_exist = 0;
+ meta_data.info.framing.
+ pts_dts_info.dts_exist = 0;
+ }
+ /*
+ * writing meta-data that includes
+ * framing information
+ */
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ packet.raw_data_addr =
+ feed_data->last_framing_match_address;
+
+ pattern_addr = feed_data->pes_payload_address +
+ framing_res.info[i].offset -
+ framing_res.info[i].used_prefix_size;
+
+ if ((pattern_addr -
+ feed_data->first_pattern_offset) <
+ feed_data->last_framing_match_address) {
+ /* wraparound case */
+ packet.raw_data_len =
+ (pattern_addr -
+ feed_data->
+ last_framing_match_address +
+ stream_buffer->raw_data.size) -
+ feed_data->first_pattern_offset;
+ } else {
+ packet.raw_data_len =
+ pattern_addr -
+ feed_data->
+ last_framing_match_address -
+ feed_data->first_pattern_offset;
+ }
+
+ MPQ_DVB_DBG_PRINT("Writing Packet: "
+ "addr = 0x%X, len = %d, type = %d, "
+ "isPts = %d, isDts = %d\n",
+ packet.raw_data_addr,
+ packet.raw_data_len,
+ meta_data.info.framing.pattern_type,
+ meta_data.info.framing.
+ pts_dts_info.pts_exist,
+ meta_data.info.framing.
+ pts_dts_info.dts_exist);
+
+ if (mpq_streambuffer_pkt_write(stream_buffer,
+ &packet,
+ (u8 *)&meta_data) < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: "
+ "Couldn't write packet. "
+ "Should never happen\n",
+ __func__);
+ } else {
+ if (is_video_frame == 1)
+ feed_data->write_pts_dts = 0;
+ }
+ }
+
+ /* save the last match for next time */
+ feed_data->last_framing_match_type =
+ framing_res.info[i].type;
+
+ feed_data->last_framing_match_address =
+ (feed_data->pes_payload_address +
+ framing_res.info[i].offset -
+ framing_res.info[i].used_prefix_size -
+ feed_data->first_pattern_offset);
+ }
+ /*
+ * the first pattern offset is needed only for the group of
+ * patterns that are found and written with the first pattern.
+ */
+ feed_data->first_pattern_offset = 0;
+
+ feed_data->pes_payload_address =
+ (u32)stream_buffer->raw_data.data +
+ stream_buffer->raw_data.pwrite;
+ }
+
+ spin_unlock(&mpq_demux->feed_lock);
+
+ return 0;
+}
+
+static int mpq_dmx_process_video_packet_no_framing(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ int bytes_avail;
+ u32 ts_payload_offset;
+ struct mpq_video_feed_info *feed_data;
+ const struct ts_packet_header *ts_header;
+ struct mpq_streambuffer *stream_buffer;
+ struct pes_packet_header *pes_header;
+ struct mpq_demux *mpq_demux;
+
+ mpq_demux = (struct mpq_demux *)feed->demux->priv;
+
+ spin_lock(&mpq_demux->feed_lock);
+
+ feed_data = (struct mpq_video_feed_info *)feed->priv;
+
+ if (unlikely(feed_data == NULL)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
+ }
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ stream_buffer = feed_data->video_buffer;
+
+ pes_header = &feed_data->pes_header;
+
+ /* MPQ_DVB_DBG_PRINT("TS packet: %X %X %X %X %X%X %X %X %X\n",
+ ts_header->sync_byte,
+ ts_header->transport_error_indicator,
+ ts_header->payload_unit_start_indicator,
+ ts_header->transport_priority,
+ ts_header->pid_msb,
+ ts_header->pid_lsb,
+ ts_header->transport_scrambling_control,
+ ts_header->adaptation_field_control,
+ ts_header->continuity_counter); */
/* Make sure this TS packet has a payload and not scrambled */
if ((ts_header->sync_byte != 0x47) ||
@@ -869,46 +1762,15 @@
packet.raw_data_len = feed->peslen;
- if ((!mpq_dmx_info.decoder_data_wrap) &&
- ((feed_data->pes_payload_address +
- feed->peslen) >
- ((u32)stream_buffer->raw_data.data +
- stream_buffer->raw_data.size)))
- MPQ_DVB_ERR_PRINT(
- "%s: "
- "Video data has wrapped-around!\n",
- __func__);
-
packet.user_data_len =
sizeof(struct
mpq_adapter_video_meta_data);
- if ((pes_header->pts_dts_flag == 2) ||
- (pes_header->pts_dts_flag == 3))
- meta_data.pts_exist = 1;
- else
- meta_data.pts_exist = 0;
+ mpq_dmx_get_pts_dts(feed_data, pes_header,
+ &meta_data,
+ DMX_PES_PACKET);
- meta_data.pts =
- ((u64)pes_header->pts_1 << 30) |
- ((u64)pes_header->pts_2 << 22) |
- ((u64)pes_header->pts_3 << 15) |
- ((u64)pes_header->pts_4 << 7) |
- (u64)pes_header->pts_5;
-
- if (pes_header->pts_dts_flag == 3)
- meta_data.dts_exist = 1;
- else
- meta_data.dts_exist = 0;
-
- meta_data.dts =
- ((u64)pes_header->dts_1 << 30) |
- ((u64)pes_header->dts_2 << 22) |
- ((u64)pes_header->dts_3 << 15) |
- ((u64)pes_header->dts_4 << 7) |
- (u64)pes_header->dts_5;
-
- meta_data.is_padding = 0;
+ meta_data.packet_type = DMX_PES_PACKET;
if (mpq_streambuffer_pkt_write(
stream_buffer,
@@ -919,6 +1781,8 @@
"Couldn't write packet. "
"Should never happen\n",
__func__);
+ else
+ feed_data->write_pts_dts = 0;
} else {
MPQ_DVB_ERR_PRINT(
"%s: received PUSI"
@@ -956,137 +1820,24 @@
if (ts_header->adaptation_field_control == 3)
ts_payload_offset += buf[ts_payload_offset] + 1;
+ /* 188 bytes: size of a TS packet including the TS packet header */
bytes_avail = 188 - ts_payload_offset;
- /* Got the mandatory fields of the video PES header? */
- if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
- left_size =
- PES_MANDATORY_FIELDS_LEN -
- feed_data->pes_header_offset;
-
- copy_len = (left_size > bytes_avail) ?
- bytes_avail :
- left_size;
-
- memcpy((u8 *)pes_header+feed_data->pes_header_offset,
- buf+ts_payload_offset,
- copy_len);
-
- feed_data->pes_header_offset += copy_len;
-
- if (left_size > bytes_avail) {
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
- }
-
- /* else - we have beginning of PES header */
- bytes_avail -= left_size;
- ts_payload_offset += left_size;
-
- /* Make sure the PES packet is valid */
- if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
- /*
- * Since the new PES header parsing
- * failed, reset pusi_seen to drop all
- * data until next PUSI
- */
- feed->pusi_seen = 0;
- feed_data->pes_header_offset = 0;
-
- MPQ_DVB_ERR_PRINT(
- "%s: invalid packet\n",
- __func__);
-
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
- }
-
- feed_data->pes_header_left_bytes =
- pes_header->pes_header_data_length;
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
}
- /* Remainning header bytes that need to be processed? */
- if (feed_data->pes_header_left_bytes) {
- /* Did we capture the PTS value (if exist)? */
- if ((bytes_avail != 0) &&
- (feed_data->pes_header_offset <
- (PES_MANDATORY_FIELDS_LEN+5)) &&
- ((pes_header->pts_dts_flag == 2) ||
- (pes_header->pts_dts_flag == 3))) {
-
- /* 5 more bytes should be there */
- left_size =
- PES_MANDATORY_FIELDS_LEN +
- 5 -
- feed_data->pes_header_offset;
-
- copy_len = (left_size > bytes_avail) ?
- bytes_avail :
- left_size;
-
- memcpy((u8 *)pes_header+
- feed_data->pes_header_offset,
- buf+ts_payload_offset,
- copy_len);
-
- feed_data->pes_header_offset += copy_len;
- feed_data->pes_header_left_bytes -= copy_len;
-
- if (left_size > bytes_avail) {
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
- }
-
- /* else - we have the PTS */
- bytes_avail -= copy_len;
- ts_payload_offset += copy_len;
- }
-
- /* Did we capture the DTS value (if exist)? */
- if ((bytes_avail != 0) &&
- (feed_data->pes_header_offset <
- (PES_MANDATORY_FIELDS_LEN+10)) &&
- (pes_header->pts_dts_flag == 3)) {
-
- /* 5 more bytes should be there */
- left_size =
- PES_MANDATORY_FIELDS_LEN +
- 10 -
- feed_data->pes_header_offset;
-
- copy_len = (left_size > bytes_avail) ?
- bytes_avail :
- left_size;
-
- memcpy((u8 *)pes_header+
- feed_data->pes_header_offset,
- buf+ts_payload_offset,
- copy_len);
-
- feed_data->pes_header_offset += copy_len;
- feed_data->pes_header_left_bytes -= copy_len;
-
- if (left_size > bytes_avail) {
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
- }
-
- /* else - we have the DTS */
- bytes_avail -= copy_len;
- ts_payload_offset += copy_len;
- }
-
- /* Any more header bytes?! */
- if (feed_data->pes_header_left_bytes >= bytes_avail) {
- feed_data->pes_header_left_bytes -= bytes_avail;
- spin_unlock(&mpq_demux->feed_lock);
- return 0;
- }
-
- /* Got PES header, process payload */
- bytes_avail -= feed_data->pes_header_left_bytes;
- ts_payload_offset += feed_data->pes_header_left_bytes;
- feed_data->pes_header_left_bytes = 0;
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&mpq_demux->feed_lock);
+ return 0;
}
/*
@@ -1098,56 +1849,6 @@
return 0;
}
- if (feed->peslen == 0) { /* starting new PES */
- /* gap till end of the buffer */
- int gap =
- stream_buffer->raw_data.size -
- stream_buffer->raw_data.pwrite;
-
- if ((!mpq_dmx_info.decoder_data_wrap) &&
- (gap < VIDEO_WRAP_AROUND_THRESHOLD)) {
- struct mpq_streambuffer_packet_header packet;
- struct mpq_adapter_video_meta_data meta_data;
-
- /*
- * Do not start writting new PES from
- * this location to prevent possible
- * wrap-around of the payload, fill padding instead.
- */
-
- /* push a packet with padding indication */
- meta_data.is_padding = 1;
-
- packet.raw_data_len = gap;
- packet.user_data_len =
- sizeof(struct mpq_adapter_video_meta_data);
- packet.raw_data_addr =
- feed_data->pes_payload_address;
-
- if (mpq_streambuffer_data_write_deposit(
- stream_buffer,
- gap) < 0) {
- MPQ_DVB_ERR_PRINT(
- "%s: mpq_streambuffer_data_write_deposit "
- "failed!\n",
- __func__);
- } else if (mpq_streambuffer_pkt_write(
- stream_buffer,
- &packet,
- (u8 *)&meta_data) < 0) {
- MPQ_DVB_ERR_PRINT(
- "%s: "
- "Couldn't write packet. "
- "Should never happen\n",
- __func__);
- } else {
- feed_data->pes_payload_address =
- (u32)stream_buffer->raw_data.data +
- stream_buffer->raw_data.pwrite;
- }
- }
- }
-
if (mpq_streambuffer_data_write(
stream_buffer,
buf+ts_payload_offset,
@@ -1160,8 +1861,17 @@
return 0;
}
-EXPORT_SYMBOL(mpq_dmx_process_video_packet);
+int mpq_dmx_process_video_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ if (mpq_dmx_info.decoder_framing)
+ return mpq_dmx_process_video_packet_no_framing(feed, buf);
+ else
+ return mpq_dmx_process_video_packet_framing(feed, buf);
+}
+EXPORT_SYMBOL(mpq_dmx_process_video_packet);
int mpq_dmx_process_pcr_packet(
struct dvb_demux_feed *feed,
@@ -1217,9 +1927,9 @@
(((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
adaptation_field->program_clock_reference_ext_2;
- stc = buf[189] << 16;
- stc += buf[190] << 8;
- stc += buf[191];
+ stc = buf[190] << 16;
+ stc += buf[189] << 8;
+ stc += buf[188];
stc *= 256; /* convert from 105.47 KHZ to 27MHz */
output[0] = adaptation_field->discontinuity_indicator;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
index d90bd89..a2d102b 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.h
@@ -32,7 +32,9 @@
/**
* TSIF alias name length
*/
-#define TSIF_NAME_LENGTH 10
+#define TSIF_NAME_LENGTH 10
+
+#define MPQ_MAX_FOUND_PATTERNS 5
/**
* struct mpq_demux - mpq demux information
@@ -252,6 +254,17 @@
} __packed;
/*
+ * mpq_framing_prefix_size_masks - possible prefix sizes.
+ *
+ * @size_mask: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the last buffer.
+ * Updated in mpq_dmx_framing_pattern_search for use in the next lookup
+ */
+struct mpq_framing_prefix_size_masks {
+ u32 size_mask[MPQ_MAX_FOUND_PATTERNS];
+};
+
+/*
* mpq_video_feed_info - private data used for video feed.
*
* @plugin_data: Underlying plugin's own private data.
@@ -270,6 +283,30 @@
* @payload_buff_handle: ION handle for the allocated payload buffer
* @stream_interface: The ID of the video stream interface registered
* with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @last_framing_match_address: Used for saving the raw data address of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_pattern_offset: used to save the offset of the first pattern written
+ * to the stream buffer.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @write_pts_dts: Flag used to decide if to write PTS/DTS information
+ * (if it is available in the PES header) in the meta-data passed
+ * to the video decoder. PTS/DTS information is written in the first
+ * packet after it is available.
*/
struct mpq_video_feed_info {
void *plugin_data;
@@ -281,6 +318,15 @@
int fullness_wait_cancel;
struct ion_handle *payload_buff_handle;
enum mpq_adapter_stream_if stream_interface;
+ const struct mpq_framing_pattern_lookup_params *patterns;
+ int patterns_num;
+ u32 last_framing_match_address;
+ enum dmx_framing_pattern_type last_framing_match_type;
+ int found_sequence_header_pattern;
+ struct mpq_framing_prefix_size_masks prefix_size;
+ u32 first_pattern_offset;
+ u32 first_prefix_size;
+ int write_pts_dts;
};
/**
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index 5894a65..c79d5bb 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -598,7 +598,8 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION;
+ DMXDEV_CAP_PCR_EXTRACTION |
+ DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index 406ae52..2df5acc 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -705,7 +705,8 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION;
+ DMXDEV_CAP_PCR_EXTRACTION |
+ DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
index d3c2c50..d0f3e7a 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
@@ -89,7 +89,8 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION;
+ DMXDEV_CAP_PCR_EXTRACTION |
+ DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/include/mpq_adapter.h b/drivers/media/dvb/mpq/include/mpq_adapter.h
index c720f91..c9b2441 100644
--- a/drivers/media/dvb/mpq/include/mpq_adapter.h
+++ b/drivers/media/dvb/mpq/include/mpq_adapter.h
@@ -37,15 +37,38 @@
};
-/** The meta-data used for video interface */
-struct mpq_adapter_video_meta_data {
- /**
- * Indication whether this packet is just a padding packet.
- * In this case packet should be just disposed along
- * with the padding in the raw-data buffer.
- */
- int is_padding;
+enum dmx_framing_pattern_type {
+ /* MPEG-2 */
+ DMX_FRM_MPEG2_SEQUENCE_HEADER,
+ DMX_FRM_MPEG2_GOP_HEADER,
+ DMX_FRM_MPEG2_I_PIC,
+ DMX_FRM_MPEG2_P_PIC,
+ DMX_FRM_MPEG2_B_PIC,
+ /* H.264 */
+ DMX_FRM_H264_SPS,
+ DMX_FRM_H264_PPS,
+ /* H.264 First Coded slice of an IDR Picture */
+ DMX_FRM_H264_IDR_PIC,
+ /* H.264 First Coded slice of a non-IDR Picture */
+ DMX_FRM_H264_NON_IDR_PIC,
+ /* VC-1 Sequence Header*/
+ DMX_FRM_VC1_SEQUENCE_HEADER,
+ /* VC-1 Entry Point Header (Advanced Profile only) */
+ DMX_FRM_VC1_ENTRY_POINT_HEADER,
+ /* VC-1 Frame Start Code */
+ DMX_FRM_VC1_FRAME_START_CODE,
+ /* Unknown or invalid framing information */
+ DMX_FRM_UNKNOWN
+};
+enum dmx_packet_type {
+ DMX_PADDING_PACKET,
+ DMX_PES_PACKET,
+ DMX_FRAMING_INFO_PACKET,
+ DMX_EOS_PACKET
+};
+
+struct dmx_pts_dts_info {
/** Indication whether PTS exist */
int pts_exist;
@@ -57,6 +80,30 @@
/** DTS value associated with the PES data if any */
u64 dts;
+};
+
+struct dmx_framing_packet_info {
+ /** framing pattern type */
+ enum dmx_framing_pattern_type pattern_type;
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+};
+
+struct dmx_pes_packet_info {
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+ /** meta-data packet type */
+ enum dmx_packet_type packet_type;
+
+ /** packet-type specific information */
+ union {
+ struct dmx_framing_packet_info framing;
+ struct dmx_pes_packet_info pes;
+ } info;
} __packed;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5508c3d..c07bdc4 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -24,6 +24,17 @@
LIRC daemon handles protocol decoding for IR reception and
encoding for IR transmitting (aka "blasting").
+config USER_RC_INPUT
+ tristate "User Space Input device wrapper for Remote Control"
+ depends on RC_CORE
+
+ ---help---
+ Say Y if you want to report remote control input events
+ from userspace.
+
+ To compile this driver as a module, choose M here: the module will
+ be called user-rc-input.
+
source "drivers/media/rc/keymaps/Kconfig"
config IR_NEC_DECODER
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 523fcd0..b9c1e21 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -11,6 +11,7 @@
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
+obj-$(CONFIG_USER_RC_INPUT) += user-rc-input.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_IR_IMON) += imon.o
diff --git a/drivers/media/rc/user-rc-input.c b/drivers/media/rc/user-rc-input.c
new file mode 100644
index 0000000..f1a9334
--- /dev/null
+++ b/drivers/media/rc/user-rc-input.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+
+#include <media/rc-core.h>
+#include <media/user-rc-input.h>
+
+#define MAX_RC_DEVICES 1
+#define USER_RC_INPUT_DEV_NAME "user-rc-input"
+#define USER_RC_INPUT_DRV_NAME "rc-user-input"
+
+struct user_rc_input_dev {
+ struct cdev rc_input_cdev;
+ struct class *rc_input_class;
+ struct device *rc_input_dev;
+ struct rc_dev *rcdev;
+ dev_t rc_input_base_dev;
+ struct device *dev;
+ int in_use;
+};
+
+static int user_rc_input_open(struct inode *inode, struct file *file)
+{
+ struct cdev *input_cdev = inode->i_cdev;
+ struct user_rc_input_dev *input_dev =
+ container_of(input_cdev, struct user_rc_input_dev, rc_input_cdev);
+
+ if (input_dev->in_use) {
+ dev_err(input_dev->dev,
+ "Device is already open..only one instance is allowed\n");
+ return -EBUSY;
+ }
+ input_dev->in_use++;
+ file->private_data = input_dev;
+
+ return 0;
+}
+
+static int user_rc_input_release(struct inode *inode, struct file *file)
+{
+ struct user_rc_input_dev *input_dev = file->private_data;
+
+ input_dev->in_use--;
+
+ return 0;
+}
+
+static ssize_t user_rc_input_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ struct user_rc_input_dev *input_dev = file->private_data;
+ __u8 *buf;
+
+ buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
+ if (!buf) {
+ dev_err(input_dev->dev,
+ "kmalloc failed...Insufficient memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(buf, buffer, count)) {
+ dev_err(input_dev->dev, "Copy from user failed\n");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ switch (buf[0]) {
+ case USER_CONTROL_PRESSED:
+ dev_dbg(input_dev->dev, "user controlled"
+ " pressed 0x%x\n", buf[1]);
+ rc_keydown(input_dev->rcdev, buf[1], 0);
+ break;
+ case USER_CONTROL_REPEATED:
+ dev_dbg(input_dev->dev, "user controlled"
+ " repeated 0x%x\n", buf[1]);
+ rc_repeat(input_dev->rcdev);
+ break;
+ case USER_CONTROL_RELEASED:
+ dev_dbg(input_dev->dev, "user controlled"
+ " released 0x%x\n", buf[1]);
+ rc_keyup(input_dev->rcdev);
+ break;
+ }
+
+out_free:
+ kfree(buf);
+out:
+ return ret;
+}
+
+const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = user_rc_input_open,
+ .write = user_rc_input_write,
+ .release = user_rc_input_release,
+};
+
+static int __devinit user_rc_input_probe(struct platform_device *pdev)
+{
+ struct user_rc_input_dev *user_rc_dev;
+ struct rc_dev *rcdev;
+ int retval;
+
+ user_rc_dev = kzalloc(sizeof(struct user_rc_input_dev), GFP_KERNEL);
+ if (!user_rc_dev)
+ return -ENOMEM;
+
+ user_rc_dev->rc_input_class = class_create(THIS_MODULE,
+ "user-rc-input-loopback");
+
+ if (IS_ERR(user_rc_dev->rc_input_class)) {
+ retval = PTR_ERR(user_rc_dev->rc_input_class);
+ goto err;
+ }
+
+ retval = alloc_chrdev_region(&user_rc_dev->rc_input_base_dev, 0,
+ MAX_RC_DEVICES, USER_RC_INPUT_DEV_NAME);
+
+ if (retval) {
+ dev_err(&pdev->dev,
+ "alloc_chrdev_region failed\n");
+ goto alloc_chrdev_err;
+ }
+
+ dev_info(&pdev->dev, "User space report key event input "
+ "loopback driver registered, "
+ "major %d\n", MAJOR(user_rc_dev->rc_input_base_dev));
+
+ cdev_init(&user_rc_dev->rc_input_cdev, &fops);
+ retval = cdev_add(&user_rc_dev->rc_input_cdev,
+ user_rc_dev->rc_input_base_dev,
+ MAX_RC_DEVICES);
+ if (retval) {
+ dev_err(&pdev->dev, "cdev_add failed\n");
+ goto cdev_add_err;
+ }
+ user_rc_dev->rc_input_dev =
+ device_create(user_rc_dev->rc_input_class,
+ NULL,
+ MKDEV(MAJOR(user_rc_dev->rc_input_base_dev),
+ 0), NULL, "user-rc-input-dev%d", 0);
+
+ if (IS_ERR(user_rc_dev->rc_input_dev)) {
+ retval = PTR_ERR(user_rc_dev->rc_input_dev);
+ dev_err(&pdev->dev, "device_create failed\n");
+ goto device_create_err;
+ }
+
+ rcdev = rc_allocate_device();
+ if (!rcdev) {
+ dev_err(&pdev->dev, "failed to allocate rc device");
+ retval = -ENOMEM;
+ goto err_allocate_device;
+ }
+
+ rcdev->driver_type = RC_DRIVER_SCANCODE;
+ rcdev->allowed_protos = RC_TYPE_OTHER;
+ rcdev->input_name = USER_RC_INPUT_DEV_NAME;
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->driver_name = USER_RC_INPUT_DRV_NAME;
+ rcdev->map_name = RC_MAP_UE_RF4CE;
+
+ retval = rc_register_device(rcdev);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "failed to register rc device\n");
+ goto rc_register_err;
+ }
+ user_rc_dev->rcdev = rcdev;
+ user_rc_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, user_rc_dev);
+ user_rc_dev->in_use = 0;
+
+ return 0;
+
+rc_register_err:
+ rc_free_device(rcdev);
+err_allocate_device:
+ device_destroy(user_rc_dev->rc_input_class,
+ MKDEV(MAJOR(user_rc_dev->rc_input_base_dev), 0));
+cdev_add_err:
+ unregister_chrdev_region(user_rc_dev->rc_input_base_dev,
+ MAX_RC_DEVICES);
+device_create_err:
+ cdev_del(&user_rc_dev->rc_input_cdev);
+alloc_chrdev_err:
+ class_destroy(user_rc_dev->rc_input_class);
+err:
+ kfree(user_rc_dev);
+ return retval;
+}
+
+static int __devexit user_rc_input_remove(struct platform_device *pdev)
+{
+ struct user_rc_input_dev *user_rc_dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ rc_free_device(user_rc_dev->rcdev);
+ device_destroy(user_rc_dev->rc_input_class,
+ MKDEV(MAJOR(user_rc_dev->rc_input_base_dev), 0));
+ unregister_chrdev_region(user_rc_dev->rc_input_base_dev,
+ MAX_RC_DEVICES);
+ cdev_del(&user_rc_dev->rc_input_cdev);
+ class_destroy(user_rc_dev->rc_input_class);
+ kfree(user_rc_dev);
+
+ return 0;
+}
+
+static struct platform_driver user_rc_input_driver = {
+ .probe = user_rc_input_probe,
+ .remove = __devexit_p(user_rc_input_remove),
+ .driver = {
+ .name = USER_RC_INPUT_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init user_rc_input_init(void)
+{
+ return platform_driver_register(&user_rc_input_driver);
+}
+module_init(user_rc_input_init);
+
+static void __exit user_rc_input_exit(void)
+{
+ platform_driver_unregister(&user_rc_input_driver);
+}
+module_exit(user_rc_input_exit);
+
+MODULE_DESCRIPTION("User RC Input driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 5aaef24..7d53e11 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -1090,3 +1090,5 @@
endif # V4L_MEM2MEM_DRIVERS
+
+source "drivers/media/video/msm_vidc/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 65a2348..d2eabb9 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -158,6 +158,7 @@
obj-$(CONFIG_MSM_VCAP) += vcap_v4l2.o
obj-$(CONFIG_MSM_VCAP) += vcap_vc.o
+obj-$(CONFIG_MSM_VCAP) += vcap_vp.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
@@ -189,6 +190,7 @@
obj-$(CONFIG_MSM_CAMERA) += msm/
obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-$(CONFIG_MSM_VIDC) += msm_vidc/
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index fbc3a37..ab4a6f2 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -177,6 +177,15 @@
supports spotlight and flash light modes with
differrent current levels.
+config MSM_CAMERA_FLASH_TPS61310
+ bool "Qualcomm MSM camera tps61310 flash support"
+ depends on MSM_CAMERA
+ default n
+ ---help---
+ Enable support for LED flash for msm camera.
+ It is a Texas Instruments multiple LED Flash
+ for camera flash and video light applications.
+
config IMX072
bool "Sensor imx072 (Sony 5M)"
default n
@@ -271,3 +280,9 @@
overlay driver. This allows video rendering
apps to render overlaid video using Video4Linux2
APIs, by using /dev/videoX device
+
+config OV7692
+ bool "Sensor OV7692 (VGA YUV)"
+ depends on MSM_CAMERA
+ ---help---
+ Omni Vision VGA YUV Sensor
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index ebfed6c..b60f99f 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -13,6 +13,7 @@
EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
obj-$(CONFIG_MSM_CAMERA) += io/ eeprom/ sensors/ actuators/ csi/
+ obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
else
obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
endif
diff --git a/drivers/media/video/msm/actuators/msm_actuator.c b/drivers/media/video/msm/actuators/msm_actuator.c
index e4d8368..3a8ae9e 100644
--- a/drivers/media/video/msm/actuators/msm_actuator.c
+++ b/drivers/media/video/msm/actuators/msm_actuator.c
@@ -253,6 +253,8 @@
target_step_pos = dest_step_pos;
target_lens_pos =
a_ctrl->step_position_table[target_step_pos];
+ if (curr_lens_pos == target_lens_pos)
+ return rc;
rc = a_ctrl->func_tbl->
actuator_write_focus(
a_ctrl,
@@ -273,6 +275,8 @@
target_step_pos = step_boundary;
target_lens_pos =
a_ctrl->step_position_table[target_step_pos];
+ if (curr_lens_pos == target_lens_pos)
+ return rc;
rc = a_ctrl->func_tbl->
actuator_write_focus(
a_ctrl,
@@ -371,38 +375,13 @@
int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
- int16_t step_pos = 0;
- int16_t i = 0;
- CDBG("%s called\n", __func__);
-
- if (a_ctrl->step_position_table) {
- if (a_ctrl->step_position_table[a_ctrl->curr_step_pos] >=
- a_ctrl->step_position_table[a_ctrl->pwd_step]) {
- step_pos = (a_ctrl->
- step_position_table[a_ctrl->curr_step_pos] -
- a_ctrl->step_position_table[a_ctrl->
- pwd_step]) / 10;
- for (i = 0; i < 10; i++) {
- rc = a_ctrl->func_tbl->
- actuator_i2c_write(a_ctrl,
- i * step_pos, 0);
- usleep(500);
- }
- rc = a_ctrl->func_tbl->actuator_i2c_write(a_ctrl,
- a_ctrl->step_position_table[a_ctrl->
- curr_step_pos],
- 0);
- }
- CDBG("%s after msm_actuator_set_default_focus\n", __func__);
- kfree(a_ctrl->step_position_table);
- }
-
if (a_ctrl->vcm_enable) {
rc = gpio_direction_output(a_ctrl->vcm_pwd, 0);
if (!rc)
gpio_free(a_ctrl->vcm_pwd);
}
+ kfree(a_ctrl->step_position_table);
a_ctrl->step_position_table = NULL;
return rc;
}
diff --git a/drivers/media/video/msm/csi/msm_csic.c b/drivers/media/video/msm/csi/msm_csic.c
index 6a5a647..e8be393 100644
--- a/drivers/media/video/msm/csi/msm_csic.c
+++ b/drivers/media/video/msm/csi/msm_csic.c
@@ -289,6 +289,8 @@
return rc;
}
}
+ if (csic_dev->hw_version == CSIC_7X)
+ msm_camio_vfe_blk_reset_3();
#if DBG_CSIC
enable_irq(csic_dev->irq->start);
@@ -433,12 +435,28 @@
goto csic_no_resource;
}
disable_irq(new_csic_dev->irq->start);
- iounmap(new_csic_dev->base);
- new_csic_dev->base = NULL;
new_csic_dev->pdev = pdev;
+
+ rc = msm_cam_clk_enable(&new_csic_dev->pdev->dev, &csic_7x_clk_info[2],
+ new_csic_dev->csic_clk, 1, 1);
+ new_csic_dev->base = ioremap(new_csic_dev->mem->start,
+ resource_size(new_csic_dev->mem));
+ if (!new_csic_dev->base) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ msm_camera_io_w(MIPI_PWR_CNTL_DIS, new_csic_dev->base + MIPI_PWR_CNTL);
+
+ rc = msm_cam_clk_enable(&new_csic_dev->pdev->dev, &csic_7x_clk_info[2],
+ new_csic_dev->csic_clk, 1, 0);
+
+ iounmap(new_csic_dev->base);
+ new_csic_dev->base = NULL;
msm_cam_register_subdev_node(
&new_csic_dev->subdev, CSIC_DEV, pdev->id);
+
return 0;
csic_no_resource:
diff --git a/drivers/media/video/msm/flash.c b/drivers/media/video/msm/flash.c
index a86e5c4..ba86d8c 100644
--- a/drivers/media/video/msm/flash.c
+++ b/drivers/media/video/msm/flash.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,54 +30,54 @@
MSM_CAM_FLASH_ON,
};
-#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
static struct i2c_client *sc628a_client;
-static const struct i2c_device_id sc628a_i2c_id[] = {
- {"sc628a", 0},
- { }
-};
-
-static int32_t sc628a_i2c_txdata(unsigned short saddr,
+static int32_t flash_i2c_txdata(struct i2c_client *client,
unsigned char *txdata, int length)
{
struct i2c_msg msg[] = {
{
- .addr = saddr,
+ .addr = client->addr >> 1,
.flags = 0,
.len = length,
.buf = txdata,
},
};
- if (i2c_transfer(sc628a_client->adapter, msg, 1) < 0) {
- CDBG("sc628a_i2c_txdata faild 0x%x\n", saddr);
+ if (i2c_transfer(client->adapter, msg, 1) < 0) {
+ CDBG("flash_i2c_txdata faild 0x%x\n", client->addr >> 1);
return -EIO;
}
return 0;
}
-static int32_t sc628a_i2c_write_b_flash(uint8_t waddr, uint8_t bdata)
+static int32_t flash_i2c_write_b(struct i2c_client *client,
+ uint8_t baddr, uint8_t bdata)
{
int32_t rc = -EFAULT;
unsigned char buf[2];
- if (!sc628a_client)
+ if (!client)
return -ENOTSUPP;
memset(buf, 0, sizeof(buf));
- buf[0] = waddr;
+ buf[0] = baddr;
buf[1] = bdata;
- rc = sc628a_i2c_txdata(sc628a_client->addr>>1, buf, 2);
+ rc = flash_i2c_txdata(client, buf, 2);
if (rc < 0) {
CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
- waddr, bdata);
+ baddr, bdata);
}
usleep_range(4000, 5000);
return rc;
}
+static const struct i2c_device_id sc628a_i2c_id[] = {
+ {"sc628a", 0},
+ { }
+};
+
static int sc628a_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -91,7 +91,7 @@
sc628a_client = client;
- CDBG("sc628a_probe successed! rc = %d\n", rc);
+ CDBG("sc628a_probe success rc = %d\n", rc);
return 0;
probe_failure:
@@ -107,7 +107,49 @@
.name = "sc628a",
},
};
-#endif
+
+static struct i2c_client *tps61310_client;
+
+static const struct i2c_device_id tps61310_i2c_id[] = {
+ {"tps61310", 0},
+ { }
+};
+
+static int tps61310_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ CDBG("%s enter\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ goto probe_failure;
+ }
+
+ tps61310_client = client;
+
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x00);
+ if (rc < 0) {
+ tps61310_client = NULL;
+ goto probe_failure;
+ }
+
+ CDBG("%s success! rc = %d\n", __func__, rc);
+ return 0;
+
+probe_failure:
+ pr_err("%s failed! rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static struct i2c_driver tps61310_i2c_driver = {
+ .id_table = tps61310_i2c_id,
+ .probe = tps61310_i2c_probe,
+ .remove = __exit_p(tps61310_i2c_remove),
+ .driver = {
+ .name = "tps61310",
+ },
+};
static int config_flash_gpio_table(enum msm_cam_flash_stat stat,
struct msm_camera_sensor_strobe_flash_data *sfdata)
@@ -278,18 +320,34 @@
{
int rc = 0;
-#if defined CONFIG_MSM_CAMERA_FLASH_SC628A
switch (led_state) {
case MSM_CAMERA_LED_INIT:
- if (!sc628a_client) {
- rc = i2c_add_driver(&sc628a_i2c_driver);
- if (rc < 0 || sc628a_client == NULL) {
- rc = -ENOTSUPP;
- CDBG("I2C add driver failed");
- return rc;
+ if (external->flash_id == MAM_CAMERA_EXT_LED_FLASH_SC628A) {
+ if (!sc628a_client) {
+ rc = i2c_add_driver(&sc628a_i2c_driver);
+ if (rc < 0 || sc628a_client == NULL) {
+ pr_err("sc628a_i2c_driver add failed\n");
+ rc = -ENOTSUPP;
+ return rc;
+ }
}
+ } else if (external->flash_id ==
+ MAM_CAMERA_EXT_LED_FLASH_TPS61310) {
+ if (!tps61310_client) {
+ rc = i2c_add_driver(&tps61310_i2c_driver);
+ if (rc < 0 || tps61310_client == NULL) {
+ pr_err("tps61310_i2c_driver add failed\n");
+ rc = -ENOTSUPP;
+ return rc;
+ }
+ }
+ } else {
+ pr_err("Flash id not supported\n");
+ rc = -ENOTSUPP;
+ return rc;
}
+
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
if (external->expander_info && !sx150x_client) {
struct i2c_adapter *adapter =
@@ -298,40 +356,70 @@
sx150x_client = i2c_new_device(adapter,
external->expander_info->board_info);
if (!sx150x_client || !adapter) {
+ pr_err("sx150x_client is not available\n");
rc = -ENOTSUPP;
- i2c_del_driver(&sc628a_i2c_driver);
- sc628a_client = NULL;
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
return rc;
}
+ i2c_put_adapter(adapter);
}
#endif
- rc = gpio_request(external->led_en, "sc628a");
+ if (sc628a_client)
+ rc = gpio_request(external->led_en, "sc628a");
+ if (tps61310_client)
+ rc = gpio_request(external->led_en, "tps61310");
+
if (!rc) {
- gpio_direction_output(external->led_en, 1);
+ gpio_direction_output(external->led_en, 0);
} else {
- goto err1;
+ goto error;
}
- rc = gpio_request(external->led_flash_en, "sc628a");
+
+ if (sc628a_client)
+ rc = gpio_request(external->led_flash_en, "sc628a");
+ if (tps61310_client)
+ rc = gpio_request(external->led_flash_en, "tps61310");
+
if (!rc) {
- gpio_direction_output(external->led_flash_en, 1);
+ gpio_direction_output(external->led_flash_en, 0);
break;
}
gpio_set_value_cansleep(external->led_en, 0);
gpio_free(external->led_en);
-
-err1:
- i2c_del_driver(&sc628a_i2c_driver);
- sc628a_client = NULL;
-
+error:
+ pr_err("%s gpio request failed\n", __func__);
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
break;
case MSM_CAMERA_LED_RELEASE:
- if (sc628a_client) {
+ if (sc628a_client || tps61310_client) {
gpio_set_value_cansleep(external->led_en, 0);
gpio_free(external->led_en);
gpio_set_value_cansleep(external->led_flash_en, 0);
gpio_free(external->led_flash_en);
+ if (sc628a_client) {
+ i2c_del_driver(&sc628a_i2c_driver);
+ sc628a_client = NULL;
+ }
+ if (tps61310_client) {
+ i2c_del_driver(&tps61310_i2c_driver);
+ tps61310_client = NULL;
+ }
}
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
if (external->expander_info && sx150x_client) {
@@ -342,37 +430,38 @@
break;
case MSM_CAMERA_LED_OFF:
- rc = sc628a_i2c_write_b_flash(0x02, 0x0);
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 0);
- gpio_set_value_cansleep(external->led_flash_en, 0);
- }
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x00);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x00);
+ gpio_set_value_cansleep(external->led_en, 0);
+ gpio_set_value_cansleep(external->led_flash_en, 0);
break;
case MSM_CAMERA_LED_LOW:
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 1);
- gpio_set_value_cansleep(external->led_flash_en, 1);
- usleep_range(2000, 3000);
- }
- rc = sc628a_i2c_write_b_flash(0x02, 0x06);
+ gpio_set_value_cansleep(external->led_en, 1);
+ gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x06);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x86);
break;
case MSM_CAMERA_LED_HIGH:
- if (sc628a_client) {
- gpio_set_value_cansleep(external->led_en, 1);
- gpio_set_value_cansleep(external->led_flash_en, 1);
- usleep_range(2000, 3000);
- }
- rc = sc628a_i2c_write_b_flash(0x02, 0x49);
+ gpio_set_value_cansleep(external->led_en, 1);
+ gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
+ if (sc628a_client)
+ rc = flash_i2c_write_b(sc628a_client, 0x02, 0x49);
+ if (tps61310_client)
+ rc = flash_i2c_write_b(tps61310_client, 0x01, 0x8B);
break;
default:
rc = -EFAULT;
break;
}
-#endif
-
return rc;
}
diff --git a/drivers/media/video/msm/io/msm_camera_io_util.c b/drivers/media/video/msm/io/msm_camera_io_util.c
index af60426..4049266 100644
--- a/drivers/media/video/msm/io/msm_camera_io_util.c
+++ b/drivers/media/video/msm/io/msm_camera_io_util.c
@@ -370,7 +370,7 @@
usleep_range(gpio_conf->cam_gpio_set_tbl[i].delay,
gpio_conf->cam_gpio_set_tbl[i].delay + 1000);
}
- } else if (!gpio_conf->gpio_no_mux) {
+ } else {
for (i = gpio_conf->cam_gpio_set_tbl_size - 1; i >= 0; i--) {
if (gpio_conf->cam_gpio_set_tbl[i].flags)
gpio_set_value_cansleep(
diff --git a/drivers/media/video/msm/io/msm_io_7x27a_v4l2.c b/drivers/media/video/msm/io/msm_io_7x27a_v4l2.c
index 45761d5..946b985 100644
--- a/drivers/media/video/msm/io/msm_io_7x27a_v4l2.c
+++ b/drivers/media/video/msm/io/msm_io_7x27a_v4l2.c
@@ -85,27 +85,18 @@
clk_set_rate(clk, rate);
}
-void msm_camio_vfe_blk_reset_2(int vfe_apps_reset)
+void msm_camio_vfe_blk_reset_2(void)
{
uint32_t val;
- if (apps_reset && !vfe_apps_reset)
- return;
-
/* do apps reset */
val = readl_relaxed(appbase + 0x00000210);
- if (apps_reset)
- val |= 0x10A0001;
- else
- val |= 0x1;
+ val |= 0x1;
writel_relaxed(val, appbase + 0x00000210);
usleep_range(10000, 11000);
val = readl_relaxed(appbase + 0x00000210);
- if (apps_reset)
- val &= ~(0x10A0001);
- else
- val &= ~0x1;
+ val &= ~0x1;
writel_relaxed(val, appbase + 0x00000210);
usleep_range(10000, 11000);
@@ -122,6 +113,26 @@
usleep_range(10000, 11000);
}
+void msm_camio_vfe_blk_reset_3(void)
+{
+ uint32_t val;
+
+ if (!apps_reset)
+ return;
+
+ /* do apps reset */
+ val = readl_relaxed(appbase + 0x00000210);
+ val |= 0x10A0000;
+ writel_relaxed(val, appbase + 0x00000210);
+ usleep_range(10000, 11000);
+
+ val = readl_relaxed(appbase + 0x00000210);
+ val &= ~(0x10A0000);
+ writel_relaxed(val, appbase + 0x00000210);
+ usleep_range(10000, 11000);
+ mb();
+}
+
void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
{
switch (perf_setting) {
diff --git a/drivers/media/video/msm/io/msm_io_vfe31_v4l2.c b/drivers/media/video/msm/io/msm_io_vfe31_v4l2.c
index a1270ea..9dc097b 100644
--- a/drivers/media/video/msm/io/msm_io_vfe31_v4l2.c
+++ b/drivers/media/video/msm/io/msm_io_vfe31_v4l2.c
@@ -156,6 +156,11 @@
return;
}
+void msm_camio_vfe_blk_reset_3(void)
+{
+ return;
+}
+
static void msm_camio_axi_cfg(enum msm_bus_perf_setting perf_setting)
{
switch (perf_setting) {
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index 8720d70..034cbc5 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -45,6 +45,10 @@
module_param(msm_camera_v4l2_nr, uint, 0644);
MODULE_PARM_DESC(msm_camera_v4l2_nr, "videoX start number, -1 is autodetect");
+static long msm_server_send_v4l2_evt(void *evt);
+static void msm_cam_server_subdev_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg);
+
static void msm_queue_init(struct msm_device_queue *queue, const char *name)
{
D("%s\n", __func__);
@@ -178,10 +182,16 @@
return -EINVAL;
}
+ D("%s qid %d evtid %d %d\n", __func__, command->queue_idx,
+ command->evt_id,
+ g_server_dev.server_queue[command->queue_idx].evt_id);
g_server_dev.server_queue[command->queue_idx].ctrl = command;
if (command->evt_id !=
g_server_dev.server_queue[command->queue_idx].evt_id) {
- pr_err("Invalid event id from userspace\n");
+ pr_err("%s Invalid event id from userspace cmd id %d %d qid %d\n",
+ __func__, command->evt_id,
+ g_server_dev.server_queue[command->queue_idx].evt_id,
+ command->queue_idx);
return -EINVAL;
}
@@ -241,6 +251,8 @@
mutex_lock(&server_dev->server_queue_lock);
if (++server_dev->server_evt_id == 0)
server_dev->server_evt_id++;
+ D("%s qid %d evtid %d\n", __func__, out->queue_idx,
+ server_dev->server_evt_id);
server_dev->server_queue[out->queue_idx].evt_id =
server_dev->server_evt_id;
@@ -286,7 +298,8 @@
ctrlcmd = (struct msm_ctrl_cmd *)(rcmd->command);
value = out->value;
- if (ctrlcmd->length > 0)
+ if (ctrlcmd->length > 0 && value != NULL &&
+ ctrlcmd->length <= out->length)
memcpy(value, ctrlcmd->value, ctrlcmd->length);
memcpy(out, ctrlcmd, sizeof(struct msm_ctrl_cmd));
@@ -316,7 +329,7 @@
{
int rc = 0;
struct msm_ctrl_cmd ctrlcmd;
- D("%s\n", __func__);
+ D("%s qid %d\n", __func__, pcam->server_queue_idx);
ctrlcmd.type = MSM_V4L2_OPEN;
ctrlcmd.timeout_ms = 10000;
ctrlcmd.length = strnlen(g_server_dev.config_info.config_dev_name[0],
@@ -336,7 +349,7 @@
{
int rc = 0;
struct msm_ctrl_cmd ctrlcmd;
- D("%s\n", __func__);
+ D("%s qid %d\n", __func__, pcam->server_queue_idx);
ctrlcmd.type = MSM_V4L2_CLOSE;
ctrlcmd.timeout_ms = 10000;
ctrlcmd.length = strnlen(g_server_dev.config_info.config_dev_name[0],
@@ -1431,7 +1444,7 @@
sub->type++;
D("sub->type while = 0x%x\n", sub->type);
} while (sub->type !=
- V4L2_EVENT_PRIVATE_START + MSM_CAM_RESP_MAX);
+ V4L2_EVENT_PRIVATE_START + MSM_SVR_RESP_MAX);
} else {
D("sub->type not V4L2_EVENT_ALL = 0x%x\n", sub->type);
rc = v4l2_event_subscribe(fh, sub);
@@ -1554,6 +1567,150 @@
msm_mctl_free(pcam);
return rc;
}
+
+int msm_server_open_client(int *p_qidx)
+{
+ int rc = 0;
+ int server_q_idx = 0;
+ struct msm_cam_server_queue *queue = NULL;
+
+ mutex_lock(&g_server_dev.server_lock);
+ server_q_idx = msm_find_free_queue();
+ if (server_q_idx < 0) {
+ mutex_unlock(&g_server_dev.server_lock);
+ return server_q_idx;
+ }
+
+ *p_qidx = server_q_idx;
+ queue = &g_server_dev.server_queue[server_q_idx];
+ queue->ctrl = NULL;
+ queue->ctrl_data = kzalloc(sizeof(uint8_t) *
+ max_control_command_size, GFP_KERNEL);
+ msm_queue_init(&queue->ctrl_q, "control");
+ msm_queue_init(&queue->eventData_q, "eventdata");
+ queue->queue_active = 1;
+ mutex_unlock(&g_server_dev.server_lock);
+ return rc;
+}
+
+int msm_server_send_ctrl(struct msm_ctrl_cmd *out,
+ int ctrl_id)
+{
+ int rc = 0;
+ void *value;
+ struct msm_queue_cmd *rcmd;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_ctrl_cmd *ctrlcmd;
+ struct msm_cam_server_dev *server_dev = &g_server_dev;
+ struct msm_device_queue *queue =
+ &server_dev->server_queue[out->queue_idx].ctrl_q;
+
+ struct v4l2_event v4l2_evt;
+ struct msm_isp_event_ctrl *isp_event;
+ isp_event = kzalloc(sizeof(struct msm_isp_event_ctrl), GFP_KERNEL);
+ if (!isp_event) {
+ pr_err("%s Insufficient memory. return", __func__);
+ return -ENOMEM;
+ }
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!event_qcmd) {
+ pr_err("%s Insufficient memory. return", __func__);
+ kfree(isp_event);
+ return -ENOMEM;
+ }
+
+ D("%s\n", __func__);
+ mutex_lock(&server_dev->server_queue_lock);
+ if (++server_dev->server_evt_id == 0)
+ server_dev->server_evt_id++;
+
+ D("%s qid %d evtid %d\n", __func__, out->queue_idx,
+ server_dev->server_evt_id);
+ server_dev->server_queue[out->queue_idx].evt_id =
+ server_dev->server_evt_id;
+ v4l2_evt.type = V4L2_EVENT_PRIVATE_START + ctrl_id;
+ v4l2_evt.u.data[0] = out->queue_idx;
+ /* setup event object to transfer the command; */
+ isp_event->resptype = MSM_CAM_RESP_V4L2;
+ isp_event->isp_data.ctrl = *out;
+ isp_event->isp_data.ctrl.evt_id = server_dev->server_evt_id;
+
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = isp_event;
+
+ msm_enqueue(&server_dev->server_queue[out->queue_idx].eventData_q,
+ &event_qcmd->list_eventdata);
+
+ /* now send command to config thread in userspace,
+ * and wait for results */
+ v4l2_event_queue(server_dev->server_command_queue.pvdev,
+ &v4l2_evt);
+ D("%s v4l2_event_queue: type = 0x%x\n", __func__, v4l2_evt.type);
+ mutex_unlock(&server_dev->server_queue_lock);
+
+ /* wait for config return status */
+ D("Waiting for config status\n");
+ rc = wait_event_interruptible_timeout(queue->wait,
+ !list_empty_careful(&queue->list),
+ msecs_to_jiffies(out->timeout_ms));
+ D("Waiting is over for config status\n");
+ if (list_empty_careful(&queue->list)) {
+ if (!rc)
+ rc = -ETIMEDOUT;
+ if (rc < 0) {
+ kfree(isp_event);
+ pr_err("%s: wait_event error %d\n", __func__, rc);
+ return rc;
+ }
+ }
+
+ rcmd = msm_dequeue(queue, list_control);
+ BUG_ON(!rcmd);
+ D("%s Finished servicing ioctl\n", __func__);
+
+ ctrlcmd = (struct msm_ctrl_cmd *)(rcmd->command);
+ value = out->value;
+ if (ctrlcmd->length > 0)
+ memcpy(value, ctrlcmd->value, ctrlcmd->length);
+
+ memcpy(out, ctrlcmd, sizeof(struct msm_ctrl_cmd));
+ out->value = value;
+
+ kfree(ctrlcmd);
+ server_dev->server_queue[out->queue_idx].ctrl = NULL;
+
+ free_qcmd(rcmd);
+ kfree(isp_event);
+ D("%s: rc %d\n", __func__, rc);
+ /* rc is the time elapsed. */
+ if (rc >= 0) {
+ /* TODO: Refactor msm_ctrl_cmd::status field */
+ if (out->status == 0)
+ rc = -1;
+ else if (out->status == 1 || out->status == 4)
+ rc = 0;
+ else
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+int msm_server_close_client(int idx)
+{
+ int rc = 0;
+ struct msm_cam_server_queue *queue = NULL;
+ mutex_lock(&g_server_dev.server_lock);
+ queue = &g_server_dev.server_queue[idx];
+ queue->queue_active = 0;
+ kfree(queue->ctrl);
+ queue->ctrl = NULL;
+ kfree(queue->ctrl_data);
+ queue->ctrl_data = NULL;
+ msm_queue_drain(&queue->ctrl_q, list_control);
+ msm_drain_eventq(&queue->eventData_q);
+ mutex_unlock(&g_server_dev.server_lock);
+ return rc;
+}
/* v4l2_file_operations */
static int msm_open(struct file *f)
{
@@ -1609,7 +1766,10 @@
pcam_inst->my_index,
pcam->vnode_id, pcam->use_count);
pcam->use_count++;
+ D("%s use_count %d\n", __func__, pcam->use_count);
if (pcam->use_count == 1) {
+ struct msm_cam_server_queue *queue;
+ int ges_evt = MSM_V4L2_GES_CAM_OPEN;
pcam->server_queue_idx = server_q_idx;
queue = &g_server_dev.server_queue[server_q_idx];
queue->ctrl = NULL;
@@ -1619,6 +1779,10 @@
msm_queue_init(&queue->eventData_q, "eventdata");
queue->queue_active = 1;
+ pr_err("%s send gesture evt\n", __func__);
+ msm_cam_server_subdev_notify(g_server_dev.gesture_device,
+ NOTIFY_GESTURE_CAM_EVT, &ges_evt);
+
rc = msm_cam_server_open_session(&g_server_dev, pcam);
if (rc < 0) {
pr_err("%s: cam_server_open_session failed %d\n",
@@ -1712,6 +1876,74 @@
}
mutex_unlock(&pcam->vid_lock);
kfree(pcam_inst);
+ pr_err("%s: error end", __func__);
+ return rc;
+}
+
+int msm_cam_server_close_mctl_session(struct msm_cam_v4l2_device *pcam)
+{
+ int rc = 0;
+ struct msm_cam_media_controller *pmctl = NULL;
+
+ pmctl = msm_camera_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ D("%s: invalid handle\n", __func__);
+ return -ENODEV;
+ }
+
+ if (pmctl->mctl_release) {
+ rc = pmctl->mctl_release(pmctl);
+ if (rc < 0)
+ pr_err("mctl_release fails %d\n", rc);
+ }
+
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
+ kref_put(&pmctl->refcount, msm_release_ion_client);
+#endif
+
+ rc = msm_cam_server_close_session(&g_server_dev, pcam);
+ if (rc < 0)
+ pr_err("msm_cam_server_close_session fails %d\n", rc);
+
+ return rc;
+}
+
+int msm_cam_server_open_mctl_session(struct msm_cam_v4l2_device *pcam,
+ int *p_active)
+{
+ int rc = 0;
+ struct msm_cam_media_controller *pmctl = NULL;
+ D("%s: %p", __func__, g_server_dev.pcam_active);
+ *p_active = 0;
+ if (g_server_dev.pcam_active) {
+ D("%s: Active camera present return", __func__);
+ return 0;
+ }
+ rc = msm_cam_server_open_session(&g_server_dev, pcam);
+ if (rc < 0) {
+ pr_err("%s: cam_server_open_session failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ pmctl = msm_camera_get_mctl(pcam->mctl_handle);
+ /* Should be set to sensor ops if any but right now its OK!! */
+ if (!pmctl->mctl_open) {
+ D("%s: media contoller is not inited\n",
+ __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ D("%s: call mctl_open\n", __func__);
+ rc = pmctl->mctl_open(pmctl, MSM_APPS_ID_V4L2);
+
+ if (rc < 0) {
+ pr_err("%s: HW open failed rc = 0x%x\n", __func__, rc);
+ return rc;
+ }
+ pmctl->pcam_ptr = pcam;
+ *p_active = 1;
return rc;
}
@@ -1838,6 +2070,7 @@
f->private_data = NULL;
if (pcam->use_count == 0) {
+ int ges_evt = MSM_V4L2_GES_CAM_CLOSE;
if (g_server_dev.use_count > 0) {
rc = msm_send_close_server(pcam);
if (rc < 0)
@@ -1865,6 +2098,9 @@
if (g_server_dev.use_count == 0)
mutex_unlock(&g_server_dev.server_lock);
+
+ msm_cam_server_subdev_notify(g_server_dev.gesture_device,
+ NOTIFY_GESTURE_CAM_EVT, &ges_evt);
}
mutex_unlock(&pcam->vid_lock);
return rc;
@@ -2079,6 +2315,11 @@
rc = 0;
break;
}
+
+ case MSM_CAM_IOCTL_SEND_EVENT:
+ rc = msm_server_send_v4l2_evt(arg);
+ break;
+
default:
pr_err("%s: Invalid IOCTL = %d", __func__, cmd);
break;
@@ -2118,6 +2359,7 @@
static int msm_close_server(struct file *fp)
{
+ struct v4l2_event_subscription sub;
D("%s\n", __func__);
mutex_lock(&g_server_dev.server_lock);
if (g_server_dev.use_count > 0)
@@ -2134,10 +2376,36 @@
v4l2_event_queue(
g_server_dev.pcam_active->pvdev, &v4l2_ev);
}
+ sub.type = V4L2_EVENT_ALL;
+ msm_server_v4l2_unsubscribe_event(
+ &g_server_dev.server_command_queue.eventHandle, &sub);
}
return 0;
}
+static long msm_server_send_v4l2_evt(void *evt)
+{
+ struct v4l2_event *v4l2_ev = (struct v4l2_event *)evt;
+ int rc = 0;
+
+ if (NULL == evt) {
+ pr_err("%s: evt is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ D("%s: evt type 0x%x\n", __func__, v4l2_ev->type);
+ if ((v4l2_ev->type >= MSM_GES_APP_EVT_MIN) &&
+ (v4l2_ev->type < MSM_GES_APP_EVT_MAX)) {
+ msm_cam_server_subdev_notify(g_server_dev.gesture_device,
+ NOTIFY_GESTURE_EVT, v4l2_ev);
+ } else {
+ pr_err("%s: Invalid evt %d\n", __func__, v4l2_ev->type);
+ rc = -EINVAL;
+ }
+ D("%s: end\n", __func__);
+
+ return rc;
+}
static long msm_v4l2_evt_notify(struct msm_cam_media_controller *mctl,
unsigned int cmd, unsigned long evt)
@@ -2384,16 +2652,20 @@
spin_lock_init(&config_cam->p_mctl->stats_info.pmem_stats_spinlock);
config_cam->p_mctl->config_device = config_cam;
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
kref_get(&config_cam->p_mctl->refcount);
+#endif
fp->private_data = config_cam;
return rc;
}
static int msm_close_config(struct inode *node, struct file *f)
{
+#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
struct msm_cam_config_dev *config_cam = f->private_data;
D("%s Decrementing ref count of config node ", __func__);
kref_put(&config_cam->p_mctl->refcount, msm_release_ion_client);
+#endif
return 0;
}
@@ -2626,6 +2898,14 @@
rc = v4l2_subdev_call(g_server_dev.csic_device[csid_core],
core, ioctl, VIDIOC_MSM_CSIC_CFG, arg);
break;
+ case NOTIFY_GESTURE_EVT:
+ rc = v4l2_subdev_call(g_server_dev.gesture_device,
+ core, ioctl, VIDIOC_MSM_GESTURE_EVT, arg);
+ break;
+ case NOTIFY_GESTURE_CAM_EVT:
+ rc = v4l2_subdev_call(g_server_dev.gesture_device,
+ core, ioctl, VIDIOC_MSM_GESTURE_CAM_EVT, arg);
+ break;
default:
break;
}
@@ -2665,6 +2945,8 @@
if (index >= MAX_NUM_AXI_DEV)
return -EINVAL;
g_server_dev.axi_device[index] = sd;
+ } else if (sdev_type == GESTURE_DEV) {
+ g_server_dev.gesture_device = sd;
}
err = v4l2_device_register_subdev(&g_server_dev.v4l2_dev, sd);
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index 04e224c..6798cbb 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -32,6 +32,7 @@
#include <mach/camera.h>
#include <media/msm_isp.h>
#include <linux/ion.h>
+#include <media/msm_gestures.h>
#define MSM_V4L2_DIMENSION_SIZE 96
#define MAX_DEV_NAME_LEN 50
@@ -69,6 +70,7 @@
SENSOR_DEV,
ACTUATOR_DEV,
EEPROM_DEV,
+ GESTURE_DEV,
};
/* msm queue management APIs*/
@@ -150,6 +152,8 @@
NOTIFY_VFE_BUF_FREE_EVT, /* arg = msm_camera_csic_params */
NOTIFY_VFE_IRQ,
NOTIFY_AXI_IRQ,
+ NOTIFY_GESTURE_EVT, /* arg = v4l2_event */
+ NOTIFY_GESTURE_CAM_EVT, /* arg = int */
NOTIFY_INVALID
};
@@ -330,6 +334,8 @@
struct msm_cam_v4l2_dev_inst *dev_inst[MSM_DEV_INST_MAX];
struct msm_cam_v4l2_dev_inst *dev_inst_map[MSM_MAX_IMG_MODE];
struct mutex dev_lock;
+ int active;
+ int use_count;
};
/* abstract camera device for each sensor successfully probed*/
@@ -384,7 +390,8 @@
struct msm_mem_map_info mem_map;
};
-#define MAX_NUM_ACTIVE_CAMERA 2
+/* 2 for camera, 1 for gesture */
+#define MAX_NUM_ACTIVE_CAMERA 3
struct msm_cam_server_queue {
uint32_t queue_active;
@@ -444,6 +451,7 @@
struct v4l2_subdev *vfe_device[MAX_NUM_VFE_DEV];
struct v4l2_subdev *axi_device[MAX_NUM_AXI_DEV];
struct v4l2_subdev *vpe_device[MAX_NUM_VPE_DEV];
+ struct v4l2_subdev *gesture_device;
};
/* camera server related functions */
@@ -566,6 +574,12 @@
uint32_t msm_camera_get_mctl_handle(void);
struct msm_cam_media_controller *msm_camera_get_mctl(uint32_t handle);
void msm_camera_free_mctl(uint32_t handle);
+int msm_server_open_client(int *p_qidx);
+int msm_server_send_ctrl(struct msm_ctrl_cmd *out, int ctrl_id);
+int msm_server_close_client(int idx);
+int msm_cam_server_open_mctl_session(struct msm_cam_v4l2_device *pcam,
+ int *p_active);
+int msm_cam_server_close_mctl_session(struct msm_cam_v4l2_device *pcam);
#endif /* __KERNEL__ */
#endif /* _MSM_H */
diff --git a/drivers/media/video/msm/msm_gesture.c b/drivers/media/video/msm/msm_gesture.c
new file mode 100644
index 0000000..654594d
--- /dev/null
+++ b/drivers/media/video/msm/msm_gesture.c
@@ -0,0 +1,497 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <mach/camera.h>
+#include <media/v4l2-subdev.h>
+#include "msm.h"
+#include <media/msm_camera.h>
+#include <media/msm_gestures.h>
+#include <media/v4l2-ctrls.h>
+
+#ifdef CONFIG_MSM_CAMERA_DEBUG
+#define D(fmt, args...) pr_debug("msm_gesture: " fmt, ##args)
+#else
+#define D(fmt, args...) do {} while (0)
+#endif
+
+struct msm_gesture_ctrl {
+ int queue_id;
+ atomic_t active;
+ struct v4l2_ctrl_handler ctrl_handler;
+ int num_ctrls;
+ struct v4l2_fh *p_eventHandle;
+ struct v4l2_subdev *sd;
+ struct msm_ges_evt event;
+ int camera_opened;
+};
+
+static struct msm_gesture_ctrl g_gesture_ctrl;
+
+int msm_gesture_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ D("%s\n", __func__);
+ if (sub->type == V4L2_EVENT_ALL)
+ sub->type = MSM_GES_APP_NOTIFY_EVENT;
+ return v4l2_event_subscribe(fh, sub);
+}
+
+static int msm_gesture_send_ctrl(struct msm_gesture_ctrl *p_gesture_ctrl,
+ int type, void *value, int length, uint32_t timeout)
+{
+ int rc = 0;
+ struct msm_ctrl_cmd ctrlcmd;
+ D("%s qid %d\n", __func__, p_gesture_ctrl->queue_id);
+ ctrlcmd.type = type;
+ ctrlcmd.timeout_ms = timeout;
+ ctrlcmd.length = length;
+ ctrlcmd.value = value;
+ ctrlcmd.vnode_id = 0;
+ ctrlcmd.queue_idx = p_gesture_ctrl->queue_id;
+ ctrlcmd.config_ident = 0;
+
+ rc = msm_server_send_ctrl(&ctrlcmd, MSM_GES_RESP_V4L2);
+ return rc;
+}
+
+static int msm_gesture_proc_ctrl_cmd(struct msm_gesture_ctrl *p_gesture_ctrl,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct msm_ctrl_cmd *tmp_cmd = NULL;
+ uint8_t *ctrl_data = NULL;
+ void __user *uptr_cmd;
+ void __user *uptr_value;
+ uint32_t cmd_len = sizeof(struct msm_ctrl_cmd);
+ uint32_t value_len;
+
+ tmp_cmd = (struct msm_ctrl_cmd *)ctrl->value;
+ uptr_cmd = (void __user *)ctrl->value;
+ uptr_value = (void __user *)tmp_cmd->value;
+ value_len = tmp_cmd->length;
+
+ D("%s: cmd type = %d, up1=0x%x, ulen1=%d, up2=0x%x, ulen2=%d\n",
+ __func__, tmp_cmd->type, (uint32_t)uptr_cmd, cmd_len,
+ (uint32_t)uptr_value, tmp_cmd->length);
+
+ ctrl_data = kzalloc(value_len + cmd_len, GFP_KERNEL);
+ if (ctrl_data == 0) {
+ pr_err("%s could not allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto end;
+ }
+ tmp_cmd = (struct msm_ctrl_cmd *)ctrl_data;
+ if (copy_from_user((void *)ctrl_data, uptr_cmd,
+ cmd_len)) {
+ pr_err("%s: copy_from_user failed.\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+ tmp_cmd->value = (void *)(ctrl_data + cmd_len);
+ if (uptr_value && tmp_cmd->length > 0) {
+ if (copy_from_user((void *)tmp_cmd->value, uptr_value,
+ value_len)) {
+ pr_err("%s: copy_from_user failed, size=%d\n",
+ __func__, value_len);
+ rc = -EINVAL;
+ goto end;
+ }
+ } else
+ tmp_cmd->value = NULL;
+
+ /* send command to config thread in usersspace, and get return value */
+ rc = msm_server_send_ctrl((struct msm_ctrl_cmd *)ctrl_data,
+ MSM_GES_RESP_V4L2);
+ D("%s: msm_server_control rc=%d\n", __func__, rc);
+ if (rc == 0) {
+ if (uptr_value && tmp_cmd->length > 0 &&
+ copy_to_user((void __user *)uptr_value,
+ (void *)(ctrl_data + cmd_len),
+ tmp_cmd->length)) {
+ pr_err("%s: copy_to_user failed, size=%d\n",
+ __func__, tmp_cmd->length);
+ rc = -EINVAL;
+ goto end;
+ }
+ tmp_cmd->value = uptr_value;
+ if (copy_to_user((void __user *)uptr_cmd,
+ (void *)tmp_cmd, cmd_len)) {
+ pr_err("%s: copy_to_user failed in cpy, size=%d\n",
+ __func__, cmd_len);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+end:
+ D("%s: END, type = %d, vaddr = 0x%x, vlen = %d, status = %d, rc = %d\n",
+ __func__, tmp_cmd->type, (uint32_t)tmp_cmd->value,
+ tmp_cmd->length, tmp_cmd->status, rc);
+ kfree(ctrl_data);
+ return rc;
+}
+
+static int msm_gesture_s_ctrl(struct v4l2_subdev *sd,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ D("%s ctrl->id %d\n", __func__, ctrl->id);
+ rc = msm_gesture_proc_ctrl_cmd(p_gesture_ctrl, ctrl);
+ if (rc != 0) {
+ pr_err("%s set ctrl failed %d\n", __func__, rc);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int msm_gesture_s_ctrl_ops(struct v4l2_ctrl *ctrl)
+{
+ int rc = 0;
+ struct v4l2_control control;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ control.id = ctrl->id;
+ control.value = ctrl->val;
+ D("%s ctrl->id 0x%x\n", __func__, ctrl->id);
+ rc = msm_gesture_proc_ctrl_cmd(p_gesture_ctrl, &control);
+ if (rc != 0) {
+ pr_err("%s proc ctrl failed %d\n", __func__, rc);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int msm_gesture_s_ctrl_ext(struct v4l2_subdev *sd,
+ struct v4l2_ext_controls *ctrls)
+{
+ int rc = 0;
+ struct v4l2_control control;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ if ((ctrls->count < 1) || (NULL == ctrls->controls)) {
+ pr_err("%s invalid ctrl failed\n", __func__);
+ return -EINVAL;
+ }
+ control.id = ctrls->controls->id;
+ control.value = ctrls->controls->value;
+ D("%s ctrl->id %d\n", __func__, control.id);
+ rc = msm_gesture_proc_ctrl_cmd(p_gesture_ctrl, &control);
+ if (rc != 0) {
+ pr_err("%s proc ctrl failed %d\n", __func__, rc);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int msm_gesture_handle_event(struct v4l2_subdev *sd,
+ struct msm_gesture_ctrl *p_gesture_ctrl, void* arg)
+{
+ int rc = 0;
+ struct v4l2_event *evt = (struct v4l2_event *)arg;
+ struct msm_ges_evt *p_ges_evt = NULL;
+ D("%s: Received gesture evt 0x%x ", __func__, evt->type);
+ p_gesture_ctrl->event.evt_len = 0;
+ p_gesture_ctrl->event.evt_data = NULL;
+ if (0 != evt->u.data[0]) {
+ p_ges_evt = (struct msm_ges_evt *)evt->u.data;
+ D("%s: event data %p len %d", __func__,
+ p_ges_evt->evt_data,
+ p_ges_evt->evt_len);
+
+ if (p_ges_evt->evt_len > 0) {
+ p_gesture_ctrl->event.evt_data =
+ kzalloc(p_ges_evt->evt_len, GFP_KERNEL);
+
+ if (NULL == p_gesture_ctrl->event.evt_data) {
+ pr_err("%s: cannot allocate event", __func__);
+ rc = -ENOMEM;
+ } else {
+ if (copy_from_user(
+ (void *)p_gesture_ctrl->event.evt_data,
+ (void __user *)p_ges_evt->evt_data,
+ p_ges_evt->evt_len)) {
+ pr_err("%s: copy_from_user failed",
+ __func__);
+ rc = -EFAULT;
+ } else {
+ D("%s: copied the event", __func__);
+ p_gesture_ctrl->event.evt_len =
+ p_ges_evt->evt_len;
+ }
+ }
+ }
+ }
+
+ if (rc == 0) {
+ ktime_get_ts(&evt->timestamp);
+ v4l2_event_queue(&sd->devnode, evt);
+ }
+ D("%s: exit rc %d ", __func__, rc);
+ return rc;
+}
+
+static int msm_gesture_get_evt_payload(struct v4l2_subdev *sd,
+ struct msm_gesture_ctrl *p_gesture_ctrl, void* arg)
+{
+ int rc = 0;
+ struct msm_ges_evt *p_ges_evt = (struct msm_ges_evt *)arg;
+ D("%s: enter ", __func__);
+ if (NULL != p_gesture_ctrl->event.evt_data) {
+ D("%s: event data %p len %d", __func__,
+ p_gesture_ctrl->event.evt_data,
+ p_gesture_ctrl->event.evt_len);
+
+ if (copy_to_user((void __user *)p_ges_evt->evt_data,
+ p_gesture_ctrl->event.evt_data,
+ p_gesture_ctrl->event.evt_len)) {
+ pr_err("%s: copy_to_user failed.\n", __func__);
+ rc = -EFAULT;
+ } else {
+ D("%s: copied the event", __func__);
+ p_ges_evt->evt_len = p_gesture_ctrl->event.evt_len;
+ }
+ }
+ D("%s: exit rc %d ", __func__, rc);
+ return rc;
+}
+
+static int msm_gesture_handle_cam_event(struct v4l2_subdev *sd,
+ struct msm_gesture_ctrl *p_gesture_ctrl, int cam_evt)
+{
+ int rc = 0;
+ D("%s: cam_evt %d ", __func__, cam_evt);
+
+ if ((cam_evt != MSM_V4L2_GES_CAM_OPEN)
+ && (cam_evt != MSM_V4L2_GES_CAM_CLOSE)) {
+ pr_err("%s: error invalid event %d ", __func__, cam_evt);
+ return -EINVAL;
+ }
+
+ p_gesture_ctrl->camera_opened =
+ (cam_evt == MSM_V4L2_GES_CAM_OPEN);
+
+ if (atomic_read(&p_gesture_ctrl->active) == 0) {
+ D("%s gesture not active\n", __func__);
+ return 0;
+ }
+
+ rc = msm_gesture_send_ctrl(p_gesture_ctrl, cam_evt, NULL,
+ 0, 2000);
+ if (rc != 0) {
+ pr_err("%s gesture ctrl failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ }
+ D("%s exit rc %d\n", __func__, rc);
+ return rc;
+}
+
+long msm_gesture_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ D("%s\n", __func__);
+ switch (cmd) {
+ case MSM_GES_IOCTL_CTRL_COMMAND: {
+ struct v4l2_control *ctrl = (struct v4l2_control *)arg;
+ D("%s MSM_GES_IOCTL_CTRL_COMMAND arg %p size %d\n", __func__,
+ arg, sizeof(ctrl));
+ rc = msm_gesture_s_ctrl(sd, ctrl);
+ break;
+ }
+ case VIDIOC_MSM_GESTURE_EVT: {
+ rc = msm_gesture_handle_event(sd, p_gesture_ctrl, arg);
+ break;
+ }
+ case VIDIOC_MSM_GESTURE_CAM_EVT: {
+ int cam_evt = *((int *)arg);
+ rc = msm_gesture_handle_cam_event(sd, p_gesture_ctrl, cam_evt);
+ break;
+ }
+ case MSM_GES_GET_EVT_PAYLOAD: {
+ rc = msm_gesture_get_evt_payload(sd, p_gesture_ctrl, arg);
+ break;
+ }
+ default:
+ pr_err("%s: Invalid ioctl %d", __func__, cmd);
+ break;
+ }
+ D("%s exit rc %d\n", __func__, rc);
+ return rc;
+}
+
+static const struct v4l2_ctrl_ops msm_gesture_ctrl_ops = {
+ .s_ctrl = msm_gesture_s_ctrl_ops,
+};
+
+static const struct v4l2_ctrl_config msm_gesture_ctrl_filter = {
+ .ops = &msm_gesture_ctrl_ops,
+ .id = MSM_GESTURE_CID_CTRL_CMD,
+ .name = "Gesture ctrl",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .max = 0x7fffffff,
+ .step = 1,
+ .min = 0x80000000,
+};
+
+static int msm_gesture_init_ctrl(struct v4l2_subdev *sd,
+ struct msm_gesture_ctrl *p_gesture_ctrl)
+{
+ int rc = 0;
+ p_gesture_ctrl->num_ctrls = 1;
+ p_gesture_ctrl->ctrl_handler.error = 0;
+ v4l2_ctrl_handler_init(&p_gesture_ctrl->ctrl_handler,
+ p_gesture_ctrl->num_ctrls);
+ v4l2_ctrl_new_custom(&p_gesture_ctrl->ctrl_handler,
+ &msm_gesture_ctrl_filter, p_gesture_ctrl);
+ if (p_gesture_ctrl->ctrl_handler.error) {
+ int err = p_gesture_ctrl->ctrl_handler.error;
+ D("%s: error adding control %d", __func__, err);
+ p_gesture_ctrl->ctrl_handler.error = 0;
+ }
+ sd->ctrl_handler = &p_gesture_ctrl->ctrl_handler;
+ return rc;
+}
+
+static int msm_gesture_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0, rc_err = 0;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ D("%s\n", __func__);
+ if (atomic_read(&p_gesture_ctrl->active) != 0) {
+ pr_err("%s already opened\n", __func__);
+ return -EINVAL;
+ }
+ memset(&p_gesture_ctrl->event, 0x0, sizeof(struct msm_ges_evt));
+ rc = msm_server_open_client(&p_gesture_ctrl->queue_id);
+ if (rc != 0) {
+ pr_err("%s open failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = msm_gesture_init_ctrl(sd, p_gesture_ctrl);
+ if (rc != 0) {
+ pr_err("%s init ctrl failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = msm_gesture_send_ctrl(p_gesture_ctrl, MSM_V4L2_GES_OPEN, NULL,
+ 0, 10000);
+ if (rc != 0) {
+ pr_err("%s gesture ctrl failed %d\n", __func__, rc);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ atomic_inc(&p_gesture_ctrl->active);
+
+ return rc;
+
+err:
+ rc_err = msm_server_close_client(p_gesture_ctrl->queue_id);
+ if (rc_err != 0)
+ pr_err("%s failed %d\n", __func__, rc);
+ return rc;
+}
+
+static int msm_gesture_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ D("%s\n", __func__);
+ if (atomic_read(&p_gesture_ctrl->active) == 0) {
+ pr_err("%s already closed\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_gesture_send_ctrl(p_gesture_ctrl, MSM_V4L2_GES_CLOSE, NULL,
+ 0, 10000);
+ if (rc != 0)
+ pr_err("%s gesture ctrl failed %d\n", __func__, rc);
+
+ rc = msm_server_close_client(p_gesture_ctrl->queue_id);
+ if (rc != 0)
+ pr_err("%s failed %d\n", __func__, rc);
+
+ v4l2_ctrl_handler_free(&p_gesture_ctrl->ctrl_handler);
+ kfree(p_gesture_ctrl->event.evt_data);
+
+ atomic_dec(&p_gesture_ctrl->active);
+ g_gesture_ctrl.queue_id = -1;
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops msm_gesture_core_ops = {
+ .s_ctrl = msm_gesture_s_ctrl,
+ .s_ext_ctrls = msm_gesture_s_ctrl_ext,
+ .ioctl = msm_gesture_ioctl,
+ .subscribe_event = msm_gesture_subscribe_event,
+};
+
+static struct v4l2_subdev_video_ops msm_gesture_video_ops;
+
+static struct v4l2_subdev_ops msm_gesture_subdev_ops = {
+ .core = &msm_gesture_core_ops,
+ .video = &msm_gesture_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_gesture_internal_ops = {
+ .open = msm_gesture_open,
+ .close = msm_gesture_close,
+};
+
+static int msm_gesture_node_register(void)
+{
+ struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl;
+ struct v4l2_subdev *gesture_subdev =
+ kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+ D("%s\n", __func__);
+ if (!gesture_subdev) {
+ pr_err("%s: no enough memory\n", __func__);
+ return -ENOMEM;
+ };
+
+ v4l2_subdev_init(gesture_subdev, &msm_gesture_subdev_ops);
+ gesture_subdev->internal_ops = &msm_gesture_internal_ops;
+ gesture_subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(gesture_subdev->name,
+ sizeof(gesture_subdev->name), "gesture");
+
+ media_entity_init(&gesture_subdev->entity, 0, NULL, 0);
+ gesture_subdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ gesture_subdev->entity.group_id = GESTURE_DEV;
+ gesture_subdev->entity.name = gesture_subdev->name;
+
+ /* events */
+ gesture_subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ gesture_subdev->nevents = MAX_GES_EVENTS;
+
+ msm_cam_register_subdev_node(gesture_subdev, GESTURE_DEV, 0);
+
+ gesture_subdev->entity.revision = gesture_subdev->devnode.num;
+
+ atomic_set(&p_gesture_ctrl->active, 0);
+ p_gesture_ctrl->queue_id = -1;
+ p_gesture_ctrl->event.evt_data = NULL;
+ p_gesture_ctrl->event.evt_len = 0;
+ return 0;
+}
+
+static int __init msm_gesture_init_module(void)
+{
+ return msm_gesture_node_register();
+}
+
+module_init(msm_gesture_init_module);
+MODULE_DESCRIPTION("MSM Gesture driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index d678d86..315f218 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -246,7 +246,6 @@
pr_err("%s: Invalid vdata type: %d\n", __func__, vdata->type);
break;
}
- msm_isp_sync_free(vdata);
return rc;
}
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index e878063..e9eb68f 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -923,7 +923,6 @@
struct msm_cam_v4l2_device *pcam = NULL;
struct msm_cam_v4l2_dev_inst *pcam_inst;
struct msm_cam_media_controller *pmctl;
- D("%s : E ", __func__);
if (f == NULL) {
pr_err("%s :: cannot open video driver data", __func__);
@@ -935,8 +934,8 @@
pr_err("%s NULL pointer passed in!\n", __func__);
return rc;
}
- pmctl = msm_camera_get_mctl(pcam->mctl_handle);
+ D("%s : E use_count %d", __func__, pcam->mctl_node.use_count);
mutex_lock(&pcam->mctl_node.dev_lock);
for (i = 0; i < MSM_DEV_INST_MAX; i++) {
if (pcam->mctl_node.dev_inst[i] == NULL)
@@ -960,6 +959,21 @@
D("%s pcam_inst %p my_index = %d\n", __func__,
pcam_inst, pcam_inst->my_index);
+ rc = msm_cam_server_open_mctl_session(pcam,
+ &pcam->mctl_node.active);
+ if (rc < 0) {
+ pr_err("%s: mctl session open failed %d", __func__, rc);
+ mutex_unlock(&pcam->mctl_node.dev_lock);
+ return rc;
+ }
+
+ pmctl = msm_camera_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ pr_err("%s mctl NULL!\n", __func__);
+ return rc;
+ }
+
+ D("%s active %d\n", __func__, pcam->mctl_node.active);
rc = msm_setup_v4l2_event_queue(&pcam_inst->eventHandle,
pcam->mctl_node.pvdev);
if (rc < 0) {
@@ -973,6 +987,7 @@
D("f->private_data = 0x%x, pcam = 0x%x\n",
(u32)f->private_data, (u32)pcam_inst);
+ pcam->mctl_node.use_count++;
mutex_unlock(&pcam->mctl_node.dev_lock);
D("%s : X ", __func__);
return rc;
@@ -1030,6 +1045,17 @@
pmctl = msm_camera_get_mctl(pcam->mctl_handle);
mutex_lock(&pcam->mctl_node.dev_lock);
+ D("%s : active %d ", __func__, pcam->mctl_node.active);
+ if (pcam->mctl_node.active == 1) {
+ rc = msm_cam_server_close_mctl_session(pcam);
+ if (rc < 0) {
+ pr_err("%s: mctl session close failed %d",
+ __func__, rc);
+ mutex_unlock(&pcam->mctl_node.dev_lock);
+ return rc;
+ }
+ pmctl = NULL;
+ }
pcam_inst->streamon = 0;
pcam->mctl_node.dev_inst_map[pcam_inst->image_mode] = NULL;
if (pcam_inst->vbqueue_initialized)
@@ -1040,10 +1066,14 @@
v4l2_fh_exit(&pcam_inst->eventHandle);
kfree(pcam_inst);
- kref_put(&pmctl->refcount, msm_release_ion_client);
+ if (NULL != pmctl) {
+ D("%s : release ion client", __func__);
+ kref_put(&pmctl->refcount, msm_release_ion_client);
+ }
f->private_data = NULL;
mutex_unlock(&pcam->mctl_node.dev_lock);
- D("%s : X ", __func__);
+ pcam->mctl_node.use_count--;
+ D("%s : use_count %d X ", __func__, pcam->mctl_node.use_count);
return rc;
}
@@ -1246,6 +1276,11 @@
pb->m.planes[i].data_offset;
pcam_inst->buf_offset[pb->index][i].addr_offset =
pb->m.planes[i].reserved[0];
+ pcam_inst->plane_info.plane[i].offset = 0;
+ D("%s, len %d user[%d] %p buf_len %d\n",
+ __func__, pb->length, i,
+ (void *)pb->m.planes[i].m.userptr,
+ pb->m.planes[i].length);
}
} else {
D("%s stored reserved info %d", __func__, pb->reserved);
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index 5bc81a7..42d13a1 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -431,8 +431,8 @@
int pp_divert_type = 0, pp_type = 0;
msm_mctl_check_pp(p_mctl, image_mode, &pp_divert_type, &pp_type);
- D("%s: pp_type=%d, pp_divert_type = %d, frame_id = 0x%x",
- __func__, pp_type, pp_divert_type, frame_id);
+ D("%s: pp_type=%d, pp_divert_type = %d, frame_id = 0x%x image_mode %d",
+ __func__, pp_type, pp_divert_type, frame_id, image_mode);
if (pp_type || pp_divert_type)
rc = msm_mctl_do_pp_divert(p_mctl,
image_mode, fbuf, frame_id, pp_type);
@@ -440,9 +440,26 @@
idx = msm_mctl_img_mode_to_inst_index(
p_mctl, image_mode, 0);
if (idx < 0) {
- pr_err("%s Invalid instance, dropping buffer\n",
- __func__);
- return idx;
+ /* check mctl node */
+ if ((image_mode >= 0) &&
+ p_mctl->pcam_ptr->mctl_node.
+ dev_inst_map[image_mode]) {
+ int index = p_mctl->pcam_ptr->mctl_node.
+ dev_inst_map[image_mode]->my_index;
+ pcam_inst = p_mctl->pcam_ptr->mctl_node.
+ dev_inst[index];
+ D("%s: Mctl node index %d inst %p",
+ __func__, index, pcam_inst);
+ rc = msm_mctl_buf_done_proc(p_mctl, pcam_inst,
+ image_mode, fbuf,
+ &frame_id, 1);
+ D("%s mctl node buf done %d\n", __func__, 0);
+ return -EINVAL;
+ } else {
+ pr_err("%s Invalid instance, dropping buffer\n",
+ __func__);
+ return idx;
+ }
}
pcam_inst = p_mctl->pcam_ptr->dev_inst[idx];
rc = msm_mctl_buf_done_proc(p_mctl, pcam_inst,
@@ -573,6 +590,10 @@
plane_offset =
mem->offset.sp_off.cbcr_off;
+ D("%s: data off %d plane off %d",
+ __func__,
+ pcam_inst->buf_offset[buf_idx][i].
+ data_offset, plane_offset);
free_buf->ch_paddr[i] = (uint32_t)
videobuf2_to_pmem_contig(&buf->vidbuf, i) +
pcam_inst->buf_offset[buf_idx][i].data_offset +
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index 90ba214..89615ec 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -423,20 +423,15 @@
static void vfe31_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags;
spin_lock_irqsave(&vfe31_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- spin_unlock_irqrestore(&vfe31_ctrl->sd_notify_lock, flags);
- return;
- }
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
CDBG("vfe31_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe31_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe31_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe31_ctrl->sd_notify_lock, flags);
}
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index becdd95..d50b778 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -418,19 +418,15 @@
static void vfe32_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags = 0;
spin_lock_irqsave(&vfe32_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- return;
- }
CDBG("vfe32_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe32_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe32_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe32_ctrl->sd_notify_lock, flags);
}
@@ -3056,7 +3052,7 @@
ch2_paddr = vfe32_get_ch_addr(ping_pong,
vfe32_ctrl->outpath.out1.ch2);
- pr_debug("%s ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+ CDBG("%s ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
__func__, ch0_paddr, ch1_paddr, ch2_paddr);
if (free_buf) {
/* Y channel */
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
index d9d315e..2b72021 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
@@ -258,7 +258,8 @@
{VFE_CMD_DUMMY_9, VFE_MAX, VFE_MAX},
{VFE_CMD_STATS_AF_START, VFE_STATS_AUTOFOCUS_CONFIG, QDSP_CMDQUEUE,
"VFE_CMD_STATS_AF_START", "VFE_STATS_AUTOFOCUS_CONFIG"},
- {VFE_CMD_STATS_AF_STOP, VFE_MAX, VFE_MAX},
+ {VFE_CMD_STATS_AF_STOP, VFE_STATS_AUTOFOCUS_CONFIG, QDSP_CMDQUEUE,
+ "VFE_CMD_STATS_AF_STOP", "VFE_STATS_AUTOFOCUS_CONFIG"},
{VFE_CMD_STATS_AE_START, VFE_MAX, VFE_MAX},
{VFE_CMD_STATS_AE_STOP, VFE_MAX, VFE_MAX},
{VFE_CMD_STATS_AWB_START, VFE_MAX, VFE_MAX},
@@ -341,7 +342,6 @@
static uint32_t extlen;
struct mutex vfe_lock;
-static int apps_reset;
static uint8_t vfestopped;
static struct stop_event stopevent;
@@ -396,7 +396,7 @@
void (*getevent)(void *ptr, size_t len))
{
uint32_t evt_buf[3];
- void *data;
+ void *data = NULL;
struct buf_info *outch = NULL;
uint32_t y_phy, cbcr_phy;
struct table_cmd *table_pending = NULL;
@@ -409,7 +409,7 @@
CDBG("%s:id=%d\n", __func__, id);
if (id != VFE_ADSP_EVENT) {
- data = kzalloc(len, GFP_KERNEL);
+ data = kzalloc(len, GFP_ATOMIC);
if (!data) {
pr_err("%s: rp: cannot allocate buffer\n", __func__);
return;
@@ -432,6 +432,7 @@
vfe_7x_ops(driver_data, MSG_OUTPUT_T,
len, getevent);
vfe2x_send_isp_msg(vfe2x_ctrl, MSG_ID_SNAPSHOT_DONE);
+ kfree(data);
return;
case MSG_OUTPUT_S:
outch = &vfe2x_ctrl->snap;
@@ -489,6 +490,8 @@
len = sizeof(fack);
msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
cmd_data, len);
+ kfree(data);
+ return;
}
}
y_phy = ((struct vfe_endframe *)data)->y_address;
@@ -557,6 +560,8 @@
len = sizeof(fack);
msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
cmd_data, len);
+ kfree(data);
+ return;
}
}
y_phy = ((struct vfe_endframe *)data)->y_address;
@@ -666,7 +671,9 @@
msgs_map[id].isp_id);
break;
default:
- vfe2x_send_isp_msg(vfe2x_ctrl, msgs_map[id].isp_id);
+ if (MSG_TABLE_CMD_ACK != id)
+ vfe2x_send_isp_msg(vfe2x_ctrl,
+ msgs_map[id].isp_id);
break;
}
}
@@ -706,24 +713,30 @@
vfe2x_ctrl->update_pending = 0;
}
spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
+ kfree(data);
return;
}
table_pending = list_first_entry(&vfe2x_ctrl->table_q,
struct table_cmd, list);
if (!table_pending) {
spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
+ kfree(data);
return;
}
msm_adsp_write(vfe_mod, table_pending->queue,
table_pending->cmd, table_pending->size);
list_del(&table_pending->list);
kfree(table_pending->cmd);
+ kfree(table_pending);
vfe2x_ctrl->tableack_pending = 1;
spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
} else if (!vfe2x_ctrl->tableack_pending) {
- if (!list_empty(&vfe2x_ctrl->table_q))
+ if (!list_empty(&vfe2x_ctrl->table_q)) {
+ kfree(data);
return;
+ }
}
+ kfree(data);
}
static struct msm_adsp_ops vfe_7x_sync = {
@@ -834,19 +847,15 @@
static void vfe2x_subdev_notify(int id, int path)
{
- struct msm_vfe_resp *rp;
+ struct msm_vfe_resp rp;
unsigned long flags = 0;
spin_lock_irqsave(&vfe2x_ctrl->sd_notify_lock, flags);
- rp = msm_isp_sync_alloc(sizeof(struct msm_vfe_resp), GFP_ATOMIC);
- if (!rp) {
- CDBG("rp: cannot allocate buffer\n");
- return;
- }
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
CDBG("vfe2x_subdev_notify : msgId = %d\n", id);
- rp->evt_msg.type = MSM_CAMERA_MSG;
- rp->evt_msg.msg_id = path;
- rp->type = id;
- v4l2_subdev_notify(&vfe2x_ctrl->subdev, NOTIFY_VFE_BUF_EVT, rp);
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ rp.evt_msg.msg_id = path;
+ rp.type = id;
+ v4l2_subdev_notify(&vfe2x_ctrl->subdev, NOTIFY_VFE_BUF_EVT, &rp);
spin_unlock_irqrestore(&vfe2x_ctrl->sd_notify_lock, flags);
}
@@ -1216,8 +1225,7 @@
if (queue == QDSP_CMDQUEUE) {
switch (vfecmd.id) {
case VFE_CMD_RESET:
- msm_camio_vfe_blk_reset_2(apps_reset);
- apps_reset = 0;
+ msm_camio_vfe_blk_reset_2();
vfestopped = 0;
break;
case VFE_CMD_START:
@@ -1270,7 +1278,10 @@
if ((!list_empty(&vfe2x_ctrl->table_q)) ||
vfe2x_ctrl->tableack_pending) {
CDBG("update pending\n");
- vfe2x_ctrl->update_pending = 1;
+ vfe2x_ctrl->update_pending = 0;
+ vfe2x_send_isp_msg(vfe2x_ctrl,
+ msgs_map[MSG_UPDATE_ACK].
+ isp_id);
spin_unlock_irqrestore(
&vfe2x_ctrl->table_lock,
flags);
@@ -1588,29 +1599,30 @@
config_send:
CDBG("send adsp command = %d\n", *(uint32_t *)cmd_data);
+ spin_lock_irqsave(&vfe2x_ctrl->table_lock, flags);
if (queue == QDSP_TABLEQUEUE &&
vfe2x_ctrl->tableack_pending) {
CDBG("store table cmd\n");
table_pending = kzalloc(sizeof(struct table_cmd), GFP_ATOMIC);
if (!table_pending) {
rc = -ENOMEM;
+ spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
goto config_done;
}
table_pending->cmd = kzalloc(vfecmd.length + 4, GFP_ATOMIC);
if (!table_pending->cmd) {
kfree(table_pending);
rc = -ENOMEM;
+ spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
goto config_done;
}
memcpy(table_pending->cmd, cmd_data, vfecmd.length + 4);
table_pending->queue = queue;
table_pending->size = vfecmd.length + 4;
- spin_lock_irqsave(&vfe2x_ctrl->table_lock, flags);
list_add_tail(&table_pending->list, &vfe2x_ctrl->table_q);
spin_unlock_irqrestore(&vfe2x_ctrl->table_lock, flags);
} else {
if (queue == QDSP_TABLEQUEUE) {
- spin_lock_irqsave(&vfe2x_ctrl->table_lock, flags);
CDBG("sending table cmd\n");
vfe2x_ctrl->tableack_pending = 1;
rc = msm_adsp_write(vfe_mod, queue,
@@ -1621,7 +1633,6 @@
uint32_t *ptr = cmd_data;
CDBG("%x %x %x\n", ptr[0], ptr[1], ptr[2]);
}
- spin_lock_irqsave(&vfe2x_ctrl->table_lock, flags);
CDBG("send n-table cmd\n");
rc = msm_adsp_write(vfe_mod, queue,
cmd_data, vfecmd.length + 4);
@@ -1636,6 +1647,7 @@
config_failure:
kfree(scfg);
kfree(axio);
+ kfree(sfcfg);
return rc;
}
@@ -1717,8 +1729,6 @@
CDBG("msm_cam_clk_enable: disable vfe_clk\n");
msm_cam_clk_enable(&vfe2x_ctrl->pdev->dev, vfe2x_clk_info,
vfe2x_ctrl->vfe_clk, ARRAY_SIZE(vfe2x_clk_info), 0);
- apps_reset = 1;
-
msm_adsp_disable(qcam_mod);
msm_adsp_disable(vfe_mod);
diff --git a/drivers/media/video/msm/msm_vpe.c b/drivers/media/video/msm/msm_vpe.c
index b7376dc..71c10ad 100644
--- a/drivers/media/video/msm/msm_vpe.c
+++ b/drivers/media/video/msm/msm_vpe.c
@@ -50,10 +50,12 @@
/* enable the frame irq, bit 0 = Display list 0 ROI done */
msm_camera_io_w_mb(1, vpe_ctrl->vpebase + VPE_INTR_ENABLE_OFFSET);
msm_camera_io_dump(vpe_ctrl->vpebase, 0x120);
+ msm_camera_io_dump(vpe_ctrl->vpebase + 0x00400, 0x18);
msm_camera_io_dump(vpe_ctrl->vpebase + 0x10000, 0x250);
msm_camera_io_dump(vpe_ctrl->vpebase + 0x30000, 0x20);
msm_camera_io_dump(vpe_ctrl->vpebase + 0x50000, 0x30);
msm_camera_io_dump(vpe_ctrl->vpebase + 0x50400, 0x10);
+
/* this triggers the operation. */
msm_camera_io_w(1, vpe_ctrl->vpebase + VPE_DL0_START_OFFSET);
wmb();
@@ -756,7 +758,7 @@
}
rc = request_irq(vpe_ctrl->vpeirq->start, vpe_parse_irq,
- IRQF_TRIGGER_RISING, "vfe", 0);
+ IRQF_TRIGGER_RISING, "vpe", 0);
if (rc < 0) {
release_mem_region(vpe_ctrl->vpemem->start,
resource_size(vpe_ctrl->vpemem));
diff --git a/drivers/media/video/msm/msm_vpe.h b/drivers/media/video/msm/msm_vpe.h
index 553ee4f..0d14626 100644
--- a/drivers/media/video/msm/msm_vpe.h
+++ b/drivers/media/video/msm/msm_vpe.h
@@ -60,7 +60,8 @@
#define VPE_SCALE_COEFF_LSP_0_OFFSET 0x50400
#define VPE_SCALE_COEFF_MSP_0_OFFSET 0x50404
-#define VPE_AXI_ARB_2_OFFSET 0x004C
+#define VPE_AXI_ARB_1_OFFSET 0x00408
+#define VPE_AXI_ARB_2_OFFSET 0x0040C
#define VPE_SCALE_COEFF_LSBn(n) (0x50400 + 8 * (n))
#define VPE_SCALE_COEFF_MSBn(n) (0x50404 + 8 * (n))
diff --git a/drivers/media/video/msm/sensors/Makefile b/drivers/media/video/msm/sensors/Makefile
index 13dc446..ea36bf6 100644
--- a/drivers/media/video/msm/sensors/Makefile
+++ b/drivers/media/video/msm/sensors/Makefile
@@ -13,5 +13,5 @@
obj-$(CONFIG_S5K4E1) += s5k4e1_v4l2.o
obj-$(CONFIG_MT9E013) += mt9e013_v4l2.o
obj-$(CONFIG_WEBCAM_OV9726) += ov9726_v4l2.o
-obj-$(CONFIG_WEBCAM_OV7692_QRD) += ov7692_qrd_v4l2.o
+obj-$(CONFIG_OV7692) += ov7692_v4l2.o
obj-$(CONFIG_VX6953) += vx6953.o
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index ff5bb49..d163427 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -22,6 +22,13 @@
uint16_t cur_line = 0;
uint16_t exp_fl_lines = 0;
if (s_ctrl->sensor_exp_gain_info) {
+ if (s_ctrl->prev_gain && s_ctrl->prev_line &&
+ s_ctrl->func_tbl->sensor_write_exp_gain)
+ s_ctrl->func_tbl->sensor_write_exp_gain(
+ s_ctrl,
+ s_ctrl->prev_gain,
+ s_ctrl->prev_line);
+
msm_camera_i2c_read(s_ctrl->sensor_i2c_client,
s_ctrl->sensor_exp_gain_info->coarse_int_time_addr,
&cur_line,
@@ -429,6 +436,8 @@
s_ctrl,
cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
+ s_ctrl->prev_gain = cdata.cfg.exp_gain.gain;
+ s_ctrl->prev_line = cdata.cfg.exp_gain.line;
break;
case CFG_SET_PICT_EXP_GAIN:
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index 22cc05b..0e51409 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -153,6 +153,8 @@
uint16_t curr_line_length_pclk;
uint16_t curr_frame_length_lines;
+ uint16_t prev_gain;
+ uint16_t prev_line;
uint32_t fps_divider;
enum msm_sensor_resolution_t curr_res;
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index 1e66843..7531a26 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -333,7 +333,7 @@
{0x5000, 0xff},
{0x3a18, 0x00},
{0x3a19, 0x80},
- {0x3503, 0x00},
+ {0x3503, 0x07},
{0x4521, 0x00},
{0x5183, 0xb0},
{0x5184, 0xb0},
@@ -430,7 +430,7 @@
{0x5000, 0xff},
{0x3a18, 0x00},
{0x3a19, 0x80},
- {0x3503, 0x00},
+ {0x3503, 0x07},
{0x4521, 0x00},
{0x5183, 0xb0},
{0x5184, 0xb0},
@@ -527,7 +527,7 @@
{0x5000, 0xff},
{0x3a18, 0x00},
{0x3a19, 0x80},
- {0x3503, 0x00},
+ {0x3503, 0x07},
{0x4521, 0x00},
{0x5183, 0xb0},
{0x5184, 0xb0},
diff --git a/drivers/media/video/msm/sensors/ov5647_v4l2.c b/drivers/media/video/msm/sensors/ov5647_v4l2.c
index d30d48b..48f1d5d 100644
--- a/drivers/media/video/msm/sensors/ov5647_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov5647_v4l2.c
@@ -21,8 +21,6 @@
DEFINE_MUTEX(ov5647_mut);
-
-
static struct msm_camera_i2c_reg_conf ov5647_start_settings[] = {
{0x4202, 0x00}, /* streaming on */
};
@@ -159,6 +157,34 @@
{0x4004, 0x02},
};
+static struct msm_camera_i2c_reg_conf ov5647_zsl_settings[] = {
+ {0x3035, 0x21},
+ {0x3036, 0x2f},
+ {0x3821, 0x06},
+ {0x3820, 0x00},
+ {0x3612, 0x0b},
+ {0x3618, 0x04},
+ {0x380c, 0x0a},
+ {0x380d, 0x8c},
+ {0x380e, 0x07},
+ {0x380f, 0xb0},
+ {0x3814, 0x11},
+ {0x3815, 0x11},
+ {0x3709, 0x12},
+ {0x3808, 0x0a},
+ {0x3809, 0x30},
+ {0x380a, 0x07},
+ {0x380b, 0xa0},
+ {0x3800, 0x00},
+ {0x3801, 0x04},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3b},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x4004, 0x04},
+};
static struct msm_camera_i2c_reg_conf ov5647_recommend_settings[] = {
{0x3035, 0x11},
@@ -313,6 +339,8 @@
ARRAY_SIZE(ov5647_video_60fps_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
{&ov5647_video_90fps_settings[0],
ARRAY_SIZE(ov5647_video_90fps_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
+ {&ov5647_zsl_settings[0],
+ ARRAY_SIZE(ov5647_zsl_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
};
static struct msm_camera_csi_params ov5647_csi_params = {
@@ -370,6 +398,15 @@
.op_pixel_clk = 159408000,
.binning_factor = 0x0,
},
+ { /* For ZSL */
+ .x_output = 0xA30, /*2608*/ /*for 5Mp*/
+ .y_output = 0x7A0, /*1952*/
+ .line_length_pclk = 0xA8C,
+ .frame_length_lines = 0x7B0,
+ .vt_pixel_clk = 79704000,
+ .op_pixel_clk = 159408000,
+ .binning_factor = 0x0,
+ },
};
@@ -381,10 +418,11 @@
};
static struct msm_camera_csi_params *ov5647_csi_params_array[] = {
- &ov5647_csi_params,
- &ov5647_csi_params,
- &ov5647_csi_params,
- &ov5647_csi_params,
+ &ov5647_csi_params, /* Snapshot */
+ &ov5647_csi_params, /* Preview */
+ &ov5647_csi_params, /* 60fps */
+ &ov5647_csi_params, /* 90fps */
+ &ov5647_csi_params, /* ZSL */
};
static struct msm_sensor_id_info_t ov5647_id_info = {
@@ -711,6 +749,8 @@
}
+static int32_t vfe_clk = 266667000;
+
int32_t ov5647_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
int update_type, int res)
{
@@ -762,6 +802,10 @@
0x4800, 0x4,
MSM_CAMERA_I2C_BYTE_DATA);
msleep(266);
+ if (res == MSM_SENSOR_RES_4)
+ v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
+ NOTIFY_PCLK_CHANGE,
+ &vfe_clk);
s_ctrl->func_tbl->sensor_start_stream(s_ctrl);
msleep(50);
}
diff --git a/drivers/media/video/msm/sensors/ov7692_qrd_v4l2.c b/drivers/media/video/msm/sensors/ov7692_v4l2.c
similarity index 87%
rename from drivers/media/video/msm/sensors/ov7692_qrd_v4l2.c
rename to drivers/media/video/msm/sensors/ov7692_v4l2.c
index 2324495..e7970d5 100644
--- a/drivers/media/video/msm/sensors/ov7692_qrd_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov7692_v4l2.c
@@ -247,7 +247,43 @@
.video = &ov7692_subdev_video_ops,
};
+int32_t ov7692_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	struct msm_camera_sensor_info *info = s_ctrl->sensordata;
+
+	if (info->pmic_gpio_enable) {
+		info->sensor_lcd_gpio_onoff(1);
+		usleep_range(5000, 5100);
+	}
+
+	rc = msm_sensor_power_up(s_ctrl);
+	if (rc < 0) {
+		pr_err("%s: msm_sensor_power_up failed\n", __func__);
+		if (info->pmic_gpio_enable)
+			info->sensor_lcd_gpio_onoff(0);
+	}
+	return rc;
+}
+
+int32_t ov7692_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct msm_camera_sensor_info *info = NULL;
+
+ rc = msm_sensor_power_down(s_ctrl);
+ if (rc < 0)
+ CDBG("%s: msm_sensor_power_down failed\n", __func__);
+
+ info = s_ctrl->sensordata;
+ if (info->pmic_gpio_enable) {
+ info->pmic_gpio_enable = 0;
+ info->sensor_lcd_gpio_onoff(0);
+ usleep_range(5000, 5100);
+ }
+ return rc;
+}
static struct msm_sensor_fn_t ov7692_func_tbl = {
.sensor_start_stream = msm_sensor_start_stream,
@@ -257,8 +293,8 @@
.sensor_mode_init = msm_sensor_mode_init,
.sensor_get_output_info = msm_sensor_get_output_info,
.sensor_config = msm_sensor_config,
- .sensor_power_up = msm_sensor_power_up,
- .sensor_power_down = msm_sensor_power_down,
+ .sensor_power_up = ov7692_sensor_power_up,
+ .sensor_power_down = ov7692_sensor_power_down,
};
static struct msm_sensor_reg_t ov7692_regs = {
diff --git a/drivers/media/video/msm/sensors/s5k4e1_v4l2.c b/drivers/media/video/msm/sensors/s5k4e1_v4l2.c
index 6671073..2d25824 100644
--- a/drivers/media/video/msm/sensors/s5k4e1_v4l2.c
+++ b/drivers/media/video/msm/sensors/s5k4e1_v4l2.c
@@ -406,6 +406,36 @@
return 0;
}
+int32_t s5k4e1_sensor_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct msm_camera_sensor_info *s_info;
+
+	rc = msm_sensor_i2c_probe(client, id);
+	if (rc < 0)
+		return rc;
+	s_info = client->dev.platform_data;
+	if (s_info == NULL) {
+		pr_err("%s %s NULL sensor data\n", __func__, client->name);
+		return -EFAULT;
+	}
+	if (s_info->actuator_info && s_info->actuator_info->vcm_enable) {
+		rc = gpio_request(s_info->actuator_info->vcm_pwd,
+			"msm_actuator");
+		if (rc < 0)
+			pr_err("%s: gpio_request:msm_actuator %d failed\n",
+				__func__, s_info->actuator_info->vcm_pwd);
+		rc = gpio_direction_output(s_info->actuator_info->vcm_pwd, 0);
+		if (rc < 0)
+			pr_err("%s: gpio:msm_actuator %d direction can't be set\n",
+				__func__, s_info->actuator_info->vcm_pwd);
+		gpio_free(s_info->actuator_info->vcm_pwd);
+	}
+
+	return rc;
+}
+
static const struct i2c_device_id s5k4e1_i2c_id[] = {
{SENSOR_NAME, (kernel_ulong_t)&s5k4e1_s_ctrl},
{ }
@@ -413,7 +443,7 @@
static struct i2c_driver s5k4e1_i2c_driver = {
.id_table = s5k4e1_i2c_id,
- .probe = msm_sensor_i2c_probe,
+ .probe = s5k4e1_sensor_i2c_probe,
.driver = {
.name = SENSOR_NAME,
},
diff --git a/drivers/media/video/msm/wfd/enc-subdev.c b/drivers/media/video/msm/wfd/enc-subdev.c
index b7ae0f4..c94fa13 100644
--- a/drivers/media/video/msm/wfd/enc-subdev.c
+++ b/drivers/media/video/msm/wfd/enc-subdev.c
@@ -191,7 +191,7 @@
}
vbuf->v4l2_buf.timestamp =
- ns_to_timeval(frame_data->time_stamp);
+ ns_to_timeval(frame_data->time_stamp * NSEC_PER_USEC);
WFD_MSG_DBG("bytes used %d, ts: %d.%d, frame type is %d\n",
frame_data->data_len,
@@ -365,7 +365,15 @@
WFD_MSG_ERR("Failed to get out buf reqs rc = %d", rc);
goto err;
}
- b->count = buf_req.actual_count;
+
+ buf_req.actual_count = b->count = max(buf_req.min_count, b->count);
+ rc = vcd_set_buffer_requirements(client_ctx->vcd_handle,
+ VCD_BUFFER_OUTPUT, &buf_req);
+ if (rc) {
+ WFD_MSG_ERR("Failed to set out buf reqs rc = %d", rc);
+ goto err;
+ }
+
err:
return rc;
}
@@ -1051,6 +1059,7 @@
struct v4l2_fract *frate = arg;
struct vcd_property_hdr vcd_property_hdr;
struct vcd_property_frame_rate vcd_frame_rate;
+ struct vcd_property_vop_timing_constant_delta vcd_delta;
int rc;
vcd_property_hdr.prop_id = VCD_I_FRAME_RATE;
vcd_property_hdr.sz =
@@ -1060,8 +1069,25 @@
vcd_frame_rate.fps_numerator = frate->denominator;
rc = vcd_set_property(client_ctx->vcd_handle,
&vcd_property_hdr, &vcd_frame_rate);
- if (rc)
+ if (rc) {
WFD_MSG_ERR("Failed to set frame rate, rc = %d\n", rc);
+ goto set_framerate_fail;
+ }
+
+ vcd_property_hdr.prop_id = VCD_I_VOP_TIMING_CONSTANT_DELTA;
+ vcd_property_hdr.sz = sizeof(vcd_delta);
+
+ vcd_delta.constant_delta = (frate->numerator * USEC_PER_SEC) /
+ frate->denominator;
+ rc = vcd_set_property(client_ctx->vcd_handle,
+ &vcd_property_hdr, &vcd_delta);
+
+ if (rc) {
+ WFD_MSG_ERR("Failed to set frame delta, rc = %d", rc);
+ goto set_framerate_fail;
+ }
+
+set_framerate_fail:
return rc;
}
@@ -1827,12 +1853,16 @@
struct venc_buf_info *venc_buf = arg;
struct mem_region *mregion = venc_buf->mregion;
struct vcd_frame_data vcd_input_buffer = {0};
+ int64_t ts = 0;
+
+ ts = venc_buf->timestamp;
+ do_div(ts, NSEC_PER_USEC);
vcd_input_buffer.virtual = mregion->kvaddr;
vcd_input_buffer.frm_clnt_data = (u32)mregion;
vcd_input_buffer.ip_frm_tag = (u32)mregion;
vcd_input_buffer.data_len = mregion->size;
- vcd_input_buffer.time_stamp = venc_buf->timestamp;
+ vcd_input_buffer.time_stamp = ts;
vcd_input_buffer.offset = 0;
rc = vcd_encode_frame(client_ctx->vcd_handle,
diff --git a/drivers/media/video/msm/wfd/wfd-ioctl.c b/drivers/media/video/msm/wfd/wfd-ioctl.c
index 2242aa8..4f6c09d 100644
--- a/drivers/media/video/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm/wfd/wfd-ioctl.c
@@ -498,7 +498,6 @@
if (rc)
WFD_MSG_ERR("Failed to free output buffer\n");
wfd_unregister_out_buf(inst, minfo);
- wfd_free_input_buffers(wfd_dev, inst);
}
static int mdp_output_thread(void *data)
@@ -1344,12 +1343,13 @@
inst = filp->private_data;
if (inst) {
wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- vb2_queue_release(&inst->vid_bufq);
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_CLOSE, (void *)inst->mdp_inst);
if (rc)
WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);
+ vb2_queue_release(&inst->vid_bufq);
+ wfd_free_input_buffers(wfd_dev, inst);
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
CLOSE, (void *)inst->venc_inst);
diff --git a/drivers/media/video/msm_vidc/Kconfig b/drivers/media/video/msm_vidc/Kconfig
new file mode 100644
index 0000000..0b5a5fe
--- /dev/null
+++ b/drivers/media/video/msm_vidc/Kconfig
@@ -0,0 +1,8 @@
+#
+# VIDEO CORE
+#
+
+menuconfig MSM_VIDC
+ bool "Qualcomm MSM Video Core Driver"
+ depends on ARCH_MSMCOPPER && VIDEO_V4L2
+ default y
diff --git a/drivers/media/video/msm_vidc/Makefile b/drivers/media/video/msm_vidc/Makefile
new file mode 100644
index 0000000..12c61c9
--- /dev/null
+++ b/drivers/media/video/msm_vidc/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_MSM_VIDC) := msm_v4l2_vidc.o \
+ msm_vidc_common.o \
+ msm_vidc.o \
+ msm_vdec.o \
+ msm_venc.o \
+ msm_smem.o \
+ vidc_hal.o \
+ vidc_hal_interrupt_handler.o \
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
new file mode 100644
index 0000000..25b5c5c
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -0,0 +1,244 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include "msm_smem.h"
+
+struct smem_client {
+ int mem_type;
+ void *clnt;
+};
+
+static int ion_user_to_kernel(struct smem_client *client,
+			int fd, u32 offset, struct msm_smem *mem)
+{
+	struct ion_handle *hndl;
+	unsigned long ionflag;
+	size_t len;
+	int rc = 0;
+	hndl = ion_import_fd(client->clnt, fd);
+	if (IS_ERR_OR_NULL(hndl)) {
+		pr_err("Failed to get handle: %p, %d, %d, %p\n",
+				client, fd, offset, hndl);
+		rc = -ENOMEM;
+		goto fail_import_fd;
+	}
+	rc = ion_handle_get_flags(client->clnt, hndl, &ionflag);
+	if (rc) {
+		pr_err("Failed to get ion flags: %d", rc);
+		goto fail_map;
+	}
+	rc = ion_phys(client->clnt, hndl, &mem->paddr, &len);
+	if (rc) {
+		pr_err("Failed to get physical address\n");
+		goto fail_map;
+	}
+	mem->kvaddr = ion_map_kernel(client->clnt, hndl, ionflag);
+	if (IS_ERR_OR_NULL(mem->kvaddr)) {
+		pr_err("Failed to map shared mem in kernel\n");
+		rc = -EIO;
+		goto fail_map;
+	}
+
+	mem->kvaddr += offset;
+	mem->paddr += offset;
+	mem->mem_type = client->mem_type;
+	mem->smem_priv = hndl;
+	mem->device_addr = mem->paddr;
+	mem->size = len;
+	return rc;
+fail_map:
+	ion_free(client->clnt, hndl);
+fail_import_fd:
+	return rc;
+}
+
+static int alloc_ion_mem(struct smem_client *client, size_t size,
+		u32 align, u32 flags, struct msm_smem *mem)
+{
+	struct ion_handle *hndl;
+	size_t len;
+	int rc = 0;
+	flags = flags | ION_HEAP(ION_CP_MM_HEAP_ID);
+	hndl = ion_alloc(client->clnt, size, align, flags);
+	if (IS_ERR_OR_NULL(hndl)) {
+		pr_err("Failed to allocate shared memory = %p, %zu, %u, 0x%x\n",
+				client, size, align, flags);
+		rc = -ENOMEM;
+		goto fail_shared_mem_alloc;
+	}
+	mem->mem_type = client->mem_type;
+	mem->smem_priv = hndl;
+	if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
+		pr_err("Failed to get physical address\n");
+		rc = -EIO;
+		goto fail_map;
+	}
+	mem->device_addr = mem->paddr;
+	mem->size = size;
+	mem->kvaddr = ion_map_kernel(client->clnt, hndl, 0);
+	if (IS_ERR_OR_NULL(mem->kvaddr)) {
+		pr_err("Failed to map shared mem in kernel\n");
+		rc = -EIO;
+		goto fail_map;
+	}
+	return rc;
+fail_map:
+	ion_free(client->clnt, hndl);
+fail_shared_mem_alloc:
+	return rc;
+}
+
+static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
+{
+ ion_unmap_kernel(client->clnt, mem->smem_priv);
+ ion_free(client->clnt, mem->smem_priv);
+}
+
+static void *ion_new_client(void)
+{
+	struct ion_client *client = NULL;
+	client = msm_ion_client_create(-1, "video_client");
+	if (IS_ERR_OR_NULL(client))
+		pr_err("Failed to create smem client\n");
+	return client;
+}
+
+static void ion_delete_client(struct smem_client *client)
+{
+ ion_client_destroy(client->clnt);
+}
+
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset)
+{
+	struct smem_client *client = clt;
+	int rc = 0;
+	struct msm_smem *mem;
+	if (fd < 0) {
+		pr_err("Invalid fd: %d\n", fd);
+		return NULL;
+	}
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem) {
+		pr_err("Failed to allocate shared mem\n");
+		return NULL;
+	}
+	switch (client->mem_type) {
+	case SMEM_ION:
+		rc = ion_user_to_kernel(client, fd, offset, mem);
+		break;
+	default:
+		pr_err("Mem type not supported\n");
+		rc = -EINVAL;
+		break;
+	}
+	if (rc) {
+		pr_err("Failed to allocate shared memory\n");
+		kfree(mem);
+		mem = NULL;
+	}
+	return mem;
+}
+
+void *msm_smem_new_client(enum smem_type mtype)
+{
+	struct smem_client *client = NULL;
+	void *clnt = NULL;
+	switch (mtype) {
+	case SMEM_ION:
+		clnt = ion_new_client();
+		break;
+	default:
+		pr_err("Mem type not supported\n");
+		break;
+	}
+	if (!IS_ERR_OR_NULL(clnt)) {
+		client = kzalloc(sizeof(*client), GFP_KERNEL);
+		if (client) {
+			client->mem_type = mtype;
+			client->clnt = clnt;
+		}
+	} else {
+		pr_err("Failed to create new client: mtype = %d\n", mtype);
+	}
+	return client;
+}
+
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags)
+{
+ struct smem_client *client;
+ int rc = 0;
+ struct msm_smem *mem;
+
+ client = clt;
+ if (!client) {
+ pr_err("Invalid client passed\n");
+ return NULL;
+ }
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ pr_err("Failed to allocate shared mem\n");
+ return NULL;
+ }
+ switch (client->mem_type) {
+ case SMEM_ION:
+ rc = alloc_ion_mem(client, size, align, flags, mem);
+ break;
+ default:
+ pr_err("Mem type not supported\n");
+ rc = -EINVAL;
+ break;
+ }
+ if (rc) {
+ pr_err("Failed to allocate shared memory\n");
+ kfree(mem);
+ mem = NULL;
+ }
+ return mem;
+}
+
+void msm_smem_free(void *clt, struct msm_smem *mem)
+{
+ struct smem_client *client = clt;
+ if (!client || !mem) {
+ pr_err("Invalid client/handle passed\n");
+ return;
+ }
+ switch (client->mem_type) {
+ case SMEM_ION:
+ free_ion_mem(client, mem);
+ break;
+ default:
+ pr_err("Mem type not supported\n");
+ break;
+ }
+ kfree(mem);
+};
+
+void msm_smem_delete_client(void *clt)
+{
+ struct smem_client *client = clt;
+ if (!client) {
+ pr_err("Invalid client passed\n");
+ return;
+ }
+ switch (client->mem_type) {
+ case SMEM_ION:
+ ion_delete_client(client);
+ break;
+ default:
+ pr_err("Mem type not supported\n");
+ break;
+ }
+ kfree(client);
+}
diff --git a/drivers/media/video/msm_vidc/msm_smem.h b/drivers/media/video/msm_vidc/msm_smem.h
new file mode 100644
index 0000000..84d12cc
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_smem.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_SMEM_H_
+#define _MSM_SMEM_H_
+
+#include <linux/types.h>
+#include <linux/ion.h>
+
+enum smem_type {
+ SMEM_ION,
+};
+
+struct msm_smem {
+ int mem_type;
+ size_t size;
+ void *kvaddr;
+ unsigned long paddr;
+ unsigned long device_addr;
+ /*Device address and others to follow*/
+ void *smem_priv;
+};
+
+void *msm_smem_new_client(enum smem_type mtype);
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags);
+void msm_smem_free(void *clt, struct msm_smem *mem);
+void msm_smem_delete_client(void *clt);
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset);
+#endif
diff --git a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
new file mode 100644
index 0000000..550fbde
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
@@ -0,0 +1,611 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+#include "vidc_hal_api.h"
+#include "msm_smem.h"
+
+/* First /dev/videoN minor requested for the decoder node (encoder gets +1). */
+#define BASE_DEVICE_NUMBER 32
+
+/* Global driver context; allocated in msm_vidc_init(), shared by all cores. */
+struct msm_vidc_drv *vidc_driver;
+
+/* Bookkeeping for one userspace buffer registered via VIDIOC_PREPARE_BUF. */
+struct buffer_info {
+	struct list_head list;	/* linked on msm_v4l2_vid_inst.registered_bufs */
+	int type;		/* v4l2 buffer type the buffer was prepared for */
+	int fd;			/* userspace ion/share fd backing the buffer */
+	int buff_off;		/* byte offset of this buffer within the fd */
+	int size;		/* plane length in bytes */
+	u32 uvaddr;		/* userspace virtual address, echoed on release */
+	struct msm_smem *handle;	/* kernel-side mapping of the fd */
+};
+
+/* Per-open() wrapper: embeds the core vidc instance plus v4l2-layer state. */
+struct msm_v4l2_vid_inst {
+	struct msm_vidc_inst vidc_inst;
+	void *mem_client;	/* smem client used to map userspace buffers */
+	struct list_head registered_bufs;
+};
+
+/* Recover the core vidc instance from the v4l2_fh stashed in private_data. */
+static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
+{
+	struct v4l2_fh *handler = filp->private_data;
+
+	return container_of(handler, struct msm_vidc_inst, event_handler);
+}
+
+/* Recover the v4l2-layer wrapper: private_data -> vidc_inst -> wrapper. */
+static inline struct msm_v4l2_vid_inst *get_v4l2_inst(struct file *filp,
+			void *fh)
+{
+	struct msm_vidc_inst *inst = container_of(filp->private_data,
+			struct msm_vidc_inst, event_handler);
+
+	return container_of((void *)inst,
+			struct msm_v4l2_vid_inst, vidc_inst);
+}
+
+/*
+ * Initialize the per-instance v4l2 event machinery (fh, event queue, and a
+ * pool of 32 pending events) and attach the fh to the video device.
+ * Returns 0 on success or a negative v4l2 error code.
+ */
+static int msm_vidc_v4l2_setup_event_queue(void *inst,
+				struct video_device *pvdev)
+{
+	int rc = 0;
+	struct msm_vidc_inst *vidc_inst = (struct msm_vidc_inst *)inst;
+
+	spin_lock_init(&pvdev->fh_lock);
+	INIT_LIST_HEAD(&pvdev->fh_list);
+	rc = v4l2_fh_init(&vidc_inst->event_handler, pvdev);
+	if (rc < 0)
+		return rc;
+	/*
+	 * Fix: the original tested '&vidc_inst->event_handler.events == NULL'.
+	 * The address of a struct member is never NULL, so v4l2_event_init()
+	 * was unreachable.  Test the events pointer itself instead.
+	 */
+	if (vidc_inst->event_handler.events == NULL) {
+		rc = v4l2_event_init(&vidc_inst->event_handler);
+		if (rc < 0)
+			return rc;
+	}
+	rc = v4l2_event_alloc(&vidc_inst->event_handler, 32);
+	if (rc < 0)
+		return rc;
+	v4l2_fh_add(&vidc_inst->event_handler);
+	return rc;
+}
+
+/*
+ * Find a buffer already registered against 'fd' whose [buff_off, size)
+ * region contains, is contained by, or overlaps the requested region.
+ * CONTAINS/OVERLAPS are range-check macros defined elsewhere in the driver.
+ *
+ * Returns the colliding buffer_info (after logging the collision), or NULL
+ * when the region is free or the input is invalid.
+ */
+struct buffer_info *get_registered_buf(struct list_head *list,
+				int fd, u32 buff_off, u32 size)
+{
+	struct buffer_info *temp;
+	struct buffer_info *ret = NULL;
+	if (!list || fd < 0) {
+		pr_err("%s Invalid input\n", __func__);
+		goto err_invalid_input;
+	}
+	if (!list_empty(list)) {
+		list_for_each_entry(temp, list, list) {
+			if (temp && temp->fd == fd &&
+				(CONTAINS(temp->buff_off, temp->size, buff_off)
+				|| CONTAINS(buff_off, size, temp->buff_off)
+				|| OVERLAPS(buff_off, size,
+					temp->buff_off, temp->size))) {
+				pr_err("This memory region is already mapped\n");
+				ret = temp;
+				break;
+			}
+		}
+	}
+err_invalid_input:
+	return ret;
+}
+
+/*
+ * open() handler: allocate the per-fd instance, create its smem client,
+ * open the core vidc session, and wire up the v4l2 event queue.  On any
+ * failure every resource acquired so far is unwound (goto ladder).
+ */
+static int msm_v4l2_open(struct file *filp)
+{
+	int rc = 0;
+	struct video_device *vdev = video_devdata(filp);
+	struct msm_video_device *vid_dev =
+		container_of(vdev, struct msm_video_device, vdev);
+	struct msm_vidc_core *core = video_drvdata(filp);
+	struct msm_v4l2_vid_inst *v4l2_inst = kzalloc(sizeof(*v4l2_inst),
+						GFP_KERNEL);
+	if (!v4l2_inst) {
+		pr_err("Failed to allocate memory for this instance\n");
+		rc = -ENOMEM;
+		goto fail_nomem;
+	}
+	v4l2_inst->mem_client = msm_smem_new_client(SMEM_ION);
+	if (!v4l2_inst->mem_client) {
+		pr_err("Failed to create memory client\n");
+		rc = -ENOMEM;
+		goto fail_mem_client;
+	}
+	rc = msm_vidc_open(&v4l2_inst->vidc_inst, core->id, vid_dev->type);
+	if (rc) {
+		pr_err("Failed to create video instance, core: %d, type = %d\n",
+			core->id, vid_dev->type);
+		rc = -ENOMEM;
+		goto fail_open;
+	}
+	INIT_LIST_HEAD(&v4l2_inst->registered_bufs);
+	/*
+	 * Fix: the original ignored a setup failure here and went on to
+	 * publish a half-initialized instance through filp->private_data.
+	 */
+	rc = msm_vidc_v4l2_setup_event_queue(&v4l2_inst->vidc_inst, vdev);
+	if (rc)
+		goto fail_event_queue;
+	clear_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags);
+	filp->private_data = &(v4l2_inst->vidc_inst.event_handler);
+	return rc;
+fail_event_queue:
+	msm_vidc_close(&v4l2_inst->vidc_inst);
+fail_open:
+	msm_smem_delete_client(v4l2_inst->mem_client);
+fail_mem_client:
+	kfree(v4l2_inst);
+fail_nomem:
+	return rc;
+}
+
+/*
+ * release() handler: close the core vidc session, free every buffer still
+ * registered on this fd, then tear down the smem client and the instance.
+ * Returns the result of msm_vidc_close().
+ */
+static int msm_v4l2_close(struct file *filp)
+{
+	int rc;
+	struct list_head *ptr, *next;
+	struct buffer_info *binfo;
+	struct msm_vidc_inst *vidc_inst;
+	struct msm_v4l2_vid_inst *v4l2_inst;
+	vidc_inst = get_vidc_inst(filp, NULL);
+	v4l2_inst = get_v4l2_inst(filp, NULL);
+	rc = msm_vidc_close(vidc_inst);
+	/* Reclaim smem mappings for buffers userspace never released. */
+	list_for_each_safe(ptr, next, &v4l2_inst->registered_bufs) {
+		binfo = list_entry(ptr, struct buffer_info, list);
+		list_del(&binfo->list);
+		msm_smem_free(v4l2_inst->mem_client, binfo->handle);
+		kfree(binfo);
+	}
+	msm_smem_delete_client(v4l2_inst->mem_client);
+	kfree(v4l2_inst);
+	return rc;
+}
+
+/* VIDIOC_QUERYCAP: delegate to the core vidc layer. */
+static int msm_v4l2_querycap(struct file *filp, void *fh,
+			struct v4l2_capability *cap)
+{
+	return msm_vidc_querycap(get_vidc_inst(filp, fh), cap);
+}
+
+/* VIDIOC_ENUM_FMT: delegate to the core vidc layer. */
+int msm_v4l2_enum_fmt(struct file *file, void *fh,
+					struct v4l2_fmtdesc *f)
+{
+	return msm_vidc_enum_fmt(get_vidc_inst(file, fh), f);
+}
+
+/* VIDIOC_S_FMT: delegate to the core vidc layer. */
+int msm_v4l2_s_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	return msm_vidc_s_fmt(get_vidc_inst(file, fh), f);
+}
+
+/* VIDIOC_G_FMT: delegate to the core vidc layer. */
+int msm_v4l2_g_fmt(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	return msm_vidc_g_fmt(get_vidc_inst(file, fh), f);
+}
+
+/* VIDIOC_S_CTRL: delegate to the core vidc layer. */
+int msm_v4l2_s_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	return msm_vidc_s_ctrl(get_vidc_inst(file, fh), a);
+}
+
+/* VIDIOC_G_CTRL: delegate to the core vidc layer. */
+int msm_v4l2_g_ctrl(struct file *file, void *fh,
+					struct v4l2_control *a)
+{
+	return msm_vidc_g_ctrl(get_vidc_inst(file, fh), a);
+}
+
+/*
+ * VIDIOC_REQBUFS: a count of 0 means "release everything", so first hand
+ * each registered CAPTURE buffer back to the core and free its smem
+ * mapping, then forward the request to the core vidc layer.
+ */
+int msm_v4l2_reqbufs(struct file *file, void *fh,
+				struct v4l2_requestbuffers *b)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+	struct msm_v4l2_vid_inst *v4l2_inst;
+	struct list_head *ptr, *next;
+	int rc;
+	struct buffer_info *bi;
+	struct v4l2_buffer buffer_info;
+	struct v4l2_plane plane;
+
+	v4l2_inst = get_v4l2_inst(file, NULL);
+	if (b->count == 0) {
+		list_for_each_safe(ptr, next, &v4l2_inst->registered_bufs) {
+			bi = list_entry(ptr, struct buffer_info, list);
+			if (bi->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				/*
+				 * Fix: the original wrote through
+				 * buffer_info.m.planes[0] without ever
+				 * pointing m.planes at valid storage --
+				 * an uninitialized-pointer write.  Back
+				 * it with a stack plane instead.
+				 */
+				memset(&buffer_info, 0, sizeof(buffer_info));
+				memset(&plane, 0, sizeof(plane));
+				buffer_info.m.planes = &plane;
+				buffer_info.type = bi->type;
+				buffer_info.m.planes[0].reserved[0] =
+					bi->fd;
+				buffer_info.m.planes[0].reserved[1] =
+					bi->buff_off;
+				buffer_info.m.planes[0].length = bi->size;
+				buffer_info.m.planes[0].m.userptr =
+					bi->uvaddr;
+				buffer_info.length = 1;
+				pr_err("Releasing buffer: %d, %d, %d\n",
+				buffer_info.m.planes[0].reserved[0],
+				buffer_info.m.planes[0].reserved[1],
+				buffer_info.m.planes[0].length);
+				rc = msm_vidc_release_buf(&v4l2_inst->vidc_inst,
+					&buffer_info);
+				list_del(&bi->list);
+				msm_smem_free(v4l2_inst->mem_client,
+						bi->handle);
+				kfree(bi);
+			}
+		}
+	}
+	return msm_vidc_reqbufs((void *)vidc_inst, b);
+}
+
+/*
+ * VIDIOC_PREPARE_BUF: map each plane's (fd, offset) into the kernel via the
+ * smem client, record the registration, and rewrite the plane's userptr to
+ * the device address before forwarding to the core layer.
+ * Plane convention: reserved[0] = fd, reserved[1] = byte offset.
+ */
+int msm_v4l2_prepare_buf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	struct msm_smem *handle;
+	struct buffer_info *binfo;
+	struct msm_vidc_inst *vidc_inst;
+	struct msm_v4l2_vid_inst *v4l2_inst;
+	int i, rc = 0;
+	vidc_inst = get_vidc_inst(file, fh);
+	v4l2_inst = get_v4l2_inst(file, fh);
+	if (!v4l2_inst->mem_client) {
+		pr_err("Failed to get memory client\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+	for (i = 0; i < b->length; ++i) {
+		/* Reject double registration / overlapping regions. */
+		binfo = get_registered_buf(&v4l2_inst->registered_bufs,
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1],
+				b->m.planes[i].length);
+		if (binfo) {
+			pr_err("This memory region has already been prepared\n");
+			rc = -EINVAL;
+			goto exit;
+		}
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			pr_err("Out of memory\n");
+			rc = -ENOMEM;
+			goto exit;
+		}
+		handle = msm_smem_user_to_kernel(v4l2_inst->mem_client,
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1]);
+		if (!handle) {
+			pr_err("Failed to get device buffer address\n");
+			kfree(binfo);
+			/* Fix: rc was left 0 here, reporting success to
+			 * userspace despite the mapping failure. */
+			rc = -EINVAL;
+			goto exit;
+		}
+		binfo->type = b->type;
+		binfo->fd = b->m.planes[i].reserved[0];
+		binfo->buff_off = b->m.planes[i].reserved[1];
+		binfo->size = b->m.planes[i].length;
+		binfo->uvaddr = b->m.planes[i].m.userptr;
+		binfo->handle = handle;
+		pr_debug("Registering buffer: %d, %d, %d\n",
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1],
+				b->m.planes[i].length);
+		list_add_tail(&binfo->list, &v4l2_inst->registered_bufs);
+		b->m.planes[i].m.userptr = handle->device_addr;
+	}
+	rc = msm_vidc_prepare_buf(&v4l2_inst->vidc_inst, b);
+exit:
+	return rc;
+}
+
+/*
+ * VIDIOC_QBUF: translate each plane's (fd, offset) back to the device
+ * address recorded at prepare time, then queue the buffer with the core.
+ * Queuing an unprepared buffer fails with -EINVAL.
+ */
+int msm_v4l2_qbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *vidc_inst;
+	struct msm_v4l2_vid_inst *v4l2_inst;
+	struct buffer_info *binfo;
+	int rc = 0;
+	int i;
+	vidc_inst = get_vidc_inst(file, fh);
+	v4l2_inst = get_v4l2_inst(file, fh);
+	for (i = 0; i < b->length; ++i) {
+		binfo = get_registered_buf(&v4l2_inst->registered_bufs,
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1],
+				b->m.planes[i].length);
+		if (!binfo) {
+			pr_err("This buffer is not registered: %d, %d, %d\n",
+				b->m.planes[i].reserved[0],
+				b->m.planes[i].reserved[1],
+				b->m.planes[i].length);
+			rc = -EINVAL;
+			goto err_invalid_buff;
+		}
+		/* The core layer works on device addresses, not userptrs. */
+		b->m.planes[i].m.userptr = binfo->handle->device_addr;
+		pr_debug("Queueing device address = %ld\n",
+				binfo->handle->device_addr);
+	}
+	rc = msm_vidc_qbuf(&v4l2_inst->vidc_inst, b);
+err_invalid_buff:
+	return rc;
+}
+
+/* VIDIOC_DQBUF: delegate to the core vidc layer. */
+int msm_v4l2_dqbuf(struct file *file, void *fh,
+				struct v4l2_buffer *b)
+{
+	return msm_vidc_dqbuf(get_vidc_inst(file, fh), b);
+}
+
+/* VIDIOC_STREAMON: delegate to the core vidc layer. */
+int msm_v4l2_streamon(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	return msm_vidc_streamon(get_vidc_inst(file, fh), i);
+}
+
+/* VIDIOC_STREAMOFF: delegate to the core vidc layer. */
+int msm_v4l2_streamoff(struct file *file, void *fh,
+				enum v4l2_buf_type i)
+{
+	return msm_vidc_streamoff(get_vidc_inst(file, fh), i);
+}
+
+/* Event subscription; V4L2_EVENT_ALL is folded onto the driver's single
+ * private event type before handing off to the v4l2 core. */
+static int msm_v4l2_subscribe_event(struct v4l2_fh *fh,
+			struct v4l2_event_subscription *sub)
+{
+	if (sub->type == V4L2_EVENT_ALL)
+		sub->type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+	return v4l2_event_subscribe(fh, sub);
+}
+
+/* Event unsubscription: pass straight through to the v4l2 core. */
+static int msm_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+			struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+/* VIDIOC_DECODER_CMD: delegate to the core vidc layer. */
+static int msm_v4l2_decoder_cmd(struct file *file, void *fh,
+				struct v4l2_decoder_cmd *dec)
+{
+	return msm_vidc_decoder_cmd(get_vidc_inst(file, fh), dec);
+}
+/* ioctl dispatch table shared by decoder and encoder nodes. */
+static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
+	.vidioc_querycap = msm_v4l2_querycap,
+	.vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt,
+	.vidioc_enum_fmt_vid_out_mplane = msm_v4l2_enum_fmt,
+	.vidioc_s_fmt_vid_cap_mplane = msm_v4l2_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane = msm_v4l2_s_fmt,
+	.vidioc_g_fmt_vid_cap_mplane = msm_v4l2_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane = msm_v4l2_g_fmt,
+	.vidioc_reqbufs = msm_v4l2_reqbufs,
+	.vidioc_prepare_buf = msm_v4l2_prepare_buf,
+	.vidioc_qbuf = msm_v4l2_qbuf,
+	.vidioc_dqbuf = msm_v4l2_dqbuf,
+	.vidioc_streamon = msm_v4l2_streamon,
+	.vidioc_streamoff = msm_v4l2_streamoff,
+	.vidioc_s_ctrl = msm_v4l2_s_ctrl,
+	.vidioc_g_ctrl = msm_v4l2_g_ctrl,
+	.vidioc_subscribe_event = msm_v4l2_subscribe_event,
+	.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event,
+	.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
+};
+
+/* Encoder-specific op table; currently empty and unused by probe below. */
+static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
+};
+
+/* poll() handler: delegate to the core vidc layer. */
+static unsigned int msm_v4l2_poll(struct file *filp,
+	struct poll_table_struct *pt)
+{
+	return msm_vidc_poll(get_vidc_inst(filp, NULL), filp, pt);
+}
+
+/* File operations for both video device nodes. */
+static const struct v4l2_file_operations msm_v4l2_vidc_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_v4l2_open,
+	.release = msm_v4l2_close,
+	.ioctl = video_ioctl2,
+	.poll = msm_v4l2_poll,
+};
+
+/* No-op release: the video_device structs are embedded in msm_vidc_core,
+ * which is freed in msm_vidc_remove(), so there is nothing to do here. */
+void msm_vidc_release_video_device(struct video_device *pvdev)
+{
+}
+
+/*
+ * Pull register and IRQ resources from the platform device and initialize
+ * the core's lists, locks, and system-message completions.
+ * Returns 0 on success, -EINVAL/-ENODEV on bad input or missing resources.
+ */
+static int msm_vidc_initialize_core(struct platform_device *pdev,
+		struct msm_vidc_core *core)
+{
+	struct resource *res;
+	int i = 0;
+	if (!core)
+		return -EINVAL;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("Failed to get IORESOURCE_MEM\n");
+		return -ENODEV;
+	}
+	core->register_base = res->start;
+	core->register_size = resource_size(res);
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("Failed to get IORESOURCE_IRQ\n");
+		return -ENODEV;
+	}
+	core->irq = res->start;
+	INIT_LIST_HEAD(&core->instances);
+	mutex_init(&core->sync_lock);
+	spin_lock_init(&core->lock);
+	/* NOTE(review): hard-coded firmware/base address -- presumably
+	 * board-specific; should come from the resource table. TODO confirm. */
+	core->base_addr = 0x34f00000;
+	core->state = VIDC_CORE_UNINIT;
+	/* One completion per system-level message the RPC layer can raise. */
+	for (i = SYS_MSG_INDEX(SYS_MSG_START);
+		i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
+		init_completion(&core->completions[i]);
+	}
+	return 0;
+}
+
+/*
+ * Platform probe: allocate the core, register the v4l2 device plus decoder
+ * and encoder nodes, create the HAL device, and publish the core on the
+ * global driver list.  All failures unwind through the goto ladder.
+ */
+static int __devinit msm_vidc_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_vidc_core *core;
+	unsigned long flags;
+	char debugfs_name[MAX_DEBUGFS_NAME];
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core || !vidc_driver) {
+		pr_err("Failed to allocate memory for device core\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+	rc = msm_vidc_initialize_core(pdev, core);
+	if (rc) {
+		pr_err("Failed to init core\n");
+		goto err_v4l2_register;
+	}
+	rc = v4l2_device_register(&pdev->dev, &core->v4l2_dev);
+	if (rc) {
+		pr_err("Failed to register v4l2 device\n");
+		goto err_v4l2_register;
+	}
+	core->vdev[MSM_VIDC_DECODER].vdev.release =
+		msm_vidc_release_video_device;
+	core->vdev[MSM_VIDC_DECODER].vdev.fops = &msm_v4l2_vidc_fops;
+	core->vdev[MSM_VIDC_DECODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops;
+	core->vdev[MSM_VIDC_DECODER].type = MSM_VIDC_DECODER;
+	rc = video_register_device(&core->vdev[MSM_VIDC_DECODER].vdev,
+					VFL_TYPE_GRABBER, BASE_DEVICE_NUMBER);
+	if (rc) {
+		pr_err("Failed to register video decoder device");
+		goto err_dec_register;
+	}
+	video_set_drvdata(&core->vdev[MSM_VIDC_DECODER].vdev, core);
+
+	core->vdev[MSM_VIDC_ENCODER].vdev.release =
+		msm_vidc_release_video_device;
+	core->vdev[MSM_VIDC_ENCODER].vdev.fops = &msm_v4l2_vidc_fops;
+	core->vdev[MSM_VIDC_ENCODER].vdev.ioctl_ops = &msm_v4l2_ioctl_ops;
+	core->vdev[MSM_VIDC_ENCODER].type = MSM_VIDC_ENCODER;
+	rc = video_register_device(&core->vdev[MSM_VIDC_ENCODER].vdev,
+				VFL_TYPE_GRABBER, BASE_DEVICE_NUMBER + 1);
+	if (rc) {
+		pr_err("Failed to register video encoder device");
+		goto err_enc_register;
+	}
+	video_set_drvdata(&core->vdev[MSM_VIDC_ENCODER].vdev, core);
+	core->device = vidc_hal_add_device(core->id, core->base_addr,
+		core->register_base, core->register_size, core->irq,
+		&handle_cmd_response);
+	if (!core->device) {
+		pr_err("Failed to create interrupt handler");
+		/* Fix: rc was left 0 on this path, so probe reported
+		 * success while freeing the core. */
+		rc = -ENODEV;
+		goto err_hal_add;
+	}
+
+	spin_lock_irqsave(&vidc_driver->lock, flags);
+	if (vidc_driver->num_cores + 1 > MSM_VIDC_CORES_MAX) {
+		spin_unlock_irqrestore(&vidc_driver->lock, flags);
+		pr_err("Maximum cores already exist, core_no = %d\n",
+			vidc_driver->num_cores);
+		/* Fix: rc was left 0 on this path as well. */
+		rc = -EBUSY;
+		goto err_cores_exceeded;
+	}
+
+	core->id = vidc_driver->num_cores++;
+	list_add_tail(&core->list, &vidc_driver->cores);
+	spin_unlock_irqrestore(&vidc_driver->lock, flags);
+	snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", core->id);
+	core->debugfs_root = debugfs_create_dir(debugfs_name,
+		vidc_driver->debugfs_root);
+	pdev->dev.platform_data = core;
+	return rc;
+
+err_cores_exceeded:
+	/* Fix: the HAL device created above was leaked on this path. */
+	vidc_hal_delete_device(core->device);
+err_hal_add:
+	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
+err_enc_register:
+	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
+err_dec_register:
+	v4l2_device_unregister(&core->v4l2_dev);
+err_v4l2_register:
+	kfree(core);
+err_no_mem:
+	return rc;
+}
+
+/*
+ * Platform remove: unwind everything probe set up, in reverse order.
+ * Fixes vs. the original: the core is now taken off the global list (and
+ * num_cores decremented) under the driver lock, and the per-core debugfs
+ * directory is removed -- both were previously leaked/stale.
+ */
+static int __devexit msm_vidc_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct msm_vidc_core *core = pdev->dev.platform_data;
+
+	spin_lock_irqsave(&vidc_driver->lock, flags);
+	list_del(&core->list);
+	vidc_driver->num_cores--;
+	spin_unlock_irqrestore(&vidc_driver->lock, flags);
+	debugfs_remove_recursive(core->debugfs_root);
+	vidc_hal_delete_device(core->device);
+	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
+	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
+	v4l2_device_unregister(&core->v4l2_dev);
+	kfree(core);
+	return rc;
+}
+static const struct of_device_id msm_vidc_dt_match[] = {
+	{.compatible = "qcom,msm-vidc"},
+	/* Fix: of_device_id tables must end with an empty sentinel entry;
+	 * without it the OF match walk runs off the end of the array. */
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
+
+/* Platform driver binding, matched by name or by the DT table above. */
+static struct platform_driver msm_vidc_driver = {
+	.probe = msm_vidc_probe,
+	.remove = msm_vidc_remove,
+	.driver = {
+		.name = "msm_vidc",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vidc_dt_match,
+	},
+};
+
+/*
+ * Module init: allocate the single global driver context, create the
+ * debugfs root (best-effort), and register the platform driver.
+ */
+static int __init msm_vidc_init(void)
+{
+	int rc = 0;
+	vidc_driver = kzalloc(sizeof(*vidc_driver),
+						GFP_KERNEL);
+	if (!vidc_driver) {
+		/* Fix: "memroy" typo in the original message. */
+		pr_err("Failed to allocate memory for msm_vidc_drv\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vidc_driver->cores);
+	spin_lock_init(&vidc_driver->lock);
+	/* Debugfs failure is non-fatal; only log it. */
+	vidc_driver->debugfs_root = debugfs_create_dir("msm_vidc", NULL);
+	if (!vidc_driver->debugfs_root)
+		pr_err("Failed to create debugfs for msm_vidc\n");
+
+	rc = platform_driver_register(&msm_vidc_driver);
+	if (rc) {
+		pr_err("Failed to register platform driver\n");
+		/* Fix: the debugfs root was leaked on this error path. */
+		debugfs_remove_recursive(vidc_driver->debugfs_root);
+		kfree(vidc_driver);
+		vidc_driver = NULL;
+	}
+
+	return rc;
+}
+
+/* Module exit: unregister the driver and drop debugfs plus global state.
+ * vidc_driver is non-NULL here because init fails the load otherwise. */
+static void __exit msm_vidc_exit(void)
+{
+	platform_driver_unregister(&msm_vidc_driver);
+	debugfs_remove_recursive(vidc_driver->debugfs_root);
+	kfree(vidc_driver);
+	vidc_driver = NULL;
+}
+
+module_init(msm_vidc_init);
+module_exit(msm_vidc_exit);
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
new file mode 100644
index 0000000..3011a2b
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -0,0 +1,875 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_common.h"
+#include "vidc_hal_api.h"
+#include "msm_smem.h"
+
+/* Decoder identity and default stream geometry. */
+#define MSM_VDEC_DVC_NAME "msm_vdec_8974"
+#define MAX_PLANES 1
+#define DEFAULT_HEIGHT 720
+#define DEFAULT_WIDTH 1280
+/* Bounds applied to the OUTPUT (bitstream) queue buffer count. */
+#define MIN_NUM_OUTPUT_BUFFERS 2
+#define MAX_NUM_OUTPUT_BUFFERS 6
+
+/* NULL-terminated menu strings backing the V4L2 menu controls below. */
+static const char *const mpeg_video_vidc_divx_format[] = {
+	"DIVX Format 4",
+	"DIVX Format 5",
+	"DIVX Format 6",
+	NULL
+};
+static const char *mpeg_video_stream_format[] = {
+	"NAL Format Start Codes",
+	"NAL Format One NAL Per Buffer",
+	"NAL Format One Byte Length",
+	"NAL Format Two Byte Length",
+	"NAL Format Four Byte Length",
+	NULL
+};
+static const char *const mpeg_video_output_order[] = {
+	"Display Order",
+	"Decode Order",
+	NULL
+};
+/* Decoder control definitions; menu_skip_mask excludes nothing for the
+ * menus listed (all enumerated values are explicitly allowed). */
+static const struct msm_vidc_ctrl msm_vdec_ctrls[] = {
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT,
+		.name = "NAL Format",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES,
+		.menu_skip_mask = ~(
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_NAL_PER_BUFFER) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_BYTE_LENGTH) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_TWO_BYTE_LENGTH) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH)
+		),
+		.qmenu = mpeg_video_stream_format,
+		.step = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER,
+		.name = "Output Order",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE)
+			),
+		.qmenu = mpeg_video_output_order,
+		.step = 0,
+	},
+	{
+		/* Bitmask of picture types (I/P/B/...) allowed to decode. */
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE,
+		.name = "Picture Type Decoding",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 15,
+		.default_value = 15,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO,
+		.name = "Keep Aspect Ratio",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE,
+		.name = "Deblocker Mode",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT,
+		.name = "Divx Format",
+		.type = V4L2_CTRL_TYPE_MENU,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6,
+		.default_value = V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4,
+		.menu_skip_mask = ~(
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_5) |
+			(1 << V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6)
+			),
+		.qmenu = mpeg_video_vidc_divx_format,
+		.step = 0,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING,
+		.name = "MB Error Map Reporting",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER,
+		.name = "control",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = 0,
+		.maximum = 1,
+		.default_value = 0,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+};
+
+#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
+
+/* NV12 frame size: luma stride rounded up to a multiple of 32 bytes,
+ * times height, times 1.5 (Y plane plus half-size interleaved CbCr). */
+static u32 get_frame_size_nv12(int plane,
+			u32 height, u32 width)
+{
+	u32 aligned_stride = (width + 31) & ~31;
+
+	return height * aligned_stride * 3 / 2;
+}
+/* NV21 frame size.  NOTE(review): uses 2 bytes/pixel with no stride
+ * alignment, unlike the NV12 variant above (aligned stride, 1.5x) --
+ * presumably a deliberate over-allocation, but worth confirming. */
+static u32 get_frame_size_nv21(int plane,
+			u32 height, u32 width)
+{
+	return height * width * 2;
+}
+
+/* Fixed 5 MiB (0x500000) worst-case bitstream buffer regardless of
+ * resolution; plane/height/width are intentionally unused. */
+static u32 get_frame_size_compressed(int plane,
+			u32 height, u32 width)
+{
+	return 0x500000;
+}
+
+/* Decoder format table: CAPTURE_PORT entries are raw (decoded) pixel
+ * formats, OUTPUT_PORT entries are compressed bitstream formats. */
+static const struct msm_vidc_format vdec_formats[] = {
+	{
+		.name = "YCbCr Semiplanar 4:2:0",
+		.description = "Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_nv12,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "Mpeg4",
+		.description = "Mpeg4 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "Mpeg2",
+		.description = "Mpeg2 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG2,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "H263",
+		.description = "H263 compressed format",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "H264",
+		.description = "H264 compressed format",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "YCrCb Semiplanar 4:2:0",
+		.description = "Y/CrCb 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_nv21,
+		.type = CAPTURE_PORT,
+	},
+};
+
+/* Start streaming on the vb2 queue matching buffer type 'i'. */
+int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc;
+	struct vb2_queue *q = msm_comm_get_vb2q(inst, i);
+
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	pr_debug("Calling streamon\n");
+	rc = vb2_streamon(q, i);
+	if (rc)
+		pr_err("streamon failed on port: %d\n", i);
+	return rc;
+}
+
+/* Stop streaming on the vb2 queue matching buffer type 'i'. */
+int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc;
+	struct vb2_queue *q = msm_comm_get_vb2q(inst, i);
+
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	pr_debug("Calling streamoff\n");
+	rc = vb2_streamoff(q, i);
+	if (rc)
+		pr_err("streamoff failed on port: %d\n", i);
+	return rc;
+}
+
+/*
+ * Register CAPTURE (decoded output) buffers with the HAL session, one
+ * vidc_buffer_addr_info per plane.  OUTPUT (bitstream) buffers need no
+ * HAL registration and fall straight through.
+ */
+int msm_vdec_prepare_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int rc = 0;
+	int i;
+	struct vidc_buffer_addr_info buffer_info;
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		for (i = 0; i < b->length; i++) {
+			/* Fix: this trace was logged at pr_err; use
+			 * pr_debug to match the release path below. */
+			pr_debug("device_addr = %ld, size = %d\n",
+				b->m.planes[i].m.userptr,
+				b->m.planes[i].length);
+			buffer_info.buffer_size = b->m.planes[i].length;
+			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr =
+				b->m.planes[i].m.userptr;
+			buffer_info.extradata_size = 0;
+			buffer_info.extradata_addr = 0;
+			rc = vidc_hal_session_set_buffers((void *)inst->session,
+					&buffer_info);
+			if (rc) {
+				pr_err("vidc_hal_session_set_buffers failed");
+				break;
+			}
+		}
+		break;
+	default:
+		pr_err("Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Unregister CAPTURE buffers from the HAL session; the mirror image of
+ * msm_vdec_prepare_buf() above.
+ */
+int msm_vdec_release_buf(struct msm_vidc_inst *inst,
+					struct v4l2_buffer *b)
+{
+	int rc = 0;
+	int i;
+	struct vidc_buffer_addr_info buffer_info;
+
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		for (i = 0; i < b->length; i++) {
+			pr_debug("Release device_addr = %ld, size = %d\n",
+				b->m.planes[i].m.userptr,
+				b->m.planes[i].length);
+			buffer_info.buffer_size = b->m.planes[i].length;
+			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr =
+				b->m.planes[i].m.userptr;
+			/* Fix: extradata_size was left uninitialized here
+			 * (the prepare path sets it), passing stack
+			 * garbage to the HAL. */
+			buffer_info.extradata_size = 0;
+			buffer_info.extradata_addr = 0;
+			rc = vidc_hal_session_release_buffers(
+				(void *)inst->session, &buffer_info);
+			if (rc)
+				pr_err("vidc_hal_session_release_buffers failed");
+		}
+		break;
+	default:
+		pr_err("Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+	return rc;
+}
+
+/* Queue a buffer on the vb2 queue matching its type. */
+int msm_vdec_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	int rc;
+	struct vb2_queue *q = msm_comm_get_vb2q(inst, b->type);
+
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_qbuf(q, b);
+	if (rc)
+		pr_err("Failed to qbuf, %d\n", rc);
+	return rc;
+}
+/* Dequeue a buffer (non-blocking) from the vb2 queue matching its type. */
+int msm_vdec_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	int rc;
+	struct vb2_queue *q = msm_comm_get_vb2q(inst, b->type);
+
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_dqbuf(q, b, true);
+	if (rc)
+		pr_err("Failed to dqbuf, %d\n", rc);
+	return rc;
+}
+
+/* Forward a REQBUFS request to the vb2 queue matching its type. */
+int msm_vdec_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b)
+{
+	int rc;
+	struct vb2_queue *q;
+
+	if (!inst || !b) {
+		pr_err("Invalid input, inst = %p, buffer = %p\n", inst, b);
+		return -EINVAL;
+	}
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_reqbufs(q, b);
+	if (rc)
+		pr_err("Failed to get reqbufs, %d\n", rc);
+	return rc;
+}
+
+/*
+ * VIDIOC_G_FMT: report the current format on the requested port.  When a
+ * resolution reconfigure is pending, the instance dimensions are updated
+ * from the reconfig values before being reported.
+ */
+int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	int i;
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, format = %p\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		fmt = inst->fmts[CAPTURE_PORT];
+	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		fmt = inst->fmts[OUTPUT_PORT];
+
+	if (fmt) {
+		f->fmt.pix_mp.pixelformat = fmt->fourcc;
+		/* Adopt the post-reconfig dimensions if one is in flight. */
+		if (inst->in_reconfig == true) {
+			inst->height = inst->reconfig_height;
+			inst->width = inst->reconfig_width;
+		}
+		f->fmt.pix_mp.height = inst->height;
+		f->fmt.pix_mp.width = inst->width;
+		for (i = 0; i < fmt->num_planes; ++i) {
+			f->fmt.pix_mp.plane_fmt[i].sizeimage =
+			fmt->get_frame_size(i, inst->height, inst->width);
+		}
+	} else {
+		pr_err("Buf type not recognized, type = %d\n",
+					f->type);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * VIDIOC_S_FMT: select the pixel (CAPTURE) or bitstream (OUTPUT) format,
+ * record dimensions, fill per-plane sizeimage, and -- on the OUTPUT port --
+ * drive the core session to the OPEN state.
+ */
+int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	int i;
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, format = %p\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		inst->width = f->fmt.pix_mp.width;
+		inst->height = f->fmt.pix_mp.height;
+		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->fmt.pix_mp.pixelformat,
+			CAPTURE_PORT);
+		if (fmt && fmt->type != CAPTURE_PORT) {
+			pr_err("Format: %d not supported on CAPTURE port\n",
+					f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto err_invalid_fmt;
+		}
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->fmt.pix_mp.pixelformat,
+			OUTPUT_PORT);
+		if (fmt && fmt->type != OUTPUT_PORT) {
+			pr_err("Format: %d not supported on OUTPUT port\n",
+					f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto err_invalid_fmt;
+		}
+	}
+
+	if (fmt) {
+		for (i = 0; i < fmt->num_planes; ++i) {
+			f->fmt.pix_mp.plane_fmt[i].sizeimage =
+				fmt->get_frame_size(i, f->fmt.pix_mp.height,
+						f->fmt.pix_mp.width);
+		}
+		inst->fmts[fmt->type] = fmt;
+		/* Setting the bitstream format is what opens the session. */
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			rc = msm_comm_try_state(inst, MSM_VIDC_OPEN);
+			if (rc) {
+				pr_err("Failed to open instance\n");
+				goto err_invalid_fmt;
+			}
+		}
+	} else {
+		pr_err("Buf type not recognized, type = %d\n",
+					f->type);
+		rc = -EINVAL;
+	}
+err_invalid_fmt:
+	return rc;
+}
+
+/* VIDIOC_QUERYCAP for the decoder node: static identity and capability
+ * flags (multiplanar capture + output, streaming I/O). */
+int msm_vdec_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap)
+{
+	if (!inst || !cap) {
+		pr_err("Invalid input, inst = %p, cap = %p\n", inst, cap);
+		return -EINVAL;
+	}
+	strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, MSM_VDEC_DVC_NAME, sizeof(cap->card));
+	cap->bus_info[0] = 0;
+	cap->version = MSM_VIDC_VERSION;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+						V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+						V4L2_CAP_STREAMING;
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+	return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT: enumerate vdec_formats by index, filtered to the
+ * requested port; OUTPUT-port (bitstream) entries are flagged compressed.
+ * Returns -EINVAL once the index runs past the table.
+ */
+int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, f = %p\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->index, CAPTURE_PORT);
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		fmt = msm_comm_get_pixel_fmt_index(vdec_formats,
+			ARRAY_SIZE(vdec_formats), f->index, OUTPUT_PORT);
+		f->flags = V4L2_FMT_FLAG_COMPRESSED;
+	}
+
+	memset(f->reserved, 0 , sizeof(f->reserved));
+	if (fmt) {
+		strlcpy(f->description, fmt->description,
+				sizeof(f->description));
+		f->pixelformat = fmt->fourcc;
+	} else {
+		pr_err("No more formats found\n");
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * vb2 queue_setup callback: size the OUTPUT (bitstream) queue from local
+ * bounds, and the CAPTURE queue from firmware buffer requirements fetched
+ * after pushing the frame size to the HAL.
+ */
+static int msm_vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+				unsigned int *num_planes, unsigned long sizes[],
+				void *alloc_ctxs[])
+{
+	int i, rc = 0;
+	struct msm_vidc_inst *inst;
+	struct hal_frame_size frame_sz;
+	unsigned long flags;
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		*num_planes = 1;
+		/* Clamp the bitstream buffer count into [MIN, MAX]. */
+		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
+				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		for (i = 0; i < *num_planes; i++) {
+			sizes[i] = inst->fmts[OUTPUT_PORT]->get_frame_size(
+					i, inst->height, inst->width);
+		}
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		pr_debug("Getting bufreqs on capture plane\n");
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			pr_err("Failed to open instance\n");
+			break;
+		}
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		frame_sz.width = inst->width;
+		frame_sz.height = inst->height;
+		pr_debug("width = %d, height = %d\n",
+				frame_sz.width, frame_sz.height);
+		rc = vidc_hal_session_set_property((void *)inst->session,
+				HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			pr_err("Failed to set hal property for framesize\n");
+			break;
+		}
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			pr_err("Failed to get buffer requirements: %d\n", rc);
+			break;
+		}
+		*num_planes = 1;
+		spin_lock_irqsave(&inst->lock, flags);
+		/* NOTE(review): index 1 appears to be the HAL_BUFFER_OUTPUT
+		 * slot of buff_req.buffer[] -- a named index would be
+		 * clearer; confirm against the HAL layout. */
+		*num_buffers = inst->buff_req.buffer[1].buffer_count_actual;
+		spin_unlock_irqrestore(&inst->lock, flags);
+		pr_debug("size = %d, alignment = %d\n",
+				inst->buff_req.buffer[1].buffer_size,
+				inst->buff_req.buffer[1].buffer_alignment);
+		for (i = 0; i < *num_planes; i++) {
+			sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
+					i, inst->height, inst->width);
+		}
+
+		break;
+	default:
+		pr_err("Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * start_streaming() - transition the decoder session to RUNNING.
+ *
+ * Allocates scratch buffers, pushes the current value of every V4L2 control
+ * down to the HAL, moves the session to START_DONE and finally flushes any
+ * buffers that were queued before both ports were streaming.
+ * Returns 0 on success or a negative errno.
+ */
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct vb2_buf_entry *temp;
+	struct list_head *ptr, *next;
+	struct v4l2_control control;
+	struct hal_nal_stream_format_supported stream_format;
+	struct hal_enable_picture enable_picture;
+	struct hal_enable hal_property;
+	u32 control_idx = 0;
+	enum hal_property property_id = 0;
+	u32 property_val = 0;
+	void *pdata;
+	rc = msm_comm_set_scratch_buffers(inst);
+	if (rc) {
+		pr_err("Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	/* Replay every control value into the firmware before starting. */
+	for (; control_idx < NUM_CTRLS; control_idx++) {
+		control.id = msm_vdec_ctrls[control_idx].id;
+		rc = v4l2_g_ctrl(&inst->ctrl_handler, &control);
+		if (rc) {
+			pr_err("Failed to get control value for ID=%d\n",
+				msm_vdec_ctrls[control_idx].id);
+		} else {
+			property_id = 0;
+			/* Map the V4L2 control to its HAL property/payload;
+			 * controls with no HAL mapping leave property_id 0
+			 * and are skipped below. */
+			switch (control.id) {
+			case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
+				property_id =
+				HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
+				stream_format.nal_stream_format_supported =
+				(0x00000001 << control.value);
+				pdata = &stream_format;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
+				property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
+				property_val = control.value;
+				pdata = &property_val;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE:
+				property_id =
+					HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
+				enable_picture.picture_type = control.value;
+				pdata = &enable_picture;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
+				property_id =
+					HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+				hal_property.enable = control.value;
+				pdata = &hal_property;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
+				property_id =
+					HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+				hal_property.enable = control.value;
+				pdata = &hal_property;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
+				property_id = HAL_PARAM_DIVX_FORMAT;
+				property_val = control.value;
+				pdata = &property_val;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
+				property_id =
+					HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+				hal_property.enable = control.value;
+				pdata = &hal_property;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
+				property_id =
+					HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+				hal_property.enable = control.value;
+				pdata = &hal_property;
+				break;
+			default:
+				break;
+			}
+			if (property_id) {
+				/* NOTE(review): informational trace emitted at
+				 * error level; consider pr_debug. */
+				pr_err("Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
+					property_id,
+					msm_vdec_ctrls[control_idx].id,
+					control.value);
+				rc = vidc_hal_session_set_property((void *)
+						inst->session, property_id,
+						pdata);
+			}
+			if (rc)
+				pr_err("Failed to set hal property for framesize\n");
+		}
+	}
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		pr_err("Failed to move inst: %p to start done state\n",
+			inst);
+		goto fail_start;
+	}
+	spin_lock_irqsave(&inst->lock, flags);
+	/* Flush buffers parked on pendingq while only one port streamed.
+	 * NOTE(review): msm_comm_qbuf() is called with the spinlock held --
+	 * confirm it cannot sleep. */
+	if (!list_empty(&inst->pendingq)) {
+		list_for_each_safe(ptr, next, &inst->pendingq) {
+			temp = list_entry(ptr, struct vb2_buf_entry, list);
+			rc = msm_comm_qbuf(temp->vb);
+			if (rc) {
+				pr_err("Failed to qbuf to hardware\n");
+				break;
+			}
+			list_del(&temp->list);
+			kfree(temp);
+		}
+	}
+	spin_unlock_irqrestore(&inst->lock, flags);
+	return rc;
+fail_start:
+	return rc;
+}
+
+/*
+ * msm_vdec_start_streaming() - vb2 start_streaming callback.
+ * The session is only kicked once both queues are streaming, so whichever
+ * port starts second actually triggers start_streaming(inst).
+ */
+static int msm_vdec_start_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	pr_debug("Streamon called on: %d capability\n", q->type);
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (inst->vb2_bufq[CAPTURE_PORT].streaming)
+			rc = start_streaming(inst);
+	} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		/* Streaming the capture side again ends any reconfig cycle. */
+		inst->in_reconfig = false;
+		if (inst->vb2_bufq[OUTPUT_PORT].streaming)
+			rc = start_streaming(inst);
+	} else {
+		pr_err("Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * msm_vdec_stop_streaming() - vb2 stop_streaming callback.
+ * The session is closed only once BOTH queues have stopped streaming:
+ * whichever port stops last moves the instance to MSM_VIDC_CLOSE_DONE.
+ */
+static int msm_vdec_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	pr_debug("Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (!inst->vb2_bufq[CAPTURE_PORT].streaming)
+			rc = msm_comm_try_state(inst, MSM_VIDC_CLOSE_DONE);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (!inst->vb2_bufq[OUTPUT_PORT].streaming)
+			rc = msm_comm_try_state(inst, MSM_VIDC_CLOSE_DONE);
+		break;
+	default:
+		pr_err("Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	/* NOTE(review): this message also fires on the bad-q-type path,
+	 * where no state transition was ever attempted. */
+	if (rc)
+		pr_err("Failed to move inst: %p, cap = %d to state: %d\n",
+			inst, q->type, MSM_VIDC_CLOSE_DONE);
+	return rc;
+}
+
+/* vb2 buf_queue callback: hand a ready buffer straight to the session. */
+static void msm_vdec_buf_queue(struct vb2_buffer *vb)
+{
+	int rc;
+	rc = msm_comm_qbuf(vb);
+	if (rc)
+		pr_err("Failed to queue buffer: %d\n", rc);
+}
+
+/* vb2 queue operations for the decoder, handed out via the accessor below. */
+static const struct vb2_ops msm_vdec_vb2q_ops = {
+	.queue_setup = msm_vdec_queue_setup,
+	.start_streaming = msm_vdec_start_streaming,
+	.buf_queue = msm_vdec_buf_queue,
+	.stop_streaming = msm_vdec_stop_streaming,
+};
+
+/* Accessor so other translation units can wire up the decoder's vb2 queues. */
+const struct vb2_ops *msm_vdec_get_vb2q_ops(void)
+{
+	return &msm_vdec_vb2q_ops;
+}
+
+/*
+ * msm_vdec_inst_init() - set per-instance decoder defaults.
+ * Picks default formats for both ports and the default resolution.
+ * Returns 0, or -EINVAL if @inst is NULL.
+ */
+int msm_vdec_inst_init(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (!inst) {
+		pr_err("Invalid input = %p\n", inst);
+		return -EINVAL;
+	}
+	/* NOTE(review): vdec_formats[] is defined earlier in this file --
+	 * confirm index 1 is the compressed bitstream entry (OUTPUT port)
+	 * and index 0 the raw YUV entry (CAPTURE port). */
+	inst->fmts[OUTPUT_PORT] = &vdec_formats[1];
+	inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
+	inst->height = DEFAULT_HEIGHT;
+	inst->width = DEFAULT_WIDTH;
+	return rc;
+}
+
+/* Control ops are deliberate no-ops: control values are only read back in
+ * bulk and pushed to the HAL when streaming starts (see start_streaming). */
+static int msm_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	return 0;
+}
+static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops msm_vdec_ctrl_ops = {
+
+	.s_ctrl = msm_vdec_op_s_ctrl,
+	.g_volatile_ctrl = msm_vdec_op_g_volatile_ctrl,
+};
+
+/* Accessor for the decoder's control operations. */
+const struct v4l2_ctrl_ops *msm_vdec_get_ctrl_ops(void)
+{
+	return &msm_vdec_ctrl_ops;
+}
+
+/*
+ * msm_vdec_s_ctrl() / msm_vdec_g_ctrl() - set/get a single decoder control.
+ * Thin wrappers over the v4l2 control handler.  Validate the arguments the
+ * same way every other public entry point in this file does; the originals
+ * dereferenced @inst unchecked.
+ */
+int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	if (!inst || !ctrl) {
+		pr_err("Invalid input, inst = %p, ctrl = %p\n", inst, ctrl);
+		return -EINVAL;
+	}
+	return v4l2_s_ctrl(&inst->ctrl_handler, ctrl);
+}
+int msm_vdec_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	if (!inst || !ctrl) {
+		pr_err("Invalid input, inst = %p, ctrl = %p\n", inst, ctrl);
+		return -EINVAL;
+	}
+	return v4l2_g_ctrl(&inst->ctrl_handler, ctrl);
+}
+/*
+ * msm_vdec_ctrl_init() - register every decoder control with the v4l2
+ * control framework.  Private (driver-specific) controls go through
+ * v4l2_ctrl_new_custom(); standard controls use the std/std_menu helpers.
+ * Returns 0 or the handler's accumulated error code.
+ */
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst)
+{
+	int idx = 0;
+	/*
+	 * Zero-initialize: fields never assigned in the loop below (e.g.
+	 * is_private/is_volatile) would otherwise be passed to
+	 * v4l2_ctrl_new_custom() as uninitialized stack garbage.
+	 */
+	struct v4l2_ctrl_config ctrl_cfg = {0};
+	int ret_val = 0;
+
+	ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, NUM_CTRLS);
+
+	if (ret_val) {
+		pr_err("CTRL ERR: Control handler init failed, %d\n",
+				inst->ctrl_handler.error);
+		return ret_val;
+	}
+
+	for (; idx < NUM_CTRLS; idx++) {
+		if (IS_PRIV_CTRL(msm_vdec_ctrls[idx].id)) {
+			/* Add a private control. */
+			ctrl_cfg.def = msm_vdec_ctrls[idx].default_value;
+			ctrl_cfg.flags = 0;
+			ctrl_cfg.id = msm_vdec_ctrls[idx].id;
+			ctrl_cfg.max = msm_vdec_ctrls[idx].maximum;
+			ctrl_cfg.min = msm_vdec_ctrls[idx].minimum;
+			ctrl_cfg.menu_skip_mask =
+				msm_vdec_ctrls[idx].menu_skip_mask;
+			ctrl_cfg.name = msm_vdec_ctrls[idx].name;
+			ctrl_cfg.ops = &msm_vdec_ctrl_ops;
+			ctrl_cfg.step = msm_vdec_ctrls[idx].step;
+			ctrl_cfg.type = msm_vdec_ctrls[idx].type;
+			ctrl_cfg.qmenu = msm_vdec_ctrls[idx].qmenu;
+
+			v4l2_ctrl_new_custom(&inst->ctrl_handler,
+				&ctrl_cfg, NULL);
+		} else {
+			if (msm_vdec_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) {
+				v4l2_ctrl_new_std_menu(&inst->ctrl_handler,
+					&msm_vdec_ctrl_ops,
+					msm_vdec_ctrls[idx].id,
+					msm_vdec_ctrls[idx].maximum,
+					msm_vdec_ctrls[idx].menu_skip_mask,
+					msm_vdec_ctrls[idx].default_value);
+			} else {
+				v4l2_ctrl_new_std(&inst->ctrl_handler,
+					&msm_vdec_ctrl_ops,
+					msm_vdec_ctrls[idx].id,
+					msm_vdec_ctrls[idx].minimum,
+					msm_vdec_ctrls[idx].maximum,
+					msm_vdec_ctrls[idx].step,
+					msm_vdec_ctrls[idx].default_value);
+			}
+		}
+	}
+	/* v4l2_ctrl_new_* record failures in handler->error. */
+	ret_val = inst->ctrl_handler.error;
+	if (ret_val)
+		pr_err("CTRL ERR: Error adding ctrls to ctrl handle, %d\n",
+				inst->ctrl_handler.error);
+	return ret_val;
+}
diff --git a/drivers/media/video/msm_vidc/msm_vdec.h b/drivers/media/video/msm_vidc/msm_vdec.h
new file mode 100644
index 0000000..1242fb4
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vdec.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_VDEC_H_
+#define _MSM_VDEC_H_
+
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+
+int msm_vdec_inst_init(struct msm_vidc_inst *inst);
+int msm_vdec_ctrl_init(struct msm_vidc_inst *inst);
+int msm_vdec_querycap(void *instance, struct v4l2_capability *cap);
+/*
+ * enum_fmt/s_ctrl/g_ctrl are declared with struct msm_vidc_inst * to match
+ * their definitions in msm_vdec.c; the previous void * declarations were
+ * conflicting types.
+ */
+int msm_vdec_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f);
+int msm_vdec_s_fmt(void *instance, struct v4l2_format *f);
+int msm_vdec_g_fmt(void *instance, struct v4l2_format *f);
+int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *a);
+int msm_vdec_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *a);
+int msm_vdec_reqbufs(void *instance, struct v4l2_requestbuffers *b);
+int msm_vdec_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_release_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_vdec_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+/* const-qualified to match the definition, which returns &msm_vdec_vb2q_ops. */
+const struct vb2_ops *msm_vdec_get_vb2q_ops(void);
+
+#endif
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
new file mode 100644
index 0000000..5dfea04f
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -0,0 +1,1252 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+
+#include "msm_vidc_internal.h"
+#include "msm_vidc_common.h"
+#include "vidc_hal_api.h"
+#include "msm_smem.h"
+
+#define MSM_VENC_DVC_NAME "msm_venc_8974"
+#define DEFAULT_HEIGHT 720
+#define DEFAULT_WIDTH 1280
+#define MIN_NUM_OUTPUT_BUFFERS 2
+#define MAX_NUM_OUTPUT_BUFFERS 8
+#define MIN_BIT_RATE 64
+#define MAX_BIT_RATE 8000
+#define DEFAULT_BIT_RATE 64
+#define BIT_RATE_STEP 1
+#define MIN_FRAME_RATE 1
+#define MAX_FRAME_RATE 120
+#define DEFAULT_FRAME_RATE 30
+#define MAX_SLICE_BYTE_SIZE 1024
+#define MIN_SLICE_BYTE_SIZE 1024
+#define MAX_SLICE_MB_SIZE 300
+#define I_FRAME_QP 26
+#define P_FRAME_QP 28
+#define B_FRAME_QP 30
+#define MAX_INTRA_REFRESH_MBS 300
+#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+
+/* Menu-item name tables for the private encoder controls below; each table
+ * is NULL-terminated as the control framework requires for qmenu. */
+static const char *const mpeg_video_rate_control[] = {
+	"No Rate Control",
+	"VBR VFR",
+	"VBR CFR",
+	"CBR VFR",
+	"CBR CFR",
+	NULL
+};
+
+static const char *const mpeg_video_rotation[] = {
+	"No Rotation",
+	"90 Degree Rotation",
+	"180 Degree Rotation",
+	"270 Degree Rotation",
+	NULL
+};
+
+static const char *const h264_video_entropy_cabac_model[] = {
+	"Model 0",
+	"Model 1",
+	"Model 2",
+	NULL
+};
+static const struct msm_vidc_ctrl msm_venc_ctrls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
+ .name = "Frame Rate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MIN_FRAME_RATE,
+ .maximum = MAX_FRAME_RATE,
+ .default_value = DEFAULT_FRAME_RATE,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD,
+ .name = "IDR Period",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 10*MAX_FRAME_RATE,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES,
+ .name = "Intra Period for P frames",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 10*DEFAULT_FRAME_RATE,
+ .default_value = 2*DEFAULT_FRAME_RATE-1,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
+ .name = "Intra Period for B frames",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 10*DEFAULT_FRAME_RATE,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME,
+ .name = "Request I Frame",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL,
+ .name = "Rate Control",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
+ .step = 0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR)
+ ),
+ .qmenu = mpeg_video_rate_control,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .name = "Bit Rate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MIN_BIT_RATE,
+ .maximum = MAX_BIT_RATE,
+ .default_value = DEFAULT_BIT_RATE,
+ .step = BIT_RATE_STEP,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ .name = "Entropy Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .step = 0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL,
+ .name = "CABAC Model",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0,
+ .step = 0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2)
+ ),
+ .qmenu = h264_video_entropy_cabac_model,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .name = "H264 Profile",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .step = 1,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .name = "H264 Level",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
+ .name = "Rotation",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE,
+ .step = 0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)
+ ),
+ .qmenu = mpeg_video_rotation,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ .name = "I Frame Quantization",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 51,
+ .default_value = I_FRAME_QP,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+ .name = "P Frame Quantization",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 51,
+ .default_value = P_FRAME_QP,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+ .name = "B Frame Quantization",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 51,
+ .default_value = B_FRAME_QP,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ .name = "Slice Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .step = 1,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+ .name = "Slice Byte Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = MIN_SLICE_BYTE_SIZE,
+ .maximum = MAX_SLICE_BYTE_SIZE,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ .name = "Slice MB Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = MAX_SLICE_MB_SIZE,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE,
+ .name = "Intra Refresh Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE,
+ .step = 0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_ADAPTIVE) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC_ADAPTIVE) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS,
+ .name = "Intra Refresh AIR MBS",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = MAX_INTRA_REFRESH_MBS,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF,
+ .name = "Intra Refresh AIR REF",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = MAX_INTRA_REFRESH_MBS,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS,
+ .name = "Intra Refresh CIR MBS",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = MAX_INTRA_REFRESH_MBS,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ .name = "H.264 Loop Filter Alpha Offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ .name = "H.264 Loop Filter Beta Offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .default_value = 0,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ .name = "H.264 Loop Filter Mode",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .maximum = L_MODE,
+ .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .step = 1,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED) |
+ (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED) |
+ (1 << L_MODE)
+ ),
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
+
+/* Frame-size helpers: bytes needed for one buffer of the given format.
+ * 'plane' is unused because every format in this file is single-plane. */
+static u32 get_frame_size_nv12(int plane, u32 height, u32 width)
+{
+	/* NV12 4:2:0 = 1.5 bytes/pixel, dimensions rounded up to 32. */
+	return ((height + 31) & (~31)) * ((width + 31) & (~31)) * 3/2;
+}
+
+static u32 get_frame_size_nv21(int plane, u32 height, u32 width)
+{
+	/* NOTE(review): 2 bytes/pixel over-allocates vs. the 1.5x used for
+	 * NV12 above and skips the 32-alignment -- presumably deliberate
+	 * padding; confirm against the firmware's NV21 requirements. */
+	return height * width * 2;
+}
+
+static u32 get_frame_size_compressed(int plane, u32 height, u32 width)
+{
+	/* Worst-case bitstream buffer, sized like an uncompressed frame. */
+	return ((height + 31) & (~31)) * ((width + 31) & (~31)) * 3/2;
+}
+
+/*
+ * Cached "last set" encoder parameters.  V4L2 exposes several HAL properties
+ * as pairs of separate controls (e.g. entropy mode + CABAC model, P-frames +
+ * B-frames); these caches supply the other half when one control changes.
+ * NOTE(review): file-scope, so this state is shared by every encoder
+ * instance -- confirm the driver assumes a single session at a time.
+ */
+static struct hal_quantization
+	venc_quantization = {I_FRAME_QP, P_FRAME_QP, B_FRAME_QP};
+static struct hal_intra_period
+	venc_intra_period = {2*DEFAULT_FRAME_RATE-1 , 0};
+static struct hal_profile_level
+	venc_profile_level = {V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+			V4L2_MPEG_VIDEO_H264_LEVEL_1_0};
+static struct hal_h264_entropy_control
+	venc_h264_entropy_control = {V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+			V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0};
+static struct hal_multi_slice_control
+	venc_multi_slice_control = {V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE ,
+			0};
+
+/*
+ * Formats supported by the encoder.  OUTPUT entries are the raw-pixel input
+ * formats; CAPTURE entries are the compressed bitstream codecs produced.
+ */
+static const struct msm_vidc_format venc_formats[] = {
+	{
+		.name = "YCbCr Semiplanar 4:2:0",
+		.description = "Y/CbCr 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_nv12,
+		.type = OUTPUT_PORT,
+	},
+	{
+		.name = "Mpeg4",
+		.description = "Mpeg4 compressed format",
+		.fourcc = V4L2_PIX_FMT_MPEG4,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "H263",
+		.description = "H263 compressed format",
+		.fourcc = V4L2_PIX_FMT_H263,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "H264",
+		.description = "H264 compressed format",
+		.fourcc = V4L2_PIX_FMT_H264,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_compressed,
+		.type = CAPTURE_PORT,
+	},
+	{
+		.name = "YCrCb Semiplanar 4:2:0",
+		.description = "Y/CrCb 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV21,
+		.num_planes = 1,
+		.get_frame_size = get_frame_size_nv21,
+		.type = OUTPUT_PORT,
+	},
+};
+
+/*
+ * msm_venc_queue_setup() - vb2 queue_setup callback for the encoder.
+ * For the encoder, OUTPUT carries raw YUV input and CAPTURE carries the
+ * compressed bitstream.  Fix: buffer sizes are now computed from the format
+ * of the SAME port being configured; the original used fmts[OUTPUT_PORT]
+ * for CAPTURE and fmts[CAPTURE_PORT] for OUTPUT (swapped), matching the
+ * decoder's (correct) per-port pattern.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_venc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+				unsigned int *num_planes, unsigned long sizes[],
+				void *alloc_ctxs[])
+{
+	int i, rc = 0;
+	struct msm_vidc_inst *inst;
+	struct hal_frame_size frame_sz;
+	unsigned long flags;
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		*num_planes = 1;
+		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
+				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		for (i = 0; i < *num_planes; i++) {
+			/* Bitstream buffers sized by the CAPTURE format. */
+			sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
+					i, inst->height, inst->width);
+		}
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc) {
+			pr_err("Failed to open instance\n");
+			break;
+		}
+		frame_sz.buffer_type = HAL_BUFFER_INPUT;
+		frame_sz.width = inst->width;
+		frame_sz.height = inst->height;
+		pr_debug("width = %d, height = %d\n",
+			frame_sz.width, frame_sz.height);
+		rc = vidc_hal_session_set_property((void *)inst->session,
+				HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			pr_err("Failed to set hal property for framesize\n");
+			break;
+		}
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			pr_err("Failed to get buffer requirements: %d\n", rc);
+			break;
+		}
+		*num_planes = 1;
+		spin_lock_irqsave(&inst->lock, flags);
+		/* buffer[0] holds the HAL's input-buffer requirements. */
+		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual;
+		spin_unlock_irqrestore(&inst->lock, flags);
+		pr_debug("size = %d, alignment = %d, count = %d\n",
+			inst->buff_req.buffer[0].buffer_size,
+			inst->buff_req.buffer[0].buffer_alignment,
+			inst->buff_req.buffer[0].buffer_count_actual);
+		for (i = 0; i < *num_planes; i++) {
+			/* Raw input buffers sized by the OUTPUT format. */
+			sizes[i] = inst->fmts[OUTPUT_PORT]->get_frame_size(
+					i, inst->height, inst->width);
+		}
+
+		break;
+	default:
+		pr_err("Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * start_streaming() - move the encoder session to RUNNING and flush any
+ * buffers that were queued before both ports were streaming.
+ * Returns 0 on success or a negative errno.
+ */
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct vb2_buf_entry *temp;
+	struct list_head *ptr, *next;
+	rc = msm_comm_set_scratch_buffers(inst);
+	if (rc) {
+		pr_err("Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		pr_err("Failed to move inst: %p to start done state\n",
+			inst);
+		goto fail_start;
+	}
+	spin_lock_irqsave(&inst->lock, flags);
+	/* Drain buffers parked on pendingq while only one port streamed.
+	 * NOTE(review): msm_comm_qbuf() is called with the spinlock held --
+	 * confirm it cannot sleep. */
+	if (!list_empty(&inst->pendingq)) {
+		list_for_each_safe(ptr, next, &inst->pendingq) {
+			temp = list_entry(ptr, struct vb2_buf_entry, list);
+			rc = msm_comm_qbuf(temp->vb);
+			if (rc) {
+				pr_err("Failed to qbuf to hardware\n");
+				break;
+			}
+			list_del(&temp->list);
+			kfree(temp);
+		}
+	}
+	spin_unlock_irqrestore(&inst->lock, flags);
+	return rc;
+fail_start:
+	return rc;
+}
+
+/*
+ * msm_venc_start_streaming() - vb2 start_streaming callback.
+ * The session is only kicked once both queues are streaming, so whichever
+ * port starts second actually triggers start_streaming(inst).
+ */
+static int msm_venc_start_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	pr_debug("Streamon called on: %d capability\n", q->type);
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (inst->vb2_bufq[CAPTURE_PORT].streaming)
+			rc = start_streaming(inst);
+	} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (inst->vb2_bufq[OUTPUT_PORT].streaming)
+			rc = start_streaming(inst);
+	} else {
+		pr_err("Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * msm_venc_stop_streaming() - vb2 stop_streaming callback.
+ * Stopping the CAPTURE (bitstream) queue closes the session.
+ * NOTE(review): unlike the decoder's stop_streaming, this does not check
+ * whether the other queue is still streaming before moving to CLOSE_DONE --
+ * confirm this asymmetry is intended for the encoder.
+ */
+static int msm_venc_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	if (!q || !q->drv_priv) {
+		pr_err("Invalid input, q = %p\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	pr_debug("Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		rc = msm_comm_try_state(inst, MSM_VIDC_CLOSE_DONE);
+		break;
+	default:
+		pr_err("Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc)
+		pr_err("Failed to move inst: %p, cap = %d to state: %d\n",
+			inst, q->type, MSM_VIDC_CLOSE_DONE);
+	return rc;
+}
+
+/* vb2 buf_queue callback: hand a ready buffer straight to the session. */
+static void msm_venc_buf_queue(struct vb2_buffer *vb)
+{
+	int err = msm_comm_qbuf(vb);
+
+	if (err)
+		pr_err("Failed to queue buffer: %d\n", err);
+}
+
+/* vb2 queue operations for the encoder, handed out via the accessor below. */
+static const struct vb2_ops msm_venc_vb2q_ops = {
+	.queue_setup = msm_venc_queue_setup,
+	.start_streaming = msm_venc_start_streaming,
+	.buf_queue = msm_venc_buf_queue,
+	.stop_streaming = msm_venc_stop_streaming,
+};
+
+/* Accessor so other translation units can wire up the encoder's vb2 queues. */
+const struct vb2_ops *msm_venc_get_vb2q_ops(void)
+{
+	return &msm_venc_vb2q_ops;
+}
+
+static int msm_venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+
+ int rc = 0;
+ struct v4l2_control control;
+ struct hal_frame_rate frame_rate;
+ struct hal_request_iframe request_iframe;
+ struct hal_bitrate bitrate;
+ struct hal_profile_level profile_level;
+ struct hal_h264_entropy_control h264_entropy_control;
+ struct hal_quantization quantization;
+ struct hal_intra_period intra_period;
+ struct hal_idr_period idr_period;
+ struct hal_operations operations;
+ struct hal_intra_refresh intra_refresh;
+ struct hal_multi_slice_control multi_slice_control;
+ struct hal_h264_db_control h264_db_control;
+ u32 control_idx = 0;
+ u32 property_id = 0;
+ u32 property_val = 0;
+ void *pdata;
+ struct msm_vidc_inst *inst = container_of(ctrl->handler,
+ struct msm_vidc_inst, ctrl_handler);
+
+ control.id = ctrl->id;
+ control.value = ctrl->val;
+
+ switch (control.id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE:
+ property_id =
+ HAL_CONFIG_FRAME_RATE;
+ frame_rate.frame_rate = control.value;
+ frame_rate.buffer_type = HAL_BUFFER_OUTPUT;
+ pdata = &frame_rate;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD:
+ property_id =
+ HAL_CONFIG_VENC_IDR_PERIOD;
+ idr_period.idr_period = control.value;
+ pdata = &idr_period;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES:
+ property_id =
+ HAL_CONFIG_VENC_INTRA_PERIOD;
+ intra_period.pframes = control.value;
+ venc_intra_period.pframes = control.value;
+ intra_period.bframes = venc_intra_period.bframes;
+ pdata = &intra_period;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
+ property_id =
+ HAL_CONFIG_VENC_INTRA_PERIOD;
+ intra_period.bframes = control.value;
+ venc_intra_period.bframes = control.value;
+ intra_period.pframes = venc_intra_period.pframes;
+ pdata = &intra_period;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME:
+ property_id =
+ HAL_CONFIG_VENC_REQUEST_IFRAME;
+ request_iframe.enable = control.value;
+ pdata = &request_iframe;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL:
+ property_id =
+ HAL_PARAM_VENC_RATE_CONTROL;
+ property_val = control.value;
+ pdata = &property_val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ property_id =
+ HAL_CONFIG_VENC_TARGET_BITRATE;
+ bitrate.bit_rate = control.value;
+ pdata = &bitrate;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ property_id =
+ HAL_PARAM_VENC_H264_ENTROPY_CONTROL;
+ h264_entropy_control.entropy_mode = control.value;
+ venc_h264_entropy_control.entropy_mode = control.value;
+ h264_entropy_control.cabac_model =
+ venc_h264_entropy_control.cabac_model;
+ pdata = &h264_entropy_control;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL:
+ property_id =
+ HAL_PARAM_VENC_H264_ENTROPY_CONTROL;
+ h264_entropy_control.cabac_model = control.value;
+ venc_h264_entropy_control.cabac_model = control.value;
+ h264_entropy_control.entropy_mode =
+ venc_h264_entropy_control.entropy_mode;
+ pdata = &h264_entropy_control;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ property_id =
+ HAL_PARAM_PROFILE_LEVEL_CURRENT;
+
+ switch (control.value) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ control.value = HAL_H264_PROFILE_BASELINE;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ control.value = HAL_H264_PROFILE_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ control.value = HAL_H264_PROFILE_EXTENDED;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ control.value = HAL_H264_PROFILE_HIGH;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+ control.value = HAL_H264_PROFILE_HIGH10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+ control.value = HAL_H264_PROFILE_HIGH422;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+ control.value = HAL_H264_PROFILE_HIGH444;
+ break;
+ default:
+ break;
+ }
+ profile_level.profile = control.value;
+ venc_profile_level.profile = control.value;
+ profile_level.level = venc_profile_level.level;
+ pdata = &profile_level;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ property_id =
+ HAL_PARAM_PROFILE_LEVEL_CURRENT;
+
+ switch (control.value) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ control.value = HAL_H264_LEVEL_1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ control.value = HAL_H264_LEVEL_1b;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ control.value = HAL_H264_LEVEL_11;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ control.value = HAL_H264_LEVEL_12;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ control.value = HAL_H264_LEVEL_13;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ control.value = HAL_H264_LEVEL_2;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ control.value = HAL_H264_LEVEL_21;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ control.value = HAL_H264_LEVEL_22;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ control.value = HAL_H264_LEVEL_3;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ control.value = HAL_H264_LEVEL_31;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ control.value = HAL_H264_LEVEL_32;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ control.value = HAL_H264_LEVEL_4;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ control.value = HAL_H264_LEVEL_41;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ control.value = HAL_H264_LEVEL_42;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ control.value = HAL_H264_LEVEL_3;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ control.value = HAL_H264_LEVEL_51;
+ break;
+ default:
+ break;
+ }
+ profile_level.level = control.value;
+ venc_profile_level.level = control.value;
+ profile_level.profile = venc_profile_level.profile;
+ pdata = &profile_level;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
+ property_id =
+ HAL_CONFIG_VPE_OPERATIONS;
+ operations.rotate = control.value;
+ pdata = &operations;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP;
+ quantization.qpi = control.value;
+ venc_quantization.qpi = control.value;
+ quantization.qpp = venc_quantization.qpp;
+ quantization.qpb = venc_quantization.qpb;
+ pdata = &quantization;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP;
+ quantization.qpp = control.value;
+ venc_quantization.qpp = control.value;
+ quantization.qpi = venc_quantization.qpi;
+ quantization.qpb = venc_quantization.qpb;
+ pdata = &quantization;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP;
+ quantization.qpb = control.value;
+ venc_quantization.qpb = control.value;
+ quantization.qpi = venc_quantization.qpi;
+ quantization.qpp = venc_quantization.qpp;
+ pdata = &quantization;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ property_id =
+ HAL_PARAM_VENC_MULTI_SLICE_CONTROL;
+ multi_slice_control.multi_slice = control.value;
+ venc_multi_slice_control.multi_slice = control.value;
+ multi_slice_control.slice_size =
+ venc_multi_slice_control.slice_size;
+ pdata = &multi_slice_control;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ property_id =
+ HAL_PARAM_VENC_MULTI_SLICE_CONTROL;
+ multi_slice_control.multi_slice =
+ venc_multi_slice_control.multi_slice;
+ multi_slice_control.slice_size = control.value;
+ venc_multi_slice_control.slice_size = control.value;
+ pdata = &multi_slice_control;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+ property_id =
+ HAL_PARAM_VENC_INTRA_REFRESH;
+ intra_refresh.mode = control.value;
+ pdata = &intra_refresh;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS:
+ property_id =
+ HAL_PARAM_VENC_INTRA_REFRESH;
+ intra_refresh.air_mbs = control.value;
+ pdata = &intra_refresh;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF:
+ property_id =
+ HAL_PARAM_VENC_INTRA_REFRESH;
+ intra_refresh.air_ref = control.value;
+ pdata = &intra_refresh;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS:
+ property_id =
+ HAL_PARAM_VENC_INTRA_REFRESH;
+ intra_refresh.cir_mbs = control.value;
+ pdata = &intra_refresh;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ property_id =
+ HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ h264_db_control.mode = control.value;
+ pdata = &h264_db_control;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ property_id =
+ HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ h264_db_control.slice_alpha_offset = control.value;
+ pdata = &h264_db_control;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ property_id =
+ HAL_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ h264_db_control.slicebeta_offset = control.value;
+ pdata = &h264_db_control;
+ default:
+ break;
+ }
+ if (property_id) {
+ pr_debug("Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
+ property_id,
+ msm_venc_ctrls[control_idx].id,
+ control.value);
+ rc = vidc_hal_session_set_property((void *)inst->session,
+ property_id, pdata);
+ }
+ if (rc)
+ pr_err("Failed to set hal property for framesize\n");
+ return rc;
+}
+/* Volatile-control read hook for the v4l2 ctrl framework.
+ * No encoder controls are volatile yet, so this is a no-op stub.
+ */
+static int msm_venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	return 0;
+}
+
+/* Control ops registered with every encoder control handler. */
+static const struct v4l2_ctrl_ops msm_venc_ctrl_ops = {
+
+	.s_ctrl = msm_venc_op_s_ctrl,
+	.g_volatile_ctrl = msm_venc_op_g_volatile_ctrl,
+};
+
+/* Expose the encoder control ops to other compilation units. */
+const struct v4l2_ctrl_ops *msm_venc_get_ctrl_ops(void)
+{
+	return &msm_venc_ctrl_ops;
+}
+
+/* Seed an encoder instance with its default formats and resolution.
+ * NOTE(review): venc_formats[0]/[1] are assumed to be the default
+ * OUTPUT (raw) and CAPTURE (bitstream) entries — confirm against the
+ * venc_formats table definition.
+ */
+int msm_venc_inst_init(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		pr_err("Invalid input = %p\n", inst);
+		return -EINVAL;
+	}
+	inst->width = DEFAULT_WIDTH;
+	inst->height = DEFAULT_HEIGHT;
+	inst->fmts[OUTPUT_PORT] = &venc_formats[0];
+	inst->fmts[CAPTURE_PORT] = &venc_formats[1];
+	return 0;
+}
+
+/* Set a control via the instance's v4l2 control handler. */
+int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	return v4l2_s_ctrl(&inst->ctrl_handler, ctrl);
+}
+/* Get a control value via the instance's v4l2 control handler. */
+int msm_venc_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl)
+{
+	return v4l2_g_ctrl(&inst->ctrl_handler, ctrl);
+}
+
+/* VIDIOC_QUERYCAP: report driver identity and multiplanar streaming caps. */
+int msm_venc_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap)
+{
+	if (!inst || !cap) {
+		pr_err("Invalid input, inst = %p, cap = %p\n", inst, cap);
+		return -EINVAL;
+	}
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+	strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, MSM_VENC_DVC_NAME, sizeof(cap->card));
+	cap->bus_info[0] = 0;
+	cap->version = MSM_VIDC_VERSION;
+	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+			V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+			V4L2_CAP_STREAMING;
+	return 0;
+}
+
+/* VIDIOC_ENUM_FMT: enumerate the f->index'th format on the given port.
+ * Bitstream (OUTPUT) formats are flagged compressed.
+ */
+int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, f = %p\n", inst, f);
+		return -EINVAL;
+	}
+	switch (f->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		fmt = msm_comm_get_pixel_fmt_index(venc_formats,
+			ARRAY_SIZE(venc_formats), f->index, CAPTURE_PORT);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		fmt = msm_comm_get_pixel_fmt_index(venc_formats,
+			ARRAY_SIZE(venc_formats), f->index, OUTPUT_PORT);
+		f->flags = V4L2_FMT_FLAG_COMPRESSED;
+		break;
+	default:
+		break;
+	}
+
+	memset(f->reserved, 0, sizeof(f->reserved));
+	if (!fmt) {
+		pr_err("No more formats found\n");
+		return -EINVAL;
+	}
+	strlcpy(f->description, fmt->description,
+			sizeof(f->description));
+	f->pixelformat = fmt->fourcc;
+	return 0;
+}
+
+/* VIDIOC_S_FMT: select a pixel/bitstream format on one port.
+ * On the OUTPUT port the requested resolution is latched into the
+ * instance before the lookup; on a successful CAPTURE-port set the
+ * session is driven to OPEN_DONE.
+ */
+int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int rc = 0;
+	int i;
+
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, format = %p\n", inst, f);
+		return -EINVAL;
+	}
+	switch (f->type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
+			CAPTURE_PORT);
+		if (fmt && fmt->type != CAPTURE_PORT) {
+			pr_err("Format: %d not supported on CAPTURE port\n",
+					f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto exit;
+		}
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		inst->width = f->fmt.pix_mp.width;
+		inst->height = f->fmt.pix_mp.height;
+		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
+			OUTPUT_PORT);
+		if (fmt && fmt->type != OUTPUT_PORT) {
+			pr_err("Format: %d not supported on OUTPUT port\n",
+					f->fmt.pix_mp.pixelformat);
+			rc = -EINVAL;
+			goto exit;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (!fmt) {
+		pr_err("Buf type not recognized, type = %d\n",
+			f->type);
+		rc = -EINVAL;
+		goto exit;
+	}
+	/* Report the per-plane buffer sizes the chosen format requires. */
+	for (i = 0; i < fmt->num_planes; ++i) {
+		f->fmt.pix_mp.plane_fmt[i].sizeimage =
+			fmt->get_frame_size(i, f->fmt.pix_mp.height,
+					f->fmt.pix_mp.width);
+	}
+	inst->fmts[fmt->type] = fmt;
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+		if (rc)
+			pr_err("Failed to open instance\n");
+	}
+exit:
+	return rc;
+}
+
+/* VIDIOC_G_FMT: return the currently selected format and the
+ * instance resolution for the requested port.
+ */
+int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
+{
+	const struct msm_vidc_format *fmt = NULL;
+	int i;
+
+	if (!inst || !f) {
+		pr_err("Invalid input, inst = %p, format = %p\n", inst, f);
+		return -EINVAL;
+	}
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		fmt = inst->fmts[CAPTURE_PORT];
+	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		fmt = inst->fmts[OUTPUT_PORT];
+
+	if (!fmt) {
+		pr_err("Buf type not recognized, type = %d\n",
+			f->type);
+		return -EINVAL;
+	}
+	f->fmt.pix_mp.pixelformat = fmt->fourcc;
+	f->fmt.pix_mp.height = inst->height;
+	f->fmt.pix_mp.width = inst->width;
+	for (i = 0; i < fmt->num_planes; ++i)
+		f->fmt.pix_mp.plane_fmt[i].sizeimage =
+			fmt->get_frame_size(i, inst->height, inst->width);
+	return 0;
+}
+
+/* VIDIOC_REQBUFS: forward the request to the vb2 queue of the
+ * matching port.
+ */
+int msm_venc_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b)
+{
+	struct vb2_queue *queue;
+	int rc;
+
+	if (!inst || !b) {
+		pr_err("Invalid input, inst = %p, buffer = %p\n", inst, b);
+		return -EINVAL;
+	}
+	queue = msm_comm_get_vb2q(inst, b->type);
+	if (!queue) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_reqbufs(queue, b);
+	if (rc)
+		pr_err("Failed to get reqbufs, %d\n", rc);
+	return rc;
+}
+
+/* VIDIOC_PREPARE_BUF: register userspace CAPTURE buffers with the HAL.
+ * OUTPUT buffers need no registration here. Each plane is handed to
+ * the firmware individually as a HAL_BUFFER_OUTPUT entry.
+ */
+int msm_venc_prepare_buf(struct msm_vidc_inst *inst,
+	struct v4l2_buffer *b)
+{
+	int rc = 0;
+	int i;
+	struct vidc_buffer_addr_info buffer_info;
+
+	/* Fix: validate arguments like every sibling ioctl handler does;
+	 * b->type and inst->session were previously dereferenced unchecked.
+	 */
+	if (!inst || !b) {
+		pr_err("Invalid input, inst = %p, buffer = %p\n", inst, b);
+		return -EINVAL;
+	}
+	switch (b->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		for (i = 0; i < b->length; i++) {
+			pr_debug("device_addr = %ld, size = %d\n",
+				b->m.planes[i].m.userptr,
+				b->m.planes[i].length);
+			buffer_info.buffer_size = b->m.planes[i].length;
+			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr =
+				b->m.planes[i].m.userptr;
+			buffer_info.extradata_size = 0;
+			buffer_info.extradata_addr = 0;
+			rc = vidc_hal_session_set_buffers((void *)inst->session,
+					&buffer_info);
+			if (rc)
+				pr_err("vidc_hal_session_set_buffers failed");
+		}
+		break;
+	default:
+		pr_err("Buffer type not recognized: %d\n", b->type);
+		break;
+	}
+	return rc;
+}
+
+/* VIDIOC_QBUF: queue a buffer on the vb2 queue for its buffer type. */
+int msm_venc_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	int rc;
+	struct vb2_queue *queue = msm_comm_get_vb2q(inst, b->type);
+
+	if (!queue) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_qbuf(queue, b);
+	if (rc)
+		pr_err("Failed to qbuf, %d\n", rc);
+	return rc;
+}
+
+/* VIDIOC_DQBUF: dequeue a completed buffer, blocking until one is
+ * available (vb2_dqbuf called with nonblocking = true here means the
+ * vb2 core treats the file as blocking per its API).
+ */
+int msm_venc_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
+{
+	struct vb2_queue *q = NULL;
+	int rc = 0;
+	q = msm_comm_get_vb2q(inst, b->type);
+	if (!q) {
+		pr_err("Failed to find buffer queue for type = %d\n", b->type);
+		return -EINVAL;
+	}
+	rc = vb2_dqbuf(q, b, true);
+	if (rc)
+		pr_err("Failed to dqbuf, %d\n", rc);
+	return rc;
+}
+
+/* VIDIOC_STREAMON: start streaming on the queue for buffer type i. */
+int msm_venc_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc;
+	struct vb2_queue *queue = msm_comm_get_vb2q(inst, i);
+
+	if (!queue) {
+		pr_err("Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	pr_debug("Calling streamon\n");
+	rc = vb2_streamon(queue, i);
+	if (rc)
+		pr_err("streamon failed on port: %d\n", i);
+	return rc;
+}
+
+/* VIDIOC_STREAMOFF: stop streaming on the queue for buffer type i. */
+int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i)
+{
+	int rc;
+	struct vb2_queue *queue = msm_comm_get_vb2q(inst, i);
+
+	if (!queue) {
+		pr_err("Failed to find buffer queue for type = %d\n", i);
+		return -EINVAL;
+	}
+	pr_debug("Calling streamoff\n");
+	rc = vb2_streamoff(queue, i);
+	if (rc)
+		pr_err("streamoff failed on port: %d\n", i);
+	return rc;
+}
+
+/* Register all encoder controls with the instance's control handler.
+ * Private (driver-specific) controls go through v4l2_ctrl_new_custom;
+ * standard controls use v4l2_ctrl_new_std / v4l2_ctrl_new_std_menu.
+ */
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
+{
+
+	int idx = 0;
+	/* Fix: zero-initialize so fields this loop never assigns (e.g.
+	 * elem_size, dims) don't pass stack garbage to the ctrl framework.
+	 */
+	struct v4l2_ctrl_config ctrl_cfg = {0};
+	int ret_val = 0;
+	ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, NUM_CTRLS);
+	if (ret_val) {
+		pr_err("CTRL ERR: Control handler init failed, %d\n",
+			inst->ctrl_handler.error);
+		return ret_val;
+	}
+
+	for (; idx < NUM_CTRLS; idx++) {
+		if (IS_PRIV_CTRL(msm_venc_ctrls[idx].id)) {
+			ctrl_cfg.def = msm_venc_ctrls[idx].default_value;
+			ctrl_cfg.flags = 0;
+			ctrl_cfg.id = msm_venc_ctrls[idx].id;
+			ctrl_cfg.max = msm_venc_ctrls[idx].maximum;
+			ctrl_cfg.min = msm_venc_ctrls[idx].minimum;
+			ctrl_cfg.menu_skip_mask =
+				msm_venc_ctrls[idx].menu_skip_mask;
+			ctrl_cfg.name = msm_venc_ctrls[idx].name;
+			ctrl_cfg.ops = &msm_venc_ctrl_ops;
+			ctrl_cfg.step = msm_venc_ctrls[idx].step;
+			ctrl_cfg.type = msm_venc_ctrls[idx].type;
+			ctrl_cfg.qmenu = msm_venc_ctrls[idx].qmenu;
+			v4l2_ctrl_new_custom(&inst->ctrl_handler,
+				&ctrl_cfg, NULL);
+		} else {
+			if (msm_venc_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) {
+				v4l2_ctrl_new_std_menu(&inst->ctrl_handler,
+					&msm_venc_ctrl_ops,
+					msm_venc_ctrls[idx].id,
+					msm_venc_ctrls[idx].maximum,
+					msm_venc_ctrls[idx].menu_skip_mask,
+					msm_venc_ctrls[idx].default_value);
+			} else {
+				v4l2_ctrl_new_std(&inst->ctrl_handler,
+					&msm_venc_ctrl_ops,
+					msm_venc_ctrls[idx].id,
+					msm_venc_ctrls[idx].minimum,
+					msm_venc_ctrls[idx].maximum,
+					msm_venc_ctrls[idx].step,
+					msm_venc_ctrls[idx].default_value);
+			}
+		}
+	}
+	/* Individual new-control failures accumulate in handler->error. */
+	ret_val = inst->ctrl_handler.error;
+	if (ret_val)
+		pr_err("CTRL ERR: Error adding ctrls to ctrl handle, %d\n",
+			inst->ctrl_handler.error);
+	return ret_val;
+}
diff --git a/drivers/media/video/msm_vidc/msm_venc.h b/drivers/media/video/msm_vidc/msm_venc.h
new file mode 100644
index 0000000..4a156dd
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_venc.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MSM_VENC_H_
+#define _MSM_VENC_H_
+
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+
+/* Fix: prototypes below previously declared "void *instance" while the
+ * definitions in msm_venc.c take "struct msm_vidc_inst *" — conflicting
+ * types that break compilation once msm_venc.c includes this header.
+ */
+int msm_venc_inst_init(struct msm_vidc_inst *inst);
+int msm_venc_ctrl_init(struct msm_vidc_inst *inst);
+int msm_venc_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap);
+int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f);
+int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f);
+int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *a);
+int msm_venc_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *a);
+int msm_venc_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b);
+int msm_venc_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b);
+int msm_venc_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i);
+struct vb2_ops *msm_venc_get_vb2q_ops(void);
+
+#endif
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
new file mode 100644
index 0000000..09d37ce
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -0,0 +1,348 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <media/msm_vidc.h>
+#include "msm_vidc_internal.h"
+#include "msm_vdec.h"
+#include "msm_venc.h"
+#include "msm_vidc_common.h"
+#include "msm_smem.h"
+
+/* poll() backend: wake on pending v4l2 events or completed buffers.
+ * Returns POLLPRI for events, POLLERR when neither queue is streaming,
+ * POLLIN/POLLOUT when a done buffer is available on capture/output.
+ */
+int msm_vidc_poll(void *instance, struct file *filp,
+		struct poll_table_struct *wait)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = instance;
+	struct vb2_queue *outq = &inst->vb2_bufq[OUTPUT_PORT];
+	struct vb2_queue *capq = &inst->vb2_bufq[CAPTURE_PORT];
+	struct vb2_buffer *out_vb = NULL;
+	struct vb2_buffer *cap_vb = NULL;
+	unsigned long flags;
+	/* Fix: the poll_wait + v4l2_event_pending pair was duplicated
+	 * verbatim before and after the streaming check; once is enough.
+	 */
+	poll_wait(filp, &inst->event_handler.events->wait, wait);
+	if (v4l2_event_pending(&inst->event_handler))
+		return POLLPRI;
+	if (!outq->streaming && !capq->streaming) {
+		pr_err("Returning POLLERR from here: %d, %d\n",
+			outq->streaming, capq->streaming);
+		return POLLERR;
+	}
+	poll_wait(filp, &capq->done_wq, wait);
+	poll_wait(filp, &outq->done_wq, wait);
+	/* Peek at each done list under its lock; report readiness only for
+	 * buffers that actually completed (DONE or ERROR state).
+	 */
+	spin_lock_irqsave(&capq->done_lock, flags);
+	if (!list_empty(&capq->done_list))
+		cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
+								done_entry);
+	if (cap_vb && (cap_vb->state == VB2_BUF_STATE_DONE
+			|| cap_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&capq->done_lock, flags);
+	spin_lock_irqsave(&outq->done_lock, flags);
+	if (!list_empty(&outq->done_list))
+		out_vb = list_first_entry(&outq->done_list, struct vb2_buffer,
+								done_entry);
+	if (out_vb && (out_vb->state == VB2_BUF_STATE_DONE
+			|| out_vb->state == VB2_BUF_STATE_ERROR))
+		rc |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&outq->done_lock, flags);
+	return rc;
+}
+
+/* Dispatch VIDIOC_QUERYCAP to the decoder or encoder backend. */
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_querycap(instance, cap);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_querycap(instance, cap);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_ENUM_FMT to the decoder or encoder backend. */
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_enum_fmt(instance, f);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_enum_fmt(instance, f);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_S_FMT to the decoder or encoder backend. */
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_s_fmt(instance, f);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_s_fmt(instance, f);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_G_FMT to the decoder or encoder backend. */
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_g_fmt(instance, f);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_g_fmt(instance, f);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_S_CTRL to the decoder or encoder backend. */
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_s_ctrl(instance, control);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_s_ctrl(instance, control);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_G_CTRL to the decoder or encoder backend. */
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_g_ctrl(instance, control);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_g_ctrl(instance, control);
+	default:
+		return -EINVAL;
+	}
+}
+/* Dispatch VIDIOC_REQBUFS to the decoder or encoder backend. */
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_reqbufs(instance, b);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_reqbufs(instance, b);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Dispatch VIDIOC_PREPARE_BUF to the decoder or encoder backend. */
+int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_prepare_buf(instance, b);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_prepare_buf(instance, b);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Dispatch buffer release; only decoder sessions support it. */
+int msm_vidc_release_buf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (inst->session_type != MSM_VIDC_DECODER)
+		return -EINVAL;
+	return msm_vdec_release_buf(instance, b);
+}
+
+/* Dispatch VIDIOC_QBUF to the decoder or encoder backend. */
+int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_qbuf(instance, b);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_qbuf(instance, b);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Dispatch VIDIOC_DQBUF to the decoder or encoder backend. */
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_dqbuf(instance, b);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_dqbuf(instance, b);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Dispatch VIDIOC_STREAMON to the decoder or encoder backend. */
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_streamon(instance, i);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_streamon(instance, i);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Dispatch VIDIOC_STREAMOFF to the decoder or encoder backend. */
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	switch (inst->session_type) {
+	case MSM_VIDC_DECODER:
+		return msm_vdec_streamoff(instance, i);
+	case MSM_VIDC_ENCODER:
+		return msm_venc_streamoff(instance, i);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* vb2 USERPTR acquire hook — placeholder, no mapping performed yet. */
+void *vidc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+			unsigned long size, int write)
+{
+	return NULL;
+}
+
+/* vb2 USERPTR release hook — placeholder matching vidc_get_userptr. */
+void vidc_put_userptr(void *buf_priv)
+{
+}
+
+/* Memory ops shared by both vb2 queues (USERPTR stubs only for now). */
+static const struct vb2_mem_ops msm_vidc_vb2_mem_ops = {
+	.get_userptr = vidc_get_userptr,
+	.put_userptr = vidc_put_userptr,
+};
+
+/* Configure and register one vb2 queue (capture or output) for the
+ * instance, wiring in the session-type-specific queue ops.
+ */
+static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
+		enum v4l2_buf_type type, enum session_type sess)
+{
+	struct vb2_queue *q;
+
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		q = &inst->vb2_bufq[CAPTURE_PORT];
+		break;
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		q = &inst->vb2_bufq[OUTPUT_PORT];
+		break;
+	default:
+		pr_err("buf_type = %d not recognised\n", type);
+		return -EINVAL;
+	}
+	q->type = type;
+	q->io_modes = VB2_MMAP | VB2_USERPTR;
+	q->io_flags = 0;
+	if (sess == MSM_VIDC_DECODER)
+		q->ops = msm_vdec_get_vb2q_ops();
+	else if (sess == MSM_VIDC_ENCODER)
+		q->ops = msm_venc_get_vb2q_ops();
+	q->mem_ops = &msm_vidc_vb2_mem_ops;
+	q->drv_priv = inst;
+	return vb2_queue_init(q);
+}
+
+/* Initialize an instance: locks, lists, memory client, session-specific
+ * defaults, both vb2 queues, and core-init state; finally link the
+ * instance into the core's list.
+ */
+int msm_vidc_open(void *vidc_inst, int core_id, int session_type)
+{
+	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)vidc_inst;
+	struct msm_vidc_core *core = NULL;
+	unsigned long flags;
+	int rc = 0;
+	int i = 0;
+	if (core_id >= MSM_VIDC_CORES_MAX ||
+			session_type >= MSM_VIDC_MAX_DEVICES) {
+		pr_err("Invalid input, core_id = %d, session = %d\n",
+			core_id, session_type);
+		/* Fix: these paths previously returned rc == 0 (success). */
+		rc = -EINVAL;
+		goto err_invalid_core;
+	}
+	core = get_vidc_core(core_id);
+	if (!core) {
+		pr_err("Failed to find core for core_id = %d\n", core_id);
+		rc = -EINVAL;
+		goto err_invalid_core;
+	}
+
+	mutex_init(&inst->sync_lock);
+	spin_lock_init(&inst->lock);
+	inst->session_type = session_type;
+	INIT_LIST_HEAD(&inst->pendingq);
+	INIT_LIST_HEAD(&inst->internalbufs);
+	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
+	inst->core = core;
+	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
+		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
+		init_completion(&inst->completions[i]);
+	}
+	inst->mem_client = msm_smem_new_client(SMEM_ION);
+	if (!inst->mem_client) {
+		pr_err("Failed to create memory client\n");
+		rc = -ENOMEM;
+		goto fail_mem_client;
+	}
+	if (session_type == MSM_VIDC_DECODER) {
+		msm_vdec_inst_init(inst);
+		msm_vdec_ctrl_init(inst);
+	} else if (session_type == MSM_VIDC_ENCODER) {
+		msm_venc_inst_init(inst);
+		msm_venc_ctrl_init(inst);
+	}
+	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+			session_type);
+	if (rc) {
+		pr_err("Failed to initialize vb2 queue on capture port\n");
+		goto fail_init;
+	}
+	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+			session_type);
+	if (rc) {
+		/* Fix: message previously said "capture port". */
+		pr_err("Failed to initialize vb2 queue on output port\n");
+		goto fail_init;
+	}
+	rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT);
+	if (rc) {
+		pr_err("Failed to move video instance to init state\n");
+		goto fail_init;
+	}
+	spin_lock_irqsave(&core->lock, flags);
+	list_add_tail(&inst->list, &core->instances);
+	spin_unlock_irqrestore(&core->lock, flags);
+	return rc;
+fail_init:
+	msm_smem_delete_client(inst->mem_client);
+fail_mem_client:
+	/* NOTE(review): inst is allocated by the caller; freeing it here
+	 * assumes the caller does not touch it after a failed open —
+	 * confirm against the caller's contract.
+	 */
+	kfree(inst);
+	inst = NULL;
+err_invalid_core:
+	return rc;
+}
+
+/* Free per-instance bookkeeping: drain the pending-buffer queue and the
+ * internal buffer list, then destroy the smem client.
+ * NOTE(review): msm_smem_free() is called while holding inst->lock with
+ * IRQs disabled — if it can sleep (ION frees often do), this needs to
+ * move outside the spinlock; confirm against the smem implementation.
+ */
+static void cleanup_instance(struct msm_vidc_inst *inst)
+{
+	unsigned long flags;
+	struct list_head *ptr, *next;
+	struct vb2_buf_entry *entry;
+	struct internal_buf *buf;
+	if (inst) {
+		spin_lock_irqsave(&inst->lock, flags);
+		if (!list_empty(&inst->pendingq)) {
+			list_for_each_safe(ptr, next, &inst->pendingq) {
+				entry = list_entry(ptr, struct vb2_buf_entry,
+						list);
+				list_del(&entry->list);
+				kfree(entry);
+			}
+		}
+		if (!list_empty(&inst->internalbufs)) {
+			list_for_each_safe(ptr, next, &inst->internalbufs) {
+				buf = list_entry(ptr, struct internal_buf,
+						list);
+				list_del(&buf->list);
+				msm_smem_free(inst->mem_client, buf->handle);
+				kfree(buf);
+			}
+		}
+		spin_unlock_irqrestore(&inst->lock, flags);
+		msm_smem_delete_client(inst->mem_client);
+	}
+}
+
+/* Tear down an instance: unlink it from its core's instance list, move
+ * the session to CORE_UNINIT, and release per-instance resources.
+ * Always returns 0 — a failed state transition is logged but close
+ * still proceeds so resources are not leaked.
+ */
+int msm_vidc_close(void *instance)
+{
+	struct msm_vidc_inst *inst = instance;
+	struct msm_vidc_inst *temp;
+	struct msm_vidc_core *core;
+	struct list_head *ptr, *next;
+	int rc = 0;
+	core = inst->core;
+	mutex_lock(&core->sync_lock);
+	/* Safe iteration: list_del on the matching node while walking. */
+	list_for_each_safe(ptr, next, &core->instances) {
+		temp = list_entry(ptr, struct msm_vidc_inst, list);
+		if (temp == inst)
+			list_del(&inst->list);
+	}
+	mutex_unlock(&core->sync_lock);
+	rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
+	if (rc)
+		pr_err("Failed to move video instance to uninit state\n");
+	cleanup_instance(inst);
+	pr_debug("Closed the instance\n");
+	return 0;
+}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
new file mode 100644
index 0000000..31879b7
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -0,0 +1,1016 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "msm_vidc_common.h"
+#include "vidc_hal_api.h"
+#include "msm_smem.h"
+
+/* Max time (ms) to wait for a firmware response before giving up. */
+#define HW_RESPONSE_TIMEOUT 5000
+
+/* Non-zero when the current (flipped) state __p has already reached the
+ * desired state __d.  Instance states are ordered integers, so ">=" means
+ * "at or past". */
+#define IS_ALREADY_IN_STATE(__p, __d) ({\
+	int __rc = (__p >= __d);\
+	__rc; \
+})
+
+/*
+ * Look up a registered core by id in the global driver list.
+ * Returns the core on success, NULL when the id is out of range or no
+ * core with that id is registered.
+ */
+struct msm_vidc_core *get_vidc_core(int core_id)
+{
+	struct msm_vidc_core *core;
+	int found = 0;
+	unsigned long flags;
+	if (core_id > MSM_VIDC_CORES_MAX) {
+		pr_err("Core id = %d is greater than max = %d\n",
+			core_id, MSM_VIDC_CORES_MAX);
+		return NULL;
+	}
+	spin_lock_irqsave(&vidc_driver->lock, flags);
+	list_for_each_entry(core, &vidc_driver->cores, list) {
+		/* Braces are required here: the unbraced original made the
+		 * break unconditional, so only the FIRST list entry was
+		 * ever compared against core_id. */
+		if (core && core->id == core_id) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vidc_driver->lock, flags);
+	if (found)
+		return core;
+	return NULL;
+}
+
+/*
+ * Return the index-th entry of type fmt_type from the format table, or
+ * NULL if the table does not contain that many entries of the type.
+ */
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
+	const struct msm_vidc_format fmt[], int size, int index, int fmt_type)
+{
+	int pos;
+	int matched = 0;
+	if (!fmt || index < 0) {
+		pr_err("Invalid inputs, fmt = %p, index = %d\n",
+			fmt, index);
+		return NULL;
+	}
+	for (pos = 0; pos < size; pos++) {
+		if (fmt[pos].type == fmt_type) {
+			/* Count only entries of the requested type. */
+			if (matched == index)
+				return &fmt[pos];
+			matched++;
+		}
+	}
+	pr_err("Format not found\n");
+	return NULL;
+}
+/*
+ * Return the first table entry whose fourcc matches, or NULL if none
+ * does.  fmt_type is accepted for signature symmetry but not used.
+ */
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+	const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type)
+{
+	int pos;
+	if (!fmt) {
+		pr_err("Invalid inputs, fmt = %p\n", fmt);
+		return NULL;
+	}
+	for (pos = 0; pos < size; pos++) {
+		if (fmt[pos].fourcc == fourcc)
+			return &fmt[pos];
+	}
+	pr_err("Format not found\n");
+	return NULL;
+}
+
+/*
+ * Map a V4L2 buffer type to the instance's corresponding vb2 queue.
+ * Returns NULL for any type other than the two mplane types.
+ */
+struct vb2_queue *msm_comm_get_vb2q(
+	struct msm_vidc_inst *inst, enum v4l2_buf_type type)
+{
+	switch (type) {
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		return &inst->vb2_bufq[CAPTURE_PORT];
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		return &inst->vb2_bufq[OUTPUT_PORT];
+	default:
+		return NULL;
+	}
+}
+
+/*
+ * Firmware callback for SYS_INIT_DONE: wakes whoever is blocked in
+ * msm_comm_init_core_done() on the matching core completion.
+ */
+static void handle_sys_init_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_core *core;
+	struct vidc_hal_sys_init_done *sys_init_msg;
+	int index = SYS_MSG_INDEX(cmd);
+	if (!response) {
+		pr_err("Failed to get valid response for sys init\n");
+		return;
+	}
+	/* device_id in the response selects which registered core fired. */
+	core = get_vidc_core(response->device_id);
+	if (!core) {
+		pr_err("Wrong device_id received\n");
+		return;
+	}
+	pr_debug("index = %d\n", index);
+	pr_debug("ptr = %p\n", &(core->completions[index]));
+	complete(&(core->completions[index]));
+	/* The payload is only validated here, not consumed. */
+	sys_init_msg = response->data;
+	if (!sys_init_msg) {
+		pr_err("sys_init_done message not proper\n");
+		return;
+	}
+}
+
+/*
+ * Atomically (under inst->lock) move an instance to a new state.
+ * The debug print reads inst->state BEFORE the assignment, so it logs
+ * the old state alongside the new one.
+ */
+static inline void change_inst_state(struct msm_vidc_inst *inst,
+	enum instance_state state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&inst->lock, flags);
+	pr_debug("Moved inst: %p from state: %d to state: %d\n",
+		inst, inst->state, state);
+	inst->state = state;
+	spin_unlock_irqrestore(&inst->lock, flags);
+}
+
+/*
+ * Wake the waiter blocked on the per-session completion that matches
+ * the received command.  Returns 0 on success, -EINVAL if inst is NULL.
+ */
+static int signal_session_msg_receipt(enum command_response cmd,
+	struct msm_vidc_inst *inst)
+{
+	if (inst) {
+		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+		return 0;
+	}
+	pr_err("Invalid(%p) instance id\n", inst);
+	return -EINVAL;
+}
+
+/*
+ * Block until the session completion matching cmd fires, bounded by
+ * HW_RESPONSE_TIMEOUT.  Returns 0 on receipt, -EIO on timeout.
+ */
+static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
+	enum command_response cmd)
+{
+	int rc = wait_for_completion_timeout(
+		&inst->completions[SESSION_MSG_INDEX(cmd)],
+		msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+	/* Non-zero means the completion arrived before the timeout. */
+	if (rc)
+		return 0;
+	pr_err("Wait interrupted or timeout: %d\n", rc);
+	return -EIO;
+}
+
+/*
+ * Wait for the firmware acknowledgement (hal_cmd) that corresponds to
+ * desired_state, then record the transition.  If the flipped state has
+ * already reached desired_state the wait is skipped and 0 is returned.
+ */
+static int wait_for_state(struct msm_vidc_inst *inst,
+	enum instance_state flipped_state,
+	enum instance_state desired_state,
+	enum command_response hal_cmd)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto err_same_state;
+	}
+	pr_debug("Waiting for hal_cmd: %d\n", hal_cmd);
+	rc = wait_for_sess_signal_receipt(inst, hal_cmd);
+	if (!rc)
+		change_inst_state(inst, desired_state);
+err_same_state:
+	return rc;
+}
+
+/*
+ * Firmware callback for SESSION_INIT_DONE: wakes the waiter and queues
+ * an OPEN_DONE V4L2 event on the video device matching the session type.
+ */
+static void handle_session_init_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_core *core;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		signal_session_msg_receipt(cmd, inst);
+		core = inst->core;
+		/* Deliver the event on the encoder or decoder node. */
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+		else
+			vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+		dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+		dqevent.u.data[0] = (uint8_t)MSM_VIDC_OPEN_DONE;
+		v4l2_event_queue(vdev, &dqevent);
+		return;
+	} else {
+		pr_err("Failed to get valid response for session init\n");
+	}
+}
+
+/*
+ * Firmware callback for a port-reconfig event: record the new width and
+ * height on the instance and queue a DECODER_EVENT_CHANGE V4L2 event.
+ */
+static void handle_event_change(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_cb_event *event_notify;
+	struct msm_vidc_core *core;
+	/* Also validate response->data: the original dereferenced it
+	 * unchecked, unlike the sibling handle_session_prop_info(). */
+	if (!response || !response->data) {
+		pr_err("Failed to get valid response for event_change\n");
+		return;
+	}
+	inst = (struct msm_vidc_inst *)response->session_id;
+	core = inst->core;
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+	else
+		vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+	dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+	dqevent.u.data[0] = (uint8_t)MSM_VIDC_DECODER_EVENT_CHANGE;
+	event_notify = (struct msm_vidc_cb_event *) response->data;
+	/* Stash the new resolution for the reconfig path to pick up. */
+	inst->reconfig_height = event_notify->height;
+	inst->reconfig_width = event_notify->width;
+	inst->in_reconfig = true;
+	v4l2_event_queue(vdev, &dqevent);
+}
+
+/*
+ * Firmware callback for SESSION_PROPERTY_INFO: copy the reported buffer
+ * requirements into the instance (under inst->lock), then wake the
+ * waiter in msm_comm_try_get_bufreqs().
+ */
+static void handle_session_prop_info(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	unsigned long flags;
+	if (!response || !response->data) {
+		pr_err("Failed to get valid response for prop info\n");
+		return;
+	}
+	inst = (struct msm_vidc_inst *)response->session_id;
+	spin_lock_irqsave(&inst->lock, flags);
+	memcpy(&inst->buff_req, response->data,
+		sizeof(struct buffer_requirements));
+	spin_unlock_irqrestore(&inst->lock, flags);
+	/* Signal AFTER the copy so the waiter sees consistent buff_req. */
+	signal_session_msg_receipt(cmd, inst);
+}
+
+/*
+ * Firmware callback for LOAD_RESOURCE_DONE.  Nothing waits on this
+ * message, so only the payload is validated.  (The original extracted
+ * the session pointer into a set-but-unused local, which is a compiler
+ * warning; it is dropped here.)
+ */
+static void handle_load_resource_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	if (!response)
+		pr_err("Failed to get valid response for load resource\n");
+}
+
+/*
+ * Firmware callback for SESSION_START_DONE: wakes the waiter and queues
+ * a START_DONE V4L2 event on the matching video device.
+ */
+static void handle_start_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_core *core;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		signal_session_msg_receipt(cmd, inst);
+		core = inst->core;
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+		else
+			vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+		dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+		dqevent.u.data[0] = (uint8_t)MSM_VIDC_START_DONE;
+		v4l2_event_queue(vdev, &dqevent);
+	} else {
+		pr_err("Failed to get valid response for start\n");
+	}
+}
+
+/*
+ * Firmware callback for SESSION_STOP_DONE: wakes the waiter and queues
+ * a STOP_DONE V4L2 event on the matching video device.
+ */
+static void handle_stop_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_core *core;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		signal_session_msg_receipt(cmd, inst);
+		core = inst->core;
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+		else
+			vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+		dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+		dqevent.u.data[0] = (uint8_t)MSM_VIDC_STOP_DONE;
+		v4l2_event_queue(vdev, &dqevent);
+	} else {
+		pr_err("Failed to get valid response for stop\n");
+	}
+}
+
+/*
+ * Firmware callback for RELEASE_RESOURCE_DONE: just wake the waiter in
+ * the state machine; no V4L2 event is delivered for this message.
+ */
+static void handle_release_res_done(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	if (!response) {
+		pr_err("Failed to get valid response for release resource\n");
+		return;
+	}
+	signal_session_msg_receipt(cmd,
+		(struct msm_vidc_inst *)response->session_id);
+}
+
+/*
+ * Firmware callback for SESSION_FLUSH_DONE: queues a FLUSH_DONE V4L2
+ * event.  Note no completion is signalled here — flush is not waited on
+ * by the state machine.
+ */
+static void handle_session_flush(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_core *core;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		core = inst->core;
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+		else
+			vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+		dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+		dqevent.u.data[0] = (uint8_t)MSM_VIDC_DECODER_FLUSH_DONE;
+		v4l2_event_queue(vdev, &dqevent);
+	} else {
+		pr_err("Failed to get valid response for flush\n");
+	}
+}
+
+
+/*
+ * Firmware callback for SESSION_END_DONE: wakes the waiter and queues
+ * a CLOSE_DONE V4L2 event on the matching video device.
+ */
+static void handle_session_close(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct video_device *vdev;
+	struct v4l2_event dqevent;
+	struct msm_vidc_core *core;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		signal_session_msg_receipt(cmd, inst);
+		core = inst->core;
+		if (inst->session_type == MSM_VIDC_ENCODER)
+			vdev = &core->vdev[MSM_VIDC_ENCODER].vdev;
+		else
+			vdev = &core->vdev[MSM_VIDC_DECODER].vdev;
+		dqevent.type = V4L2_EVENT_PRIVATE_START + V4L2_EVENT_VIDC_BASE;
+		dqevent.u.data[0] = (uint8_t)MSM_VIDC_CLOSE_DONE;
+		v4l2_event_queue(vdev, &dqevent);
+	} else {
+		pr_err("Failed to get valid response for session close\n");
+	}
+}
+
+/*
+ * Find the queued vb2 buffer whose plane-0 device address matches
+ * dev_addr.  Returns NULL (with an error log) when no match exists.
+ */
+static struct vb2_buffer *get_vb_from_device_addr(struct vb2_queue *q,
+		u32 dev_addr)
+{
+	struct vb2_buffer *vb;
+	if (!q) {
+		pr_err("Invalid parameter\n");
+		return NULL;
+	}
+	list_for_each_entry(vb, &q->queued_list, queued_entry) {
+		/* The driver stores the device address in m.userptr. */
+		if (vb->v4l2_planes[0].m.userptr == dev_addr)
+			return vb;
+	}
+	pr_err("Failed to find the buffer in queued list: %d, %d\n",
+		dev_addr, q->type);
+	return NULL;
+}
+
+/*
+ * Empty-buffer-done: the firmware consumed an input buffer; hand it
+ * back to videobuf2.  The vb2 buffer pointer round-trips through the
+ * clnt_data cookie set in msm_comm_qbuf().
+ */
+static void handle_ebd(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_data_done *response = data;
+	struct vb2_buffer *vb;
+	if (!response) {
+		pr_err("Invalid response from vidc_hal\n");
+		return;
+	}
+	vb = response->clnt_data;
+	if (!vb)
+		return;
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+}
+
+/*
+ * Fill-buffer-done: the firmware produced an output (capture) buffer.
+ * Locate the matching vb2 buffer by device address, copy the filled
+ * length and EOS flag across, and complete it.
+ */
+static void handle_fbd(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_data_done *response = data;
+	struct msm_vidc_inst *inst;
+	struct vb2_buffer *vb;
+	struct vidc_hal_fbd *fill_buf_done;
+	if (!response) {
+		pr_err("Invalid response from vidc_hal\n");
+		return;
+	}
+	inst = (struct msm_vidc_inst *)response->session_id;
+	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
+	vb = get_vb_from_device_addr(&inst->vb2_bufq[CAPTURE_PORT],
+		(u32)fill_buf_done->packet_buffer1);
+	if (vb) {
+		vb->v4l2_planes[0].bytesused = fill_buf_done->filled_len1;
+		pr_debug("Filled length = %d\n", vb->v4l2_planes[0].bytesused);
+		/* Propagate end-of-stream to userspace via buffer flags. */
+		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
+			vb->v4l2_buf.flags |= V4L2_BUF_FLAG_EOS;
+		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	}
+}
+
+/*
+ * Single entry point for all firmware (HAL) callbacks: dispatch each
+ * response to its dedicated handler.  Runs in the HAL callback context.
+ */
+void handle_cmd_response(enum command_response cmd, void *data)
+{
+	pr_debug("Command response = %d\n", cmd);
+	switch (cmd) {
+	case SYS_INIT_DONE:
+		handle_sys_init_done(cmd, data);
+		break;
+	case SESSION_INIT_DONE:
+		handle_session_init_done(cmd, data);
+		break;
+	case SESSION_PROPERTY_INFO:
+		handle_session_prop_info(cmd, data);
+		break;
+	case SESSION_LOAD_RESOURCE_DONE:
+		handle_load_resource_done(cmd, data);
+		break;
+	case SESSION_START_DONE:
+		handle_start_done(cmd, data);
+		break;
+	case SESSION_ETB_DONE:
+		handle_ebd(cmd, data);
+		break;
+	case SESSION_FTB_DONE:
+		handle_fbd(cmd, data);
+		break;
+	case SESSION_STOP_DONE:
+		handle_stop_done(cmd, data);
+		break;
+	case SESSION_RELEASE_RESOURCE_DONE:
+		handle_release_res_done(cmd, data);
+		break;
+	case SESSION_END_DONE:
+		handle_session_close(cmd, data);
+		break;
+	case VIDC_EVENT_CHANGE:
+		handle_event_change(cmd, data);
+		break;
+	case SESSION_FLUSH_DONE:
+		handle_session_flush(cmd, data);
+		break;
+	default:
+		pr_err("response unhandled\n");
+		break;
+	}
+}
+
+/*
+ * Wait (bounded) for the SYS_INIT_DONE acknowledgement triggered by
+ * msm_comm_init_core(), then mark both core and instance as init-done.
+ * Returns 0 on success or if the core was already initialized, -EIO on
+ * timeout.
+ */
+static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core = inst->core;
+	unsigned long flags;
+	int rc = 0;
+	mutex_lock(&core->sync_lock);
+	if (core->state >= VIDC_CORE_INIT_DONE) {
+		pr_err("Video core: %d is already in state: %d\n",
+			core->id, core->state);
+		goto core_already_inited;
+	}
+	pr_debug("Waiting for SYS_INIT_DONE\n");
+	rc = wait_for_completion_timeout(
+		&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)],
+		msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+	if (!rc) {
+		pr_err("Wait interrupted or timeout: %d\n", rc);
+		rc = -EIO;
+		goto exit;
+	} else {
+		spin_lock_irqsave(&core->lock, flags);
+		core->state = VIDC_CORE_INIT_DONE;
+		spin_unlock_irqrestore(&core->lock, flags);
+	}
+	pr_debug("SYS_INIT_DONE!!!\n");
+core_already_inited:
+	/* Instance advances regardless of whether this call did the init. */
+	change_inst_state(inst, MSM_VIDC_CORE_INIT_DONE);
+	rc = 0;
+exit:
+	mutex_unlock(&core->sync_lock);
+	return rc;
+}
+
+/*
+ * Kick off core (firmware) initialization if this is the first user.
+ * The completion is re-armed BEFORE vidc_hal_core_init() so the
+ * SYS_INIT_DONE callback cannot be missed.  Idempotent: an already
+ * initialized core just advances the instance state.
+ */
+static int msm_comm_init_core(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core = inst->core;
+	unsigned long flags;
+	mutex_lock(&core->sync_lock);
+	if (core->state >= VIDC_CORE_INIT) {
+		pr_err("Video core: %d is already in state: %d\n",
+			core->id, core->state);
+		goto core_already_inited;
+	}
+	init_completion(&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)]);
+	rc = vidc_hal_core_init(core->device);
+	if (rc) {
+		pr_err("Failed to init core, id = %d\n", core->id);
+		goto exit;
+	}
+	spin_lock_irqsave(&core->lock, flags);
+	core->state = VIDC_CORE_INIT;
+	spin_unlock_irqrestore(&core->lock, flags);
+core_already_inited:
+	change_inst_state(inst, MSM_VIDC_CORE_INIT);
+exit:
+	mutex_unlock(&core->sync_lock);
+	return rc;
+}
+
+/*
+ * Release the core once the LAST instance is gone: the HAL release is
+ * only issued when core->instances is empty (the closing instance has
+ * already been unlinked by msm_vidc_close()).  The instance itself is
+ * moved to CORE_UNINIT either way.
+ */
+static int msm_vidc_deinit_core(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_vidc_core *core = inst->core;
+	unsigned long flags;
+	mutex_lock(&core->sync_lock);
+	if (core->state == VIDC_CORE_UNINIT) {
+		pr_err("Video core: %d is already in state: %d\n",
+			core->id, core->state);
+		goto core_already_uninited;
+	}
+	if (list_empty(&core->instances)) {
+		pr_debug("Calling vidc_hal_core_release\n");
+		rc = vidc_hal_core_release(core->device);
+		if (rc) {
+			pr_err("Failed to release core, id = %d\n", core->id);
+			goto exit;
+		}
+		spin_lock_irqsave(&core->lock, flags);
+		core->state = VIDC_CORE_UNINIT;
+		spin_unlock_irqrestore(&core->lock, flags);
+	}
+core_already_uninited:
+	change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
+exit:
+	mutex_unlock(&core->sync_lock);
+	return rc;
+}
+
+/*
+ * Translate the driver session type into the HAL domain enum.
+ * Unknown types are logged and mapped to HAL_UNUSED_DOMAIN.
+ */
+static enum hal_domain get_hal_domain(int session_type)
+{
+	switch (session_type) {
+	case MSM_VIDC_ENCODER:
+		return HAL_VIDEO_DOMAIN_ENCODER;
+	case MSM_VIDC_DECODER:
+		return HAL_VIDEO_DOMAIN_DECODER;
+	default:
+		pr_err("Wrong domain\n");
+		return HAL_UNUSED_DOMAIN;
+	}
+}
+
+/*
+ * Translate a V4L2 fourcc into the HAL codec enum.  Unknown or not yet
+ * wired-up codecs (see the list below) map to HAL_UNUSED_CODEC.
+ */
+static enum hal_video_codec get_hal_codec_type(int fourcc)
+{
+	enum hal_video_codec codec;
+	/* Format string was missing its newline, which merges this line
+	 * with the next printk in the log. */
+	pr_debug("codec in %s is 0x%x\n", __func__, fourcc);
+	switch (fourcc) {
+	case V4L2_PIX_FMT_H264:
+	case V4L2_PIX_FMT_H264_NO_SC:
+		codec = HAL_VIDEO_CODEC_H264;
+		break;
+	case V4L2_PIX_FMT_H263:
+		codec = HAL_VIDEO_CODEC_H263;
+		break;
+	case V4L2_PIX_FMT_MPEG1:
+		codec = HAL_VIDEO_CODEC_MPEG1;
+		break;
+	case V4L2_PIX_FMT_MPEG2:
+		codec = HAL_VIDEO_CODEC_MPEG2;
+		break;
+	case V4L2_PIX_FMT_MPEG4:
+		codec = HAL_VIDEO_CODEC_MPEG4;
+		break;
+	case V4L2_PIX_FMT_VC1_ANNEX_G:
+	case V4L2_PIX_FMT_VC1_ANNEX_L:
+		codec = HAL_VIDEO_CODEC_VC1;
+		break;
+	/* Not yet supported:
+	 * HAL_VIDEO_CODEC_MVC, HAL_VIDEO_CODEC_DIVX_311,
+	 * HAL_VIDEO_CODEC_DIVX, HAL_VIDEO_CODEC_SPARK,
+	 * HAL_VIDEO_CODEC_VP6, HAL_VIDEO_CODEC_VP7,
+	 * HAL_VIDEO_CODEC_VP8 */
+	default:
+		pr_err("Wrong codec: %d\n", fourcc);
+		codec = HAL_UNUSED_CODEC;
+	}
+	return codec;
+}
+
+/*
+ * Open a HAL session for this instance.  The codec fourcc comes from
+ * the bitstream-side port: OUTPUT for decoders, CAPTURE for encoders.
+ * Returns 0 on success (or if already open), -EINVAL on failure.
+ */
+static int msm_comm_session_init(int flipped_state,
+	struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int fourcc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_OPEN)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto exit;
+	}
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		fourcc = inst->fmts[OUTPUT_PORT]->fourcc;
+	} else if (inst->session_type == MSM_VIDC_ENCODER) {
+		fourcc = inst->fmts[CAPTURE_PORT]->fourcc;
+	} else {
+		pr_err("Invalid session\n");
+		return -EINVAL;
+	}
+	/* Arm the completion before issuing the command so the
+	 * SESSION_INIT_DONE callback cannot be missed. */
+	init_completion(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_INIT_DONE)]);
+	inst->session = vidc_hal_session_init(inst->core->device, (u32) inst,
+			get_hal_domain(inst->session_type),
+			get_hal_codec_type(fourcc));
+	if (!inst->session) {
+		pr_err("Failed to call session init for: %d, %d, %d, %d\n",
+			(int)inst->core->device, (int)inst,
+			inst->session_type, fourcc);
+		/* Original fell through with rc == 0, reporting success to
+		 * the caller despite the failed session init. */
+		rc = -EINVAL;
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_OPEN);
+exit:
+	return rc;
+}
+
+/*
+ * Ask the firmware to load resources for this session, then record the
+ * LOAD_RESOURCES state.  Returns 0 if already at/past that state.
+ */
+static int msm_vidc_load_resources(int flipped_state,
+	struct msm_vidc_inst *inst)
+{
+	int rc;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		return 0;
+	}
+	rc = vidc_hal_session_load_res((void *) inst->session);
+	if (rc) {
+		pr_err("Failed to send load resources\n");
+		return rc;
+	}
+	change_inst_state(inst, MSM_VIDC_LOAD_RESOURCES);
+	return 0;
+}
+
+/*
+ * Send the session START command to the firmware and record the START
+ * state.  The caller later waits for SESSION_START_DONE.
+ */
+static int msm_vidc_start(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_START)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto exit;
+	}
+	init_completion(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_START_DONE)]);
+	rc = vidc_hal_session_start((void *) inst->session);
+	if (rc) {
+		/* Original message was copy-pasted from the load-resources
+		 * path and misreported the failing command. */
+		pr_err("Failed to send start\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_START);
+exit:
+	return rc;
+}
+
+/*
+ * Send the session STOP command to the firmware and record the STOP
+ * state.  The caller later waits for SESSION_STOP_DONE.
+ */
+static int msm_vidc_stop(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_STOP)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto exit;
+	}
+	pr_debug("Send Stop to hal\n");
+	/* Arm the completion before issuing the command. */
+	init_completion(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_STOP_DONE)]);
+	rc = vidc_hal_session_stop((void *) inst->session);
+	if (rc) {
+		pr_err("Failed to send stop\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_STOP);
+exit:
+	return rc;
+}
+
+/*
+ * Send the release-resources command to the firmware and record the
+ * RELEASE_RESOURCES state.  The caller later waits for
+ * SESSION_RELEASE_RESOURCE_DONE.
+ */
+static int msm_vidc_release_res(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_RELEASE_RESOURCES)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto exit;
+	}
+	pr_debug("Send release res to hal\n");
+	init_completion(
+	&inst->completions[SESSION_MSG_INDEX(SESSION_RELEASE_RESOURCE_DONE)]);
+	rc = vidc_hal_session_release_res((void *) inst->session);
+	if (rc) {
+		/* Original message was copy-pasted from the load-resources
+		 * path and misreported the failing command. */
+		pr_err("Failed to send release resources\n");
+		goto exit;
+	}
+	change_inst_state(inst, MSM_VIDC_RELEASE_RESOURCES);
+exit:
+	return rc;
+}
+
+/*
+ * Send the session END command to the firmware.  The caller later waits
+ * for SESSION_END_DONE.
+ */
+static int msm_comm_session_close(int flipped_state, struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_CLOSE)) {
+		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
+		goto exit;
+	}
+	pr_debug("Send session close to hal\n");
+	init_completion(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_END_DONE)]);
+	rc = vidc_hal_session_end((void *) inst->session);
+	if (rc) {
+		/* Original message was copy-pasted from the load-resources
+		 * path and misreported the failing command. */
+		pr_err("Failed to send session close\n");
+		goto exit;
+	}
+	/* NOTE(review): state moves to MSM_VIDC_OPEN, not MSM_VIDC_CLOSE —
+	 * looks intentional (session torn down back to "open") but worth
+	 * confirming against the state machine in msm_comm_try_state(). */
+	change_inst_state(inst, MSM_VIDC_OPEN);
+exit:
+	return rc;
+}
+
+/*
+ * Drive an instance from its current state to the requested state by
+ * walking the intermediate states in order.  States before MSM_VIDC_STOP
+ * form the "bring-up" ladder and states after it the "tear-down" ladder;
+ * when the current and target states are on opposite sides of STOP, the
+ * current state is "flipped" onto the other ladder so the switch below
+ * can fall through the right sequence of steps.  Every case deliberately
+ * falls through to the next until rc fails or the target is reached.
+ */
+int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
+{
+	int rc = 0;
+	int flipped_state;
+	if (!inst) {
+		pr_err("Invalid instance pointer = %p\n", inst);
+		return -EINVAL;
+	}
+	pr_debug("Trying to move inst: %p from: 0x%x to 0x%x\n",
+			inst, inst->state, state);
+	mutex_lock(&inst->sync_lock);
+	flipped_state = inst->state;
+	if (flipped_state < MSM_VIDC_STOP
+			&& state > MSM_VIDC_STOP) {
+		/* Mirror the state around STOP, then snap to the odd
+		 * "command" state below it (the & 0xFFFE / -1 pair). */
+		flipped_state = MSM_VIDC_STOP + (MSM_VIDC_STOP - flipped_state);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	} else if (flipped_state > MSM_VIDC_STOP
+			&& state < MSM_VIDC_STOP) {
+		flipped_state = MSM_VIDC_STOP -
+			(flipped_state - MSM_VIDC_STOP + 1);
+		flipped_state &= 0xFFFE;
+		flipped_state = flipped_state - 1;
+	}
+	pr_debug("flipped_state = 0x%x\n", flipped_state);
+	switch (flipped_state) {
+	case MSM_VIDC_CORE_UNINIT_DONE:
+	case MSM_VIDC_CORE_INIT:
+		rc = msm_comm_init_core(inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_CORE_INIT_DONE:
+		rc = msm_comm_init_core_done(inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_OPEN:
+		rc = msm_comm_session_init(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_OPEN_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE,
+			SESSION_INIT_DONE);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_LOAD_RESOURCES:
+		rc = msm_vidc_load_resources(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough: no wait state for LOAD_RESOURCES_DONE */
+	case MSM_VIDC_LOAD_RESOURCES_DONE:
+	case MSM_VIDC_START:
+		rc = msm_vidc_start(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_START_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE,
+				SESSION_START_DONE);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_STOP:
+		rc = msm_vidc_stop(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_STOP_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE,
+				SESSION_STOP_DONE);
+		if (rc || state <= inst->state)
+			break;
+		pr_debug("Moving to Stop Done state\n");
+	/* fallthrough */
+	case MSM_VIDC_RELEASE_RESOURCES:
+		rc = msm_vidc_release_res(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_RELEASE_RESOURCES_DONE:
+		rc = wait_for_state(inst, flipped_state,
+			MSM_VIDC_RELEASE_RESOURCES_DONE,
+			SESSION_RELEASE_RESOURCE_DONE);
+		if (rc || state <= inst->state)
+			break;
+		pr_debug("Moving to release resources done state\n");
+	/* fallthrough */
+	case MSM_VIDC_CLOSE:
+		rc = msm_comm_session_close(flipped_state, inst);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_CLOSE_DONE:
+		rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE,
+			SESSION_END_DONE);
+		if (rc || state <= inst->state)
+			break;
+	/* fallthrough */
+	case MSM_VIDC_CORE_UNINIT:
+		pr_debug("***************Sending core uninit\n");
+		rc = msm_vidc_deinit_core(inst);
+		if (rc || state == inst->state)
+			break;
+	/* fallthrough */
+	default:
+		pr_err("State not recognized\n");
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&inst->sync_lock);
+	if (rc)
+		pr_err("Failed to move from state: %d to %d\n",
+			inst->state, state);
+	return rc;
+}
+
+/*
+ * Queue a video buffer towards the hardware.  Before the session has
+ * reached START_DONE the buffer is parked on inst->pendingq; afterwards
+ * it is sent down as an ETB (output/mplane) or FTB (capture/mplane).
+ * Returns 0 on success, -EINVAL for bad input or unsupported queue
+ * type, -ENOMEM if the pending-entry allocation fails.
+ */
+int msm_comm_qbuf(struct vb2_buffer *vb)
+{
+	int rc = 0;
+	struct vb2_queue *q;
+	struct msm_vidc_inst *inst;
+	unsigned long flags;
+	struct vb2_buf_entry *entry;
+	struct vidc_frame_data frame_data;
+	/* Validate vb BEFORE dereferencing it; the original read
+	 * vb->vb2_queue first and null-checked afterwards. */
+	if (!vb) {
+		pr_err("Invalid input: %p\n", vb);
+		return -EINVAL;
+	}
+	q = vb->vb2_queue;
+	inst = q->drv_priv;
+	if (!inst) {
+		pr_err("Invalid input: %p, %p\n", inst, vb);
+		return -EINVAL;
+	}
+	if (inst->state != MSM_VIDC_START_DONE) {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("Out of memory\n");
+			/* Original left rc == 0 here and reported success. */
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+		entry->vb = vb;
+		pr_debug("Queueing buffer in pendingq\n");
+		spin_lock_irqsave(&inst->lock, flags);
+		list_add_tail(&entry->list, &inst->pendingq);
+		spin_unlock_irqrestore(&inst->lock, flags);
+	} else {
+		memset(&frame_data, 0 , sizeof(struct vidc_frame_data));
+		frame_data.alloc_len = vb->v4l2_planes[0].length;
+		frame_data.filled_len = vb->v4l2_planes[0].bytesused;
+		frame_data.device_addr = vb->v4l2_planes[0].m.userptr;
+		/* Cookie round-trips back in handle_ebd()/handle_fbd(). */
+		frame_data.clnt_data = (u32)vb;
+		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			frame_data.buffer_type = HAL_BUFFER_INPUT;
+			if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_EOS) {
+				frame_data.flags = HAL_BUFFERFLAG_EOS;
+				pr_debug("Received EOS on output capability\n");
+			}
+			pr_debug("Sending etb to hal: Alloc: %d :filled: %d\n",
+				frame_data.alloc_len, frame_data.filled_len);
+			rc = vidc_hal_session_etb((void *) inst->session,
+					&frame_data);
+			pr_debug("Sent etb to HAL\n");
+		} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			frame_data.filled_len = 0;
+			frame_data.buffer_type = HAL_BUFFER_OUTPUT;
+			frame_data.extradata_addr = 0;
+			pr_debug("Sending ftb to hal...: Alloc: %d :filled: %d"
+				" extradata_addr: %d\n", frame_data.alloc_len,
+				frame_data.filled_len,
+				frame_data.extradata_addr);
+			rc = vidc_hal_session_ftb((void *) inst->session,
+					&frame_data);
+		} else {
+			pr_err("This capability is not supported: %d\n",
+				q->type);
+			rc = -EINVAL;
+		}
+	}
+	if (rc)
+		pr_err("Failed to queue buffer\n");
+err_no_mem:
+	return rc;
+}
+
+/*
+ * Ask the firmware for the session's buffer requirements and block
+ * until handle_session_prop_info() has copied them into inst->buff_req.
+ * Returns 0 on success, -EIO on timeout, or the HAL error.
+ */
+int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	mutex_lock(&inst->sync_lock);
+	/* Arm the completion before issuing the query. */
+	init_completion(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_PROPERTY_INFO)]);
+	rc = vidc_hal_session_get_buf_req((void *) inst->session);
+	if (rc) {
+		pr_err("Failed to get property\n");
+		goto exit;
+	}
+	rc = wait_for_completion_timeout(
+		&inst->completions[SESSION_MSG_INDEX(SESSION_PROPERTY_INFO)],
+		msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+	if (!rc) {
+		pr_err("Wait interrupted or timeout: %d\n", rc);
+		rc = -EIO;
+		goto exit;
+	}
+	rc = 0;
+exit:
+	mutex_unlock(&inst->sync_lock);
+	return rc;
+}
+
+/*
+ * Handle a V4L2 decoder command.  Only V4L2_DEC_CMD_STOP is supported;
+ * it is implemented as an output-side flush.
+ */
+int msm_vidc_decoder_cmd(void *instance, struct v4l2_decoder_cmd *dec)
+{
+	int rc = 0;
+	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
+	mutex_lock(&inst->sync_lock);
+	if (dec->cmd != V4L2_DEC_CMD_STOP) {
+		/* Original returned -EINVAL directly here, leaking
+		 * inst->sync_lock (mutex held on return). */
+		rc = -EINVAL;
+		goto exit;
+	}
+	rc = vidc_hal_session_flush((void *)inst->session, HAL_FLUSH_OUTPUT);
+	if (rc) {
+		pr_err("Failed to get property\n");
+		goto exit;
+	}
+exit:
+	mutex_unlock(&inst->sync_lock);
+	return rc;
+}
+
+/*
+ * Allocate the firmware-requested scratch buffers and register them
+ * with the HAL.  Any previously allocated scratch buffers are freed
+ * first so the function can be called again after a reconfig.
+ *
+ * NOTE(review): index 6 into buff_req.buffer[] is assumed to be the
+ * scratch-buffer requirement slot — confirm against the HAL layout and
+ * consider a named constant.
+ */
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct msm_smem *handle;
+	struct internal_buf *binfo;
+	struct list_head *ptr, *next;
+	struct vidc_buffer_addr_info buffer_info;
+	unsigned long flags;
+	int i;
+	pr_debug("scratch: num = %d, size = %d\n",
+		inst->buff_req.buffer[6].buffer_count_actual,
+		inst->buff_req.buffer[6].buffer_size);
+	/* Drop any scratch buffers left over from a previous call. */
+	spin_lock_irqsave(&inst->lock, flags);
+	if (!list_empty(&inst->internalbufs)) {
+		list_for_each_safe(ptr, next, &inst->internalbufs) {
+			binfo = list_entry(ptr, struct internal_buf,
+					list);
+			list_del(&binfo->list);
+			msm_smem_free(inst->mem_client, binfo->handle);
+			kfree(binfo);
+		}
+	}
+	spin_unlock_irqrestore(&inst->lock, flags);
+
+	for (i = 0; i < inst->buff_req.buffer[6].buffer_count_actual;
+			i++) {
+		handle = msm_smem_alloc(inst->mem_client,
+			inst->buff_req.buffer[6].buffer_size, 1, 0);
+		if (!handle) {
+			pr_err("Failed to allocate scratch memory\n");
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			pr_err("Out of memory\n");
+			/* Original leaked 'handle' on this path: it was
+			 * allocated above but not yet tracked on any list. */
+			msm_smem_free(inst->mem_client, handle);
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+		binfo->handle = handle;
+		/* Track the buffer so cleanup_instance() can free it. */
+		spin_lock_irqsave(&inst->lock, flags);
+		list_add_tail(&binfo->list, &inst->internalbufs);
+		spin_unlock_irqrestore(&inst->lock, flags);
+		buffer_info.buffer_size =
+			inst->buff_req.buffer[6].buffer_size;
+		buffer_info.buffer_type = HAL_BUFFER_INTERNAL_SCRATCH;
+		buffer_info.num_buffers = 1;
+		buffer_info.align_device_addr = handle->device_addr;
+		rc = vidc_hal_session_set_buffers((void *) inst->session,
+				&buffer_info);
+		if (rc) {
+			pr_err("vidc_hal_session_set_buffers failed");
+			break;
+		}
+	}
+err_no_mem:
+	return rc;
+}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
new file mode 100644
index 0000000..2fafa79
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_COMMON_H_
+#define _MSM_VIDC_COMMON_H_
+#include "msm_vidc_internal.h"
+/* List node used to park vb2 buffers on inst->pendingq until the
+ * session reaches START_DONE (see msm_comm_qbuf()). */
+struct vb2_buf_entry {
+	struct list_head list;
+	struct vb2_buffer *vb;
+};
+struct msm_vidc_core *get_vidc_core(int core_id);
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
+ const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
+const struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc(
+ const struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type);
+struct vb2_queue *msm_comm_get_vb2q(
+ struct msm_vidc_inst *inst, enum v4l2_buf_type type);
+int msm_comm_try_state(struct msm_vidc_inst *inst, int state);
+int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst);
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
+int msm_comm_qbuf(struct vb2_buffer *vb);
+#define IS_PRIV_CTRL(idx) (\
+ (V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
+ V4L2_CTRL_DRIVER_PRIV(idx))
+
+#endif
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
new file mode 100644
index 0000000..fb1ab58
--- /dev/null
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_INTERNAL_H_
+#define _MSM_VIDC_INTERNAL_H_
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <media/msm_vidc.h>
+
+#include "vidc_hal_api.h"
+
+#define MSM_VIDC_DRV_NAME "msm_vidc_driver"
+/* No trailing semicolon: the macro is meant to expand to an expression;
+ * the previous ';' would inject a stray statement at every use site. */
+#define MSM_VIDC_VERSION KERNEL_VERSION(0, 0, 1)
+#define MAX_DEBUGFS_NAME 50
+#define DEFAULT_TIMEOUT 3
+
+#define V4L2_EVENT_VIDC_BASE 10
+
+#define SYS_MSG_START VIDC_EVENT_CHANGE
+#define SYS_MSG_END SYS_DEBUG
+#define SESSION_MSG_START SESSION_LOAD_RESOURCE_DONE
+#define SESSION_MSG_END SESSION_PROPERTY_INFO
+/* Index into the per-core/per-instance completion arrays below.
+ * Argument is parenthesized so expression arguments expand safely. */
+#define SYS_MSG_INDEX(__msg) ((__msg) - SYS_MSG_START)
+#define SESSION_MSG_INDEX(__msg) ((__msg) - SESSION_MSG_START)
+
+enum vidc_ports {
+	OUTPUT_PORT,
+	CAPTURE_PORT,
+	MAX_PORT_NUM
+};
+
+enum vidc_core_state {
+	VIDC_CORE_UNINIT = 0,
+	VIDC_CORE_INIT,
+	VIDC_CORE_INIT_DONE,
+};
+
+/*Donot change the enum values unless
+ * you know what you are doing*/
+enum instance_state {
+	MSM_VIDC_CORE_UNINIT_DONE = 0x0001,
+	MSM_VIDC_CORE_INIT,
+	MSM_VIDC_CORE_INIT_DONE,
+	MSM_VIDC_OPEN,
+	MSM_VIDC_OPEN_DONE,
+	MSM_VIDC_LOAD_RESOURCES,
+	MSM_VIDC_LOAD_RESOURCES_DONE,
+	MSM_VIDC_START,
+	MSM_VIDC_START_DONE,
+	MSM_VIDC_STOP,
+	MSM_VIDC_STOP_DONE,
+	MSM_VIDC_RELEASE_RESOURCES,
+	MSM_VIDC_RELEASE_RESOURCES_DONE,
+	MSM_VIDC_CLOSE,
+	MSM_VIDC_CLOSE_DONE,
+	MSM_VIDC_CORE_UNINIT,
+};
+
+/* NOTE(review): "resposes" is a typo for "responses"; the tag is kept
+ * unchanged because other translation units may already reference it. */
+enum vidc_resposes_id {
+	MSM_VIDC_DECODER_FLUSH_DONE = 0x11,
+	MSM_VIDC_DECODER_EVENT_CHANGE,
+};
+
+struct buf_info {
+	struct list_head list;
+	struct vb2_buffer *buf;
+};
+
+/* Driver-owned (scratch/internal) buffer backed by an smem allocation. */
+struct internal_buf {
+	struct list_head list;
+	struct msm_smem *handle;
+};
+
+struct msm_vidc_format {
+	char name[64];
+	u8 description[32];
+	u32 fourcc;
+	int num_planes;
+	int type;
+	/* Per-format frame-size calculator for a given plane/resolution. */
+	u32 (*get_frame_size)(int plane, u32 height, u32 width);
+};
+
+/* Global driver state: list of cores plus debugfs root. */
+struct msm_vidc_drv {
+	spinlock_t lock;
+	struct list_head cores;
+	int num_cores;
+	struct dentry *debugfs_root;
+};
+
+struct msm_video_device {
+	int type;
+	struct video_device vdev;
+};
+
+/* One hardware core: video devices, register window, IRQ, instances and
+ * per-system-message completions indexed via SYS_MSG_INDEX(). */
+struct msm_vidc_core {
+	struct list_head list;
+	struct mutex sync_lock;
+	int id;
+	void *device;
+	struct msm_video_device vdev[MSM_VIDC_MAX_DEVICES];
+	struct v4l2_device v4l2_dev;
+	spinlock_t lock;
+	struct list_head instances;
+	struct dentry *debugfs_root;
+	u32 base_addr;
+	u32 register_base;
+	u32 register_size;
+	u32 irq;
+	enum vidc_core_state state;
+	struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
+};
+
+/* One open session: queues, formats, state and per-session-message
+ * completions indexed via SESSION_MSG_INDEX(). */
+struct msm_vidc_inst {
+	struct list_head list;
+	struct mutex sync_lock;
+	struct msm_vidc_core *core;
+	int session_type;
+	void *session;
+	u32 width;
+	u32 height;
+	int state;
+	const struct msm_vidc_format *fmts[MAX_PORT_NUM];
+	struct vb2_queue vb2_bufq[MAX_PORT_NUM];
+	spinlock_t lock;
+	struct list_head pendingq;
+	struct list_head internalbufs;
+	struct buffer_requirements buff_req;
+	void *mem_client;
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
+	struct v4l2_fh event_handler;
+	bool in_reconfig;
+	u32 reconfig_width;
+	u32 reconfig_height;
+};
+
+extern struct msm_vidc_drv *vidc_driver;
+
+/* Template used to build v4l2 controls at session setup. */
+struct msm_vidc_ctrl {
+	u32 id;
+	char name[64];
+	enum v4l2_ctrl_type type;
+	s32 minimum;
+	s32 maximum;
+	s32 default_value;
+	u32 step;
+	u32 menu_skip_mask;
+	const char * const *qmenu;
+};
+
+void handle_cmd_response(enum command_response cmd, void *data);
+#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
new file mode 100644
index 0000000..f4c7878
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -0,0 +1,1925 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <asm/memory.h>
+#include "vidc_hal.h"
+#include "vidc_hal_io.h"
+
+#define FIRMWARE_SIZE 0X00A00000
+#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF
+
+/*Workaround for virtio */
+#define HFI_VIRTIO_FW_BIAS 0x34f00000
+
+struct hal_device_data hal_ctxt;
+
+/*
+ * Virtio workaround: host->firmware command packets carry device
+ * addresses that the firmware expects without the virtio bias, so
+ * HFI_VIRTIO_FW_BIAS is subtracted from every embedded buffer address
+ * before the packet is queued.  Packet types that carry no addresses
+ * fall through untouched.
+ */
+static void hal_virtio_modify_cmd_packet(u8 *packet)
+{
+	struct hfi_cmd_sys_session_init_packet *sys_init;
+	struct hal_session *sess;
+	u8 i;
+
+	if (!packet) {
+		HAL_MSG_ERROR("Invalid Param: %s", __func__);
+		return;
+	}
+
+	/* The session-init layout is used only to read the common header
+	 * fields (type and session id) shared by all command packets.
+	 * NOTE(review): the type is read from 'packet' here but the msg
+	 * path reads 'packet_type' — confirm the two structs agree. */
+	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
+	sess = (struct hal_session *) sys_init->session_id;
+	switch (sys_init->packet) {
+	case HFI_CMD_SESSION_EMPTY_BUFFER:
+		/* ETB: decoder input is compressed, encoder input is raw. */
+		if (sess->is_decoder) {
+			struct hfi_cmd_session_empty_buffer_compressed_packet
+			*pkt = (struct
+			hfi_cmd_session_empty_buffer_compressed_packet
+			*) packet;
+			pkt->packet_buffer -= HFI_VIRTIO_FW_BIAS;
+		} else {
+			struct
+			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			*pkt = (struct
+			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			*) packet;
+			pkt->packet_buffer -= HFI_VIRTIO_FW_BIAS;
+		}
+		break;
+	case HFI_CMD_SESSION_FILL_BUFFER:
+	{
+		struct hfi_cmd_session_fill_buffer_packet *pkt =
+			(struct hfi_cmd_session_fill_buffer_packet *)packet;
+		pkt->packet_buffer -= HFI_VIRTIO_FW_BIAS;
+		break;
+	}
+	case HFI_CMD_SESSION_SET_BUFFERS:
+	{
+		struct hfi_cmd_session_set_buffers_packet *pkt =
+			(struct hfi_cmd_session_set_buffers_packet *)packet;
+		/* Output buffers carry buffer+extradata pairs; all other
+		 * types carry a flat array of addresses. */
+		if ((pkt->buffer_type == HFI_BUFFER_OUTPUT) ||
+			(pkt->buffer_type == HFI_BUFFER_OUTPUT2)) {
+			struct hfi_buffer_info *buff;
+			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+		} else {
+			for (i = 0; i < pkt->num_buffers; i++)
+				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
+		}
+		break;
+	}
+	case HFI_CMD_SESSION_RELEASE_BUFFERS:
+	{
+		struct hfi_cmd_session_release_buffer_packet *pkt =
+			(struct hfi_cmd_session_release_buffer_packet *)packet;
+		/* NOTE(review): this arm compares against HAL_BUFFER_*
+		 * while SET_BUFFERS compares HFI_BUFFER_* — confirm the
+		 * two enums share values or this is a latent mismatch. */
+		if ((pkt->buffer_type == HAL_BUFFER_OUTPUT) ||
+			(pkt->buffer_type == HAL_BUFFER_OUTPUT2)) {
+			struct hfi_buffer_info *buff;
+			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+			buff->buffer_addr -= HFI_VIRTIO_FW_BIAS;
+			buff->extradata_addr -= HFI_VIRTIO_FW_BIAS;
+		} else {
+			for (i = 0; i < pkt->num_buffers; i++)
+				pkt->rg_buffer_info[i] -= HFI_VIRTIO_FW_BIAS;
+		}
+		break;
+	}
+	case HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER:
+	{
+		struct hfi_cmd_session_parse_sequence_header_packet *pkt =
+			(struct hfi_cmd_session_parse_sequence_header_packet *)
+		packet;
+		pkt->packet_buffer -= HFI_VIRTIO_FW_BIAS;
+		break;
+	}
+	case HFI_CMD_SESSION_GET_SEQUENCE_HEADER:
+	{
+		struct hfi_cmd_session_get_sequence_header_packet *pkt =
+			(struct hfi_cmd_session_get_sequence_header_packet *)
+		packet;
+		pkt->packet_buffer -= HFI_VIRTIO_FW_BIAS;
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+/*
+ * Append one HFI packet to a shared host->firmware ring.
+ *
+ * 'info' is a struct vidc_iface_q_info; the packet's first u32 is its
+ * size in bytes.  On success *rx_req_is_set reports whether the
+ * firmware asked to be interrupted (qhdr_rx_req), so the caller knows
+ * to ring the doorbell.  Returns 0, or -ENOTEMPTY when the ring lacks
+ * space (tx_req is then set so the firmware signals when it drains).
+ */
+static int write_queue(void *info, u8 *packet, u32 *rx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_write_idx;
+	struct vidc_iface_q_info *qinfo;
+	u32 empty_space, read_idx;
+	u32 *write_ptr;
+
+	if (!info || !packet || !rx_req_is_set) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+
+	qinfo = (struct vidc_iface_q_info *) info;
+	HAL_MSG_LOW("In %s: ", __func__);
+	hal_virtio_modify_cmd_packet(packet);
+
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		HAL_MSG_ERROR("queue not present");
+		return -ENOENT;
+	}
+
+	/* Packet header stores size in bytes; ring indices are in words. */
+	packet_size_in_words = (*(u32 *)packet) >> 2;
+	HAL_MSG_LOW("Packet_size in words: %d", packet_size_in_words);
+
+	if (packet_size_in_words == 0) {
+		HAL_MSG_ERROR("Zero packet size");
+		return -ENODATA;
+	}
+
+	read_idx = queue->qhdr_read_idx;
+
+	empty_space = (queue->qhdr_write_idx >= read_idx) ?
+		(queue->qhdr_q_size - (queue->qhdr_write_idx - read_idx)) :
+		(read_idx - queue->qhdr_write_idx);
+	HAL_MSG_LOW("Empty_space: %d", empty_space);
+	/* '<=' keeps one word free so a full ring is distinguishable
+	 * from an empty one (read_idx == write_idx means empty). */
+	if (empty_space <= packet_size_in_words) {
+		queue->qhdr_tx_req = 1;
+		HAL_MSG_ERROR("Insufficient size (%d) to write (%d)",
+			empty_space, packet_size_in_words);
+		return -ENOTEMPTY;
+	}
+
+	queue->qhdr_tx_req = 0;
+
+	new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+		(queue->qhdr_write_idx << 2));
+	HAL_MSG_LOW("Write Ptr: %d", (u32) write_ptr);
+	if (new_write_idx < queue->qhdr_q_size) {
+		memcpy(write_ptr, packet, packet_size_in_words << 2);
+	} else {
+		/* Wrap-around: split the copy at the end of the ring. */
+		new_write_idx -= queue->qhdr_q_size;
+		memcpy(write_ptr, packet, (packet_size_in_words -
+			new_write_idx) << 2);
+		memcpy((void *)queue->qhdr_start_addr,
+			packet + ((packet_size_in_words - new_write_idx) << 2),
+			new_write_idx << 2);
+	}
+	queue->qhdr_write_idx = new_write_idx;
+	*rx_req_is_set = (1 == queue->qhdr_rx_req) ? 1 : 0;
+	HAL_MSG_LOW("Out %s: ", __func__);
+	return 0;
+}
+
+/*
+ * Virtio workaround, firmware->host direction: add HFI_VIRTIO_FW_BIAS
+ * back onto buffer addresses embedded in message packets, mirroring
+ * the subtraction done by hal_virtio_modify_cmd_packet().
+ */
+static void hal_virtio_modify_msg_packet(u8 *packet)
+{
+	struct hfi_msg_sys_session_init_done_packet *sys_idle;
+	struct hal_session *sess;
+
+	if (!packet) {
+		HAL_MSG_ERROR("Invalid Param: %s", __func__);
+		return;
+	}
+
+	/* Session-init-done layout is used only for the common header
+	 * (packet_type and session_id) shared by all message packets. */
+	sys_idle = (struct hfi_msg_sys_session_init_done_packet *)packet;
+	sess = (struct hal_session *) sys_idle->session_id;
+
+	switch (sys_idle->packet_type) {
+	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
+		/* FBD: decoder output is uncompressed, encoder output is
+		 * compressed bitstream. */
+		if (sess->is_decoder) {
+			struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet
+			*pkt_uc = (struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet
+			*) packet;
+			pkt_uc->packet_buffer += HFI_VIRTIO_FW_BIAS;
+		} else {
+			struct
+			hfi_msg_session_fill_buffer_done_compressed_packet
+			*pkt = (struct
+			hfi_msg_session_fill_buffer_done_compressed_packet
+			*) packet;
+			pkt->packet_buffer += HFI_VIRTIO_FW_BIAS;
+		}
+		break;
+	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
+	{
+		struct hfi_msg_session_empty_buffer_done_packet *pkt =
+			(struct hfi_msg_session_empty_buffer_done_packet *)packet;
+		pkt->packet_buffer += HFI_VIRTIO_FW_BIAS;
+		break;
+	}
+	case HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE:
+	{
+		struct
+		hfi_msg_session_get_sequence_header_done_packet
+		*pkt =
+		(struct hfi_msg_session_get_sequence_header_done_packet *)
+		packet;
+		pkt->sequence_header += HFI_VIRTIO_FW_BIAS;
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+/*
+ * Pop one HFI packet from a shared firmware->host ring into 'packet'.
+ *
+ * On success *pb_tx_req_is_set reports whether the firmware asked to
+ * be interrupted when space frees up (qhdr_tx_req).  Returns 0 on
+ * success, -EPERM when the ring is empty (rx_req is then set so the
+ * firmware interrupts on the next write), or a negative errno.
+ */
+static int read_queue(void *info, u8 *packet, u32 *pb_tx_req_is_set)
+{
+	struct hfi_queue_header *queue;
+	u32 packet_size_in_words, new_read_idx;
+	u32 *read_ptr;
+	struct vidc_iface_q_info *qinfo;
+
+	if (!info || !packet || !pb_tx_req_is_set) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+
+	qinfo = (struct vidc_iface_q_info *) info;
+	HAL_MSG_LOW("In %s: ", __func__);
+	queue = (struct hfi_queue_header *) qinfo->q_hdr;
+
+	if (!queue) {
+		HAL_MSG_ERROR("Queue memory is not allocated\n");
+		return -ENOMEM;
+	}
+
+	/* Equal indices mean the ring is empty. */
+	if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+		queue->qhdr_rx_req = 1;
+		*pb_tx_req_is_set = 0;
+		return -EPERM;
+	}
+
+	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+		(queue->qhdr_read_idx << 2));
+	/* Packet header stores size in bytes; convert to words. */
+	packet_size_in_words = (*read_ptr) >> 2;
+	HAL_MSG_LOW("packet_size_in_words: %d", packet_size_in_words);
+	if (packet_size_in_words == 0) {
+		HAL_MSG_ERROR("Zero packet size");
+		return -ENODATA;
+	}
+
+	new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
+	HAL_MSG_LOW("Read Ptr: %d", (u32) new_read_idx);
+	if (new_read_idx < queue->qhdr_q_size) {
+		memcpy(packet, read_ptr,
+			packet_size_in_words << 2);
+	} else {
+		/* Wrap-around: second half of the packet sits at the
+		 * start of the ring. */
+		new_read_idx -= queue->qhdr_q_size;
+		memcpy(packet, read_ptr,
+			(packet_size_in_words - new_read_idx) << 2);
+		memcpy(packet + ((packet_size_in_words -
+			new_read_idx) << 2),
+			(u8 *)queue->qhdr_start_addr, new_read_idx << 2);
+	}
+
+	queue->qhdr_read_idx = new_read_idx;
+
+	/* Re-arm rx_req when we just drained the ring. */
+	if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+		queue->qhdr_rx_req = 0;
+	else
+		queue->qhdr_rx_req = 1;
+
+	*pb_tx_req_is_set = (1 == queue->qhdr_tx_req) ? 1 : 0;
+	hal_virtio_modify_msg_packet(packet);
+	HAL_MSG_LOW("Out %s: ", __func__);
+	return 0;
+}
+
+/*
+ * Allocate shared memory via the smem client and fill in the caller's
+ * struct vidc_mem_addr (size, backing handle, kernel and device
+ * addresses).  Returns 0 or a negative errno; the allocation is owned
+ * by the caller and released with vidc_hal_free().
+ */
+static int vidc_hal_alloc(void *mem, void *clnt, u32 size, u32 align, u32 flags)
+{
+	struct vidc_mem_addr *vmem;
+	struct msm_smem *alloc;
+
+	if (!mem || !clnt || !size) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+	vmem = (struct vidc_mem_addr *)mem;
+	HAL_MSG_HIGH("start to alloc: size:%d, Flags: %d", size, flags);
+
+	alloc = msm_smem_alloc(clnt, size, align, flags);
+	HAL_MSG_LOW("Alloc done");
+	if (!alloc) {
+		HAL_MSG_HIGH("Alloc fail in %s", __func__);
+		return -ENOMEM;
+	} else {
+		HAL_MSG_MEDIUM("vidc_hal_alloc:ptr=%p,size=%d",
+			alloc->kvaddr, size);
+		vmem->mem_size = alloc->size;
+		vmem->mem_data = alloc;
+		vmem->align_virtual_addr = (u8 *) alloc->kvaddr;
+		vmem->align_device_addr = (u8 *)alloc->device_addr;
+	}
+	return 0;
+}
+
+/* Release an smem allocation made by vidc_hal_alloc(). */
+static void vidc_hal_free(struct smem_client *clnt, struct msm_smem *mem)
+{
+	msm_smem_free(clnt, mem);
+}
+
+/*
+ * Write 'value' to the VIDC register at base_addr + reg.
+ *
+ * Special case: a write to VIDC_CPU_CS_SCIACMDARG2 publishes the
+ * interface-queue table address to the firmware, so the virtio FW
+ * bias is first subtracted from the value and from each queue's
+ * start address in the table at 'vaddr' (callers pass vaddr only for
+ * this register, 0 otherwise).  wmb() ensures the relaxed write is
+ * posted before any subsequent device access.
+ */
+static void write_register(u8 *base_addr, u32 reg, u32 value, u8 *vaddr)
+{
+	u32 hwiosymaddr = reg;
+
+	reg &= REG_ADDR_OFFSET_BITMASK;
+	if (reg == (u32)VIDC_CPU_CS_SCIACMDARG2) {
+		/* workaround to offset of FW bias */
+		struct hfi_queue_header *qhdr;
+		struct hfi_queue_table_header *qtbl_hdr =
+			(struct hfi_queue_table_header *)vaddr;
+
+		qhdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(qtbl_hdr, 0);
+		qhdr->qhdr_start_addr -= HFI_VIRTIO_FW_BIAS;
+
+		qhdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(qtbl_hdr, 1);
+		qhdr->qhdr_start_addr -= HFI_VIRTIO_FW_BIAS;
+
+		qhdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(qtbl_hdr, 2);
+		qhdr->qhdr_start_addr -= HFI_VIRTIO_FW_BIAS;
+		value -= HFI_VIRTIO_FW_BIAS;
+	}
+
+	hwiosymaddr = ((u32)base_addr + (hwiosymaddr));
+	HAL_MSG_LOW("Base addr: 0x%x, written to: 0x%x, Value: 0x%x...",
+		(u32)base_addr, hwiosymaddr, value);
+	writel_relaxed(value, hwiosymaddr);
+	wmb();
+}
+
+/* Read the VIDC register at base_addr + reg; rmb() orders the relaxed
+ * read before any dependent accesses. */
+static int read_register(u8 *base_addr, u32 reg)
+{
+	int rc = readl_relaxed((u32)base_addr + reg);
+	rmb();
+	return rc;
+}
+
+/*
+ * Queue one command packet on the host->firmware command ring under
+ * the device write lock, ringing the H2A soft interrupt if the
+ * firmware requested it.  Returns 0 on success, -EINVAL on bad args,
+ * -EPERM when the queue is unusable or full.
+ */
+static int vidc_hal_iface_cmdq_write(struct hal_device *device, void *pkt)
+{
+	u32 rx_req_is_set = 0;
+	struct vidc_iface_q_info *q_info;
+	int result = -EPERM;
+
+	if (!device || !pkt) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock(&device->write_lock);
+	q_info = &device->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	/*
+	 * The address of an array element can never be NULL (the old
+	 * '!q_info' test was always false); check the queue backing
+	 * store instead, mirroring the msgq/dbgq read paths, so an
+	 * uninitialized command queue is rejected here.
+	 */
+	if (!q_info->q_array.align_virtual_addr) {
+		HAL_MSG_ERROR("cannot write to shared Q's");
+		goto err_q_write;
+	}
+
+	if (!write_queue(q_info, (u8 *)pkt, &rx_req_is_set)) {
+		/* Kick the firmware only when it asked for an interrupt. */
+		if (rx_req_is_set)
+			write_register(device->hal_data->register_base_addr,
+				VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT, 0);
+		result = 0;
+	} else {
+		HAL_MSG_ERROR("vidc_hal_iface_cmdq_write:queue_full");
+	}
+err_q_write:
+	spin_unlock(&device->write_lock);
+	return result;
+}
+
+/*
+ * Read one message packet from the firmware->host message ring under
+ * the device read lock, ringing the H2A soft interrupt if the
+ * firmware asked to be told when space frees up.  Returns 0 on
+ * success, -EINVAL on bad args, -ENODATA when the queue is missing
+ * or empty.
+ */
+int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct vidc_iface_q_info *q_info;
+
+	/* 'device' is dereferenced below, so it must be checked too
+	 * (the original validated only 'pkt'); this matches the
+	 * command-queue write path. */
+	if (!device || !pkt) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+	spin_lock(&device->read_lock);
+	if (device->iface_queues[VIDC_IFACEQ_MSGQ_IDX].
+		q_array.align_virtual_addr == 0) {
+		HAL_MSG_ERROR("cannot read from shared MSG Q's");
+		rc = -ENODATA;
+		goto read_error;
+	}
+	q_info = &device->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+
+	if (!read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			write_register(device->hal_data->register_base_addr,
+				VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT, 0);
+		rc = 0;
+	} else {
+		HAL_MSG_ERROR("vidc_hal_iface_msgq_read:queue_empty");
+		rc = -ENODATA;
+	}
+read_error:
+	spin_unlock(&device->read_lock);
+	return rc;
+}
+
+/*
+ * Read one packet from the firmware->host debug ring under the device
+ * read lock.  Same contract as vidc_hal_iface_msgq_read(); an empty
+ * debug queue is routine, hence the lower log severity.
+ */
+int vidc_hal_iface_dbgq_read(struct hal_device *device, void *pkt)
+{
+	u32 tx_req_is_set = 0;
+	int rc = 0;
+	struct vidc_iface_q_info *q_info;
+
+	/* 'device' is dereferenced below, so it must be checked too
+	 * (the original validated only 'pkt'). */
+	if (!device || !pkt) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+	spin_lock(&device->read_lock);
+	if (device->iface_queues[VIDC_IFACEQ_DBGQ_IDX].
+		q_array.align_virtual_addr == 0) {
+		HAL_MSG_ERROR("cannot read from shared DBG Q's");
+		rc = -ENODATA;
+		goto dbg_error;
+	}
+	q_info = &device->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	if (!read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+		if (tx_req_is_set)
+			write_register(device->hal_data->register_base_addr,
+				VIDC_CPU_IC_SOFTINT,
+				1 << VIDC_CPU_IC_SOFTINT_H2A_SHFT, 0);
+		rc = 0;
+	} else {
+		HAL_MSG_MEDIUM("vidc_hal_iface_dbgq_read:queue_empty");
+		rc = -ENODATA;
+	}
+dbg_error:
+	spin_unlock(&device->read_lock);
+	return rc;
+}
+
+/* Initialize a shared-queue header to its empty/armed defaults:
+ * rx watermark/request set so the peer interrupts on first write,
+ * indices zeroed, type filled in later by the caller. */
+static void vidc_hal_set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
+{
+	q_hdr->qhdr_status = 0x1;
+	q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
+	q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE;
+	q_hdr->qhdr_pkt_size = 0;
+	q_hdr->qhdr_rx_wm = 0x1;
+	q_hdr->qhdr_tx_wm = 0x1;
+	q_hdr->qhdr_rx_req = 0x1;
+	q_hdr->qhdr_tx_req = 0x0;
+	q_hdr->qhdr_rx_irq_status = 0x0;
+	q_hdr->qhdr_tx_irq_status = 0x0;
+	q_hdr->qhdr_read_idx = 0x0;
+	q_hdr->qhdr_write_idx = 0x0;
+}
+
+/* Tear down all interface queues: free each queue's backing memory,
+ * then the queue table, then destroy the smem client itself.
+ * Inverse of vidc_hal_interface_queues_init(). */
+static void vidc_hal_interface_queues_release(struct hal_device *device)
+{
+	int i;
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		vidc_hal_free(device->hal_client,
+			device->iface_queues[i].q_array.mem_data);
+		device->iface_queues[i].q_hdr = NULL;
+		device->iface_queues[i].q_array.mem_data = NULL;
+		device->iface_queues[i].q_array.align_virtual_addr = NULL;
+		device->iface_queues[i].q_array.align_device_addr = NULL;
+	}
+	vidc_hal_free(device->hal_client,
+			device->iface_q_table.mem_data);
+	device->iface_q_table.align_virtual_addr = NULL;
+	device->iface_q_table.align_device_addr = NULL;
+	msm_smem_delete_client(device->hal_client);
+	device->hal_client = NULL;
+}
+
+/*
+ * Allocate and publish the host<->firmware interface queues: the
+ * queue table, one ring per queue (cmd/msg/dbg), default headers,
+ * per-queue type bits, and finally the table's device address via
+ * VIDC_CPU_CS_SCIACMDARG2 (write_register applies the virtio bias
+ * fixup) followed by the "table ready" flag in SCIACMDARG1.
+ * Returns 0 or -ENOMEM (partial allocations are released).
+ */
+static int vidc_hal_interface_queues_init(struct hal_device *dev)
+{
+	struct hfi_queue_table_header *q_tbl_hdr;
+	struct hfi_queue_header *q_hdr;
+	u8 i;
+	int rc = 0;
+	struct vidc_iface_q_info *iface_q;
+
+	rc = vidc_hal_alloc((void *) &dev->iface_q_table,
+			dev->hal_client,
+			VIDC_IFACEQ_TABLE_SIZE, 1, 0);
+	if (rc) {
+		HAL_MSG_ERROR("%s:iface_q_table_alloc_fail", __func__);
+		return -ENOMEM;
+	}
+	q_tbl_hdr = (struct hfi_queue_table_header *)
+			dev->iface_q_table.align_virtual_addr;
+	q_tbl_hdr->qtbl_version = 0;
+	q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
+	q_tbl_hdr->qtbl_qhdr0_offset = sizeof(
+		struct hfi_queue_table_header);
+	q_tbl_hdr->qtbl_qhdr_size = sizeof(
+		struct hfi_queue_header);
+	q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
+	q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
+
+	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
+		iface_q = &dev->iface_queues[i];
+		rc = vidc_hal_alloc((void *) &iface_q->q_array,
+				dev->hal_client, VIDC_IFACEQ_QUEUE_SIZE,
+				1, 0);
+		if (rc) {
+			HAL_MSG_ERROR("%s:iface_q_table_alloc[%d]_fail",
+						__func__, i);
+			/* Releases the table and any earlier rings too. */
+			vidc_hal_interface_queues_release(dev);
+			return -ENOMEM;
+		} else {
+			iface_q->q_hdr =
+				VIDC_IFACEQ_GET_QHDR_START_ADDR(
+			dev->iface_q_table.align_virtual_addr, i);
+			vidc_hal_set_queue_hdr_defaults(iface_q->q_hdr);
+		}
+	}
+
+	/* Stamp each header with its ring address and direction/type. */
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)
+		iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)
+		iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+
+	iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
+	q_hdr = iface_q->q_hdr;
+	q_hdr->qhdr_start_addr = (u32)
+		iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_CPU_CS_SCIACMDARG2,
+			(u32) dev->iface_q_table.align_device_addr,
+			dev->iface_q_table.align_virtual_addr);
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_CPU_CS_SCIACMDARG1, 0x01,
+			dev->iface_q_table.align_virtual_addr);
+	return 0;
+}
+
+/*
+ * Unmask wrapper interrupts, set the CPU-go bit (SCIACMDARG3) and poll
+ * SCIACMDARG0 until the firmware reports non-zero status.
+ * Returns 0 on success, -ETIME after 25 polls.
+ * NOTE(review): the poll loop has no delay between reads and 25 is a
+ * magic retry count — consider a udelay() and a named constant.
+ */
+static int vidc_hal_core_start_cpu(struct hal_device *device)
+{
+	u32 ctrl_status = 0, count = 0, rc = 0;
+	write_register(device->hal_data->register_base_addr,
+			VIDC_WRAPPER_INTR_MASK, 0, 0);
+	write_register(device->hal_data->register_base_addr,
+			VIDC_CPU_CS_SCIACMDARG3, 1, 0);
+	while (!ctrl_status && count < 25) {
+		ctrl_status = read_register(
+		device->hal_data->register_base_addr,
+		VIDC_CPU_CS_SCIACMDARG0);
+		count++;
+	}
+	if (count >= 25)
+		rc = -ETIME;
+	return rc;
+}
+
+/*
+ * Bring up the HAL for one core: enable the IRQ, initialize session
+ * list and queue locks, create the smem client, build the interface
+ * queues, start the firmware CPU and send HFI_CMD_SYS_INIT.
+ * Returns 0 on success; on any failure the IRQ is disabled again.
+ * A second call while hal_client exists fails with -EEXIST.
+ */
+int vidc_hal_core_init(void *device)
+{
+	struct hfi_cmd_sys_init_packet pkt;
+	int rc = 0;
+	struct hal_device *dev;
+
+	if (device) {
+		dev = device;
+	} else {
+		HAL_MSG_ERROR("%s:invalid device", __func__);
+		return -ENODEV;
+	}
+	enable_irq(dev->hal_data->irq);
+	INIT_LIST_HEAD(&dev->sess_head);
+	spin_lock_init(&dev->read_lock);
+	spin_lock_init(&dev->write_lock);
+
+	if (!dev->hal_client) {
+		dev->hal_client = msm_smem_new_client(SMEM_ION);
+		if (dev->hal_client == NULL) {
+			HAL_MSG_ERROR("Failed to alloc ION_Client");
+			rc = -ENODEV;
+			goto err_no_mem;
+		}
+
+		/* NOTE(review): informational print issued at ERROR
+		 * severity — consider HAL_MSG_INFO. */
+		HAL_MSG_ERROR("Device_Virt_Address : 0x%x,"
+		"Register_Virt_Addr: 0x%x",
+		(u32) dev->hal_data->device_base_addr,
+		(u32) dev->hal_data->register_base_addr);
+
+		rc = vidc_hal_interface_queues_init(dev);
+		if (rc) {
+			HAL_MSG_ERROR("failed to init queues");
+			rc = -ENOMEM;
+			goto err_no_mem;
+		}
+	} else {
+		HAL_MSG_ERROR("hal_client exists");
+		rc = -EEXIST;
+		goto err_no_mem;
+	}
+	rc = vidc_hal_core_start_cpu(dev);
+	if (rc) {
+		HAL_MSG_ERROR("Failed to start core");
+		rc = -ENODEV;
+		goto err_no_dev;
+	}
+	pkt.size = sizeof(struct hfi_cmd_sys_init_packet);
+	pkt.packet = HFI_CMD_SYS_INIT;
+	if (vidc_hal_iface_cmdq_write(dev, &pkt)) {
+		rc = -ENOTEMPTY;
+		goto err_write_fail;
+	}
+	return rc;
+/* All error paths share the same cleanup: drop the IRQ we enabled. */
+err_no_dev:
+err_write_fail:
+err_no_mem:
+	disable_irq_nosync(dev->hal_data->irq);
+	return rc;
+}
+
+/*
+ * Shut the HAL down: clear the CPU-go bit (SCIACMDARG3), disable the
+ * IRQ and release all interface queues/smem state.
+ * Inverse of vidc_hal_core_init(); returns 0 or -ENODEV.
+ */
+int vidc_hal_core_release(void *device)
+{
+	struct hal_device *dev;
+	if (device) {
+		dev = device;
+	} else {
+		HAL_MSG_ERROR("%s:invalid device", __func__);
+		return -ENODEV;
+	}
+	write_register(dev->hal_data->register_base_addr,
+			VIDC_CPU_CS_SCIACMDARG3, 0, 0);
+	disable_irq_nosync(dev->hal_data->irq);
+	vidc_hal_interface_queues_release(dev);
+	HAL_MSG_INFO("\nHAL exited\n");
+	return 0;
+}
+
+/*
+ * Ask the firmware to prepare for power collapse by queueing an
+ * HFI_CMD_SYS_PC_PREP packet.  Returns 0, -ENODEV on a NULL device
+ * or -ENOTEMPTY when the command queue write fails.
+ */
+int vidc_hal_core_pc_prep(void *device)
+{
+	struct hfi_cmd_sys_pc_prep_packet pkt;
+	int rc = 0;
+	struct hal_device *dev;
+
+	if (device) {
+		dev = device;
+	} else {
+		HAL_MSG_ERROR("%s:invalid device", __func__);
+		return -ENODEV;
+	}
+	pkt.size = sizeof(struct hfi_cmd_sys_pc_prep_packet);
+	pkt.packet_type = HFI_CMD_SYS_PC_PREP;
+	if (vidc_hal_iface_cmdq_write(dev, &pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+/*
+ * Read and acknowledge the wrapper interrupt status: record genuine
+ * A2H/A2HWD interrupts in device->intr_status (consumed later by the
+ * response handler), count spurious ones, then clear both the A2H
+ * soft interrupt and the wrapper status.  No-op if no callback is
+ * registered.  NOTE(review): routine interrupt traffic is logged at
+ * ERROR severity — consider a lower level.
+ */
+static void vidc_hal_core_clear_interrupt(struct hal_device *device)
+{
+	u32 intr_status = 0;
+
+	if (!device->callback)
+		return;
+
+	intr_status = read_register(
+		device->hal_data->register_base_addr,
+		VIDC_WRAPPER_INTR_STATUS);
+
+	if ((intr_status & VIDC_WRAPPER_INTR_STATUS_A2H_BMSK) ||
+		(intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK)) {
+		device->intr_status |= intr_status;
+		HAL_MSG_ERROR("INTERRUPT for device: 0x%x: "
+			"times: %d interrupt_status: %d",
+			(u32) device, ++device->reg_count, intr_status);
+	} else {
+		HAL_MSG_ERROR("SPURIOUS_INTR for device: 0x%x: "
+			"times: %d interrupt_status: %d",
+			(u32) device, ++device->spur_count, intr_status);
+	}
+	write_register(device->hal_data->register_base_addr,
+			VIDC_CPU_CS_A2HSOFTINTCLR, 1, 0);
+	write_register(device->hal_data->register_base_addr,
+			VIDC_WRAPPER_INTR_CLEAR, intr_status, 0);
+	HAL_MSG_ERROR("Cleared WRAPPER/A2H interrupt");
+}
+
+/*
+ * Hand a system resource to the firmware via HFI_CMD_SYS_SET_RESOURCE.
+ * The packet is assembled in a stack buffer and the resource payload
+ * appended after the fixed header; only VIDC_RESOURCE_OCMEM is
+ * currently implemented, other ids are logged and ignored.
+ * Returns 0, -EINVAL on bad args, -ENOTEMPTY on a queue-write failure.
+ */
+int vidc_hal_core_set_resource(void *device,
+	struct vidc_resource_hdr *resource_hdr, void *resource_value)
+{
+	struct hfi_cmd_sys_set_resource_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
+	struct hal_device *dev;
+
+	if (!device || !resource_hdr || !resource_value) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		dev = device;
+	}
+
+	pkt = (struct hfi_cmd_sys_set_resource_packet *) packet;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_resource_packet);
+	pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
+	pkt->resource_handle = resource_hdr->resource_handle;
+
+	switch (resource_hdr->resource_id) {
+	case VIDC_RESOURCE_OCMEM:
+	{
+		struct hfi_resource_ocmem_type *hfioc_mem =
+			(struct hfi_resource_ocmem_type *)
+			&pkt->rg_resource_data[0];
+		struct vidc_mem_addr *vidc_oc_mem =
+			(struct vidc_mem_addr *) resource_value;
+
+		pkt->resource_type = HFI_RESOURCE_OCMEM;
+		hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
+		hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
+		/* Grow the declared size to cover the appended payload. */
+		pkt->size += sizeof(struct hfi_resource_ocmem_type);
+		if (vidc_hal_iface_cmdq_write(dev, pkt))
+			rc = -ENOTEMPTY;
+		break;
+	}
+	default:
+		HAL_MSG_INFO("In %s called for resource %d",
+					 __func__, resource_hdr->resource_id);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Tell the firmware to release a previously set system resource via
+ * HFI_CMD_SYS_RELEASE_RESOURCE.  Returns 0, -EINVAL on bad args,
+ * -ENOTEMPTY on a queue-write failure.
+ */
+int vidc_hal_core_release_resource(void *device,
+			struct vidc_resource_hdr *resource_hdr)
+{
+	struct hfi_cmd_sys_release_resource_packet pkt;
+	int rc = 0;
+	struct hal_device *dev;
+
+	if (!device || !resource_hdr) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		dev = device;
+	}
+
+	pkt.size = sizeof(struct hfi_cmd_sys_release_resource_packet);
+	pkt.packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+	pkt.resource_type = resource_hdr->resource_id;
+	pkt.resource_handle = resource_hdr->resource_handle;
+
+	if (vidc_hal_iface_cmdq_write(dev, &pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+/*
+ * Queue an HFI_CMD_SYS_PING liveness probe to the firmware.
+ * Returns 0, -ENODEV on a NULL device, -ENOTEMPTY when the command
+ * queue write fails.
+ */
+int vidc_hal_core_ping(void *device)
+{
+	struct hfi_cmd_sys_ping_packet pkt;
+	int rc = 0;
+	struct hal_device *dev;
+
+	if (device) {
+		dev = device;
+	} else {
+		HAL_MSG_ERROR("%s:invalid device", __func__);
+		return -ENODEV;
+	}
+	pkt.size = sizeof(struct hfi_cmd_sys_ping_packet);
+	pkt.packet_type = HFI_CMD_SYS_PING;
+
+	if (vidc_hal_iface_cmdq_write(dev, &pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+int vidc_hal_session_set_property(void *sess,
+ enum hal_property ptype, void *pdata)
+{
+ u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+ struct hfi_cmd_session_set_property_packet *pkt =
+ (struct hfi_cmd_session_set_property_packet *) &packet;
+ struct hal_session *session;
+
+ if (!sess || !pdata) {
+ HAL_MSG_ERROR("Invalid Params in %s", __func__);
+ return -EINVAL;
+ } else {
+ session = sess;
+ }
+
+ HAL_MSG_INFO("IN func: %s, with property id: %d", __func__, ptype);
+ pkt->size = sizeof(struct hfi_cmd_session_set_property_packet)
+ - sizeof(u32);
+ pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->session_id = (u32) session;
+ pkt->num_properties = 1;
+
+ switch (ptype) {
+ case HAL_CONFIG_FRAME_RATE:
+ {
+ struct hfi_frame_rate *hfi_fps;
+ pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ hfi_fps = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
+ memcpy(hfi_fps, (struct hfi_frame_rate *)
+ pdata, sizeof(struct hfi_frame_rate));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_frame_rate);
+ break;
+ }
+ case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
+ {
+ struct hfi_uncompressed_format_select *hfi_buf_fmt;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ hfi_buf_fmt =
+ (struct hfi_uncompressed_format_select *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi_buf_fmt, (struct hfi_uncompressed_format_select *)
+ pdata, sizeof(struct hfi_uncompressed_format_select));
+ pkt->size += sizeof(u32) + sizeof(struct
+ hfi_uncompressed_format_select);
+ break;
+ }
+ case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO:
+ break;
+ case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
+ break;
+ case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
+ break;
+ case HAL_PARAM_FRAME_SIZE:
+ {
+ struct hfi_frame_size *hfi_rect;
+ pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ hfi_rect = (struct hfi_frame_size *) &pkt->rg_property_data[1];
+ memcpy(hfi_rect, (struct hfi_frame_size *) pdata,
+ sizeof(struct hfi_frame_size));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
+ break;
+ }
+ case HAL_CONFIG_REALTIME:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_REALTIME;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_enable *) pdata,
+ sizeof(struct hfi_enable));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+ case HAL_PARAM_BUFFER_COUNT_ACTUAL:
+ {
+ struct hfi_buffer_count_actual *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ hfi = (struct hfi_buffer_count_actual *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_buffer_count_actual *) pdata,
+ sizeof(struct hfi_buffer_count_actual));
+ pkt->size += sizeof(u32) + sizeof(struct
+ hfi_buffer_count_actual);
+ break;
+ }
+ case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
+ {
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
+ pkt->rg_property_data[1] = (enum HFI_NAL_STREAM_FORMAT)pdata;
+ pkt->size += sizeof(u32) + sizeof(enum HFI_NAL_STREAM_FORMAT);
+ break;
+ }
+ case HAL_PARAM_VDEC_OUTPUT_ORDER:
+ {
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
+ pkt->rg_property_data[1] = (enum HFI_OUTPUT_ORDER)pdata;
+ pkt->size += sizeof(u32) + sizeof(enum HFI_OUTPUT_ORDER);
+ break;
+ }
+ case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
+ {
+ struct hfi_enable_picture *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
+ hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
+ hfi->picture_type = (u32) pdata;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable_picture);
+ break;
+ }
+ case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_enable *) pdata,
+ sizeof(struct hfi_enable));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+ case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_enable *) pdata,
+ sizeof(struct hfi_enable));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+ case HAL_PARAM_VDEC_MULTI_STREAM:
+ {
+ struct hfi_multi_stream *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_multi_stream *)pdata,
+ sizeof(struct hfi_multi_stream));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_multi_stream);
+ break;
+ }
+ case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
+ {
+ struct hfi_display_picture_buffer_count *hfi_disp_buf;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
+ hfi_disp_buf = (struct hfi_display_picture_buffer_count *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi_disp_buf,
+ (struct hfi_display_picture_buffer_count *)pdata,
+ sizeof(struct hfi_display_picture_buffer_count));
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_display_picture_buffer_count);
+ break;
+ }
+ case HAL_PARAM_DIVX_FORMAT:
+ {
+ pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
+ pkt->rg_property_data[1] = (enum HFI_DIVX_FORMAT)pdata;
+ pkt->size += sizeof(u32) + sizeof(enum HFI_DIVX_FORMAT);
+ break;
+ }
+ case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
+ {
+ struct hfi_enable *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+ hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_enable *) pdata,
+ sizeof(struct hfi_enable));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+ case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+ {
+ struct hfi_enable *enable;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ enable = (struct hfi_enable *) &pkt->rg_property_data[1];
+ memcpy(enable, (struct hfi_enable *) pdata,
+ sizeof(struct hfi_enable));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+ case HAL_CONFIG_VENC_REQUEST_IFRAME:
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME;
+ break;
+ case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
+ break;
+ case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
+ break;
+ case HAL_CONFIG_VENC_TARGET_BITRATE:
+ {
+ struct hfi_bitrate *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+ hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
+ hfi->bit_rate = ((struct hfi_bitrate *)pdata)->bit_rate;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_bitrate);
+ break;
+ }
+ case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+ {
+ struct hfi_profile_level *hfi_profile_level;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ hfi_profile_level = (struct hfi_profile_level *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi_profile_level, (struct hfi_profile_level *) pdata,
+ sizeof(struct hfi_profile_level));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
+ break;
+ }
+ case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
+ {
+ struct hfi_h264_entropy_control *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
+ hfi = (struct hfi_h264_entropy_control *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_h264_entropy_control *) pdata,
+ sizeof(struct hfi_h264_entropy_control));
+ pkt->size += sizeof(u32) + sizeof(
+ struct hfi_h264_entropy_control);
+ break;
+ }
+ case HAL_PARAM_VENC_RATE_CONTROL:
+ {
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+ pkt->rg_property_data[1] = (enum HFI_RATE_CONTROL)pdata;
+ pkt->size += sizeof(u32) + sizeof(enum HFI_RATE_CONTROL);
+ break;
+ }
+ case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
+ {
+ struct hfi_mpeg4_time_resolution *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION;
+ hfi = (struct hfi_mpeg4_time_resolution *)
+ &pkt->rg_property_data[1];
+ hfi->time_increment_resolution =
+ ((struct hal_mpeg4_time_resolution *)pdata)->
+ time_increment_resolution;
+ pkt->size += sizeof(u32) + sizeof(
+ struct hfi_mpeg4_time_resolution);
+ break;
+ }
+ case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
+ {
+ struct hfi_mpeg4_header_extension *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
+ hfi = (struct hfi_mpeg4_header_extension *)
+ &pkt->rg_property_data[1];
+ hfi->header_extension = (u32) pdata;
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_mpeg4_header_extension);
+ break;
+ }
+ case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
+ {
+ struct hfi_h264_db_control *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_h264_db_control *) pdata,
+ sizeof(struct hfi_h264_db_control));
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_h264_db_control);
+ break;
+ }
+ case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
+ {
+ struct hfi_temporal_spatial_tradeoff *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
+ hfi = (struct hfi_temporal_spatial_tradeoff *)
+ &pkt->rg_property_data[1];
+ hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
+ pdata)->ts_factor;
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_temporal_spatial_tradeoff);
+ break;
+ }
+ case HAL_PARAM_VENC_SESSION_QP:
+ {
+ struct hfi_quantization *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_SESSION_QP;
+ hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_quantization *) pdata,
+ sizeof(struct hfi_quantization));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
+ break;
+ }
+ case HAL_CONFIG_VENC_INTRA_PERIOD:
+ {
+ struct hfi_intra_period *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+ hfi = (struct hfi_intra_period *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_intra_period *) pdata,
+ sizeof(struct hfi_intra_period));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_intra_period);
+ break;
+ }
+ case HAL_CONFIG_VENC_IDR_PERIOD:
+ {
+ struct hfi_idr_period *hfi;
+ pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+ hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
+ hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_idr_period);
+ break;
+ }
+ case HAL_CONFIG_VPE_OPERATIONS:
+ break;
+ case HAL_PARAM_VENC_INTRA_REFRESH:
+ {
+ struct hfi_intra_refresh *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+ hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_intra_refresh *) pdata,
+ sizeof(struct hfi_intra_refresh));
+ pkt->size += sizeof(u32) + sizeof(struct hfi_intra_refresh);
+ break;
+ }
+ case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
+ {
+ struct hfi_multi_slice_control *hfi;
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
+ hfi = (struct hfi_multi_slice_control *)
+ &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_multi_slice_control *) pdata,
+ sizeof(struct hfi_multi_slice_control));
+ pkt->size += sizeof(u32) + sizeof(struct
+ hfi_multi_slice_control);
+ break;
+ }
+ case HAL_CONFIG_VPE_DEINTERLACE:
+ break;
+ case HAL_SYS_DEBUG_CONFIG:
+ {
+ struct hfi_debug_config *hfi;
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+ hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
+ memcpy(hfi, (struct hfi_debug_config *) pdata,
+ sizeof(struct hfi_debug_config));
+ pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+ sizeof(struct hfi_debug_config);
+ break;
+ }
+ /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
+ case HAL_CONFIG_BUFFER_REQUIREMENTS:
+ case HAL_CONFIG_PRIORITY:
+ case HAL_CONFIG_BATCH_INFO:
+ case HAL_PARAM_METADATA_PASS_THROUGH:
+ case HAL_SYS_IDLE_INDICATOR:
+ case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED:
+ case HAL_PARAM_CHROMA_SITE:
+ case HAL_PARAM_PROPERTIES_SUPPORTED:
+ case HAL_PARAM_PROFILE_LEVEL_SUPPORTED:
+ case HAL_PARAM_CAPABILITY_SUPPORTED:
+ case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+ case HAL_PARAM_MULTI_VIEW_FORMAT:
+ case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+ case HAL_PARAM_CODEC_SUPPORTED:
+ case HAL_PARAM_VDEC_MULTI_VIEW_SELECT:
+ case HAL_PARAM_VDEC_MB_QUANTIZATION:
+ case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
+ case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
+ case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
+ case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
+
+ case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
+ case HAL_CONFIG_VDEC_MULTI_STREAM:
+ case HAL_PARAM_VENC_MULTI_SLICE_INFO:
+ case HAL_CONFIG_VENC_TIMESTAMP_SCALE:
+ case HAL_PARAM_VENC_LOW_LATENCY:
+ default:
+ HAL_MSG_INFO("DEFAULT: Calling 0x%x", ptype);
+ break;
+ }
+ if (vidc_hal_iface_cmdq_write(session->device, pkt))
+ return -ENOTEMPTY;
+ return 0;
+}
+
+/*
+ * vidc_hal_session_get_property - request a property read for a session.
+ * @sess:  opaque struct hal_session handle from vidc_hal_session_init()
+ * @ptype: property identifier to query
+ * @pdata: caller-supplied result buffer (only NULL-checked here)
+ *
+ * NOTE(review): every recognised property case currently just breaks out
+ * of the switch without building or sending a GET_PROPERTY packet, so this
+ * function is effectively a stub that validates arguments and returns 0;
+ * confirm whether the firmware query is still to be implemented.
+ *
+ * Returns 0, or -EINVAL on invalid arguments.
+ */
+int vidc_hal_session_get_property(void *sess,
+	enum hal_property ptype, void *pdata)
+{
+	struct hal_session *session;
+
+	if (!sess || !pdata) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		session = sess;
+	}
+	HAL_MSG_INFO("IN func: %s, with property id: %d", __func__, ptype);
+
+	/* Recognised properties: intentionally no-ops for now (see note
+	 * above); only unknown values fall through to the default log. */
+	switch (ptype) {
+	case HAL_CONFIG_FRAME_RATE:
+		break;
+	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
+		break;
+	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO:
+		break;
+	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
+		break;
+	case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
+		break;
+	case HAL_PARAM_FRAME_SIZE:
+		break;
+	case HAL_CONFIG_REALTIME:
+		break;
+	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
+		break;
+	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
+		break;
+	case HAL_PARAM_VDEC_OUTPUT_ORDER:
+		break;
+	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
+		break;
+	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
+		break;
+	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
+		break;
+	case HAL_PARAM_VDEC_MULTI_STREAM:
+		break;
+	case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT:
+		break;
+	case HAL_PARAM_DIVX_FORMAT:
+		break;
+	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
+		break;
+	case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+		break;
+	case HAL_CONFIG_VDEC_MB_ERROR_MAP:
+		break;
+	case HAL_CONFIG_VENC_REQUEST_IFRAME:
+		break;
+	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
+		break;
+	case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
+		break;
+	case HAL_CONFIG_VENC_TARGET_BITRATE:
+		break;
+	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
+		break;
+	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
+		break;
+	case HAL_PARAM_VENC_RATE_CONTROL:
+		break;
+	case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION:
+		break;
+	case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION:
+		break;
+	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
+		break;
+	case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
+		break;
+	case HAL_PARAM_VENC_SESSION_QP:
+		break;
+	case HAL_CONFIG_VENC_INTRA_PERIOD:
+		break;
+	case HAL_CONFIG_VENC_IDR_PERIOD:
+		break;
+	case HAL_CONFIG_VPE_OPERATIONS:
+		break;
+	case HAL_PARAM_VENC_INTRA_REFRESH:
+		break;
+	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
+		break;
+	case HAL_CONFIG_VPE_DEINTERLACE:
+		break;
+	case HAL_SYS_DEBUG_CONFIG:
+		break;
+	/*FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET*/
+	case HAL_CONFIG_BUFFER_REQUIREMENTS:
+	case HAL_CONFIG_PRIORITY:
+	case HAL_CONFIG_BATCH_INFO:
+	case HAL_PARAM_METADATA_PASS_THROUGH:
+	case HAL_SYS_IDLE_INDICATOR:
+	case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+	case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED:
+	case HAL_PARAM_CHROMA_SITE:
+	case HAL_PARAM_PROPERTIES_SUPPORTED:
+	case HAL_PARAM_PROFILE_LEVEL_SUPPORTED:
+	case HAL_PARAM_CAPABILITY_SUPPORTED:
+	case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+	case HAL_PARAM_MULTI_VIEW_FORMAT:
+	case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+	case HAL_PARAM_CODEC_SUPPORTED:
+	case HAL_PARAM_VDEC_MULTI_VIEW_SELECT:
+	case HAL_PARAM_VDEC_MB_QUANTIZATION:
+	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
+	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
+	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
+	case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
+
+	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
+	case HAL_CONFIG_VDEC_MULTI_STREAM:
+	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
+	case HAL_CONFIG_VENC_TIMESTAMP_SCALE:
+	case HAL_PARAM_VENC_LOW_LATENCY:
+	default:
+		HAL_MSG_INFO("DEFAULT: Calling 0x%x", ptype);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * vidc_hal_session_init - allocate a HAL session, add it to the device's
+ * session list and send HFI_CMD_SYS_SESSION_INIT to firmware.
+ * @device:       struct hal_device handle for the target core
+ * @session_id:   client-chosen identifier stored in the session
+ * @session_type: 1 = encoder, 2 = decoder (per the checks below)
+ * @codec_type:   codec to run on this session
+ *
+ * Returns an opaque session handle, or NULL on failure.
+ */
+void *vidc_hal_session_init(void *device, u32 session_id,
+	enum hal_domain session_type, enum hal_video_codec codec_type)
+{
+	struct hfi_cmd_sys_session_init_packet pkt;
+	struct hal_session *new_session;
+	struct hal_device *dev;
+
+	if (device) {
+		dev = device;
+	} else {
+		HAL_MSG_ERROR("%s:invalid device", __func__);
+		return NULL;
+	}
+
+	/* Fix: the allocation result was previously dereferenced without a
+	 * NULL check, crashing on allocation failure. */
+	new_session = (struct hal_session *)
+		kzalloc(sizeof(struct hal_session), GFP_KERNEL);
+	if (!new_session) {
+		HAL_MSG_ERROR("%s:failed to allocate session", __func__);
+		return NULL;
+	}
+	new_session->session_id = (u32) session_id;
+	if (session_type == 1)
+		new_session->is_decoder = 0;
+	else if (session_type == 2)
+		new_session->is_decoder = 1;
+	new_session->device = dev;
+	list_add_tail(&new_session->list, &dev->sess_head);
+	pkt.size = sizeof(struct hfi_cmd_sys_session_init_packet);
+	pkt.packet = HFI_CMD_SYS_SESSION_INIT;
+	pkt.session_id = (u32) new_session;
+	pkt.session_domain = session_type;
+	pkt.session_codec = codec_type;
+	if (vidc_hal_iface_cmdq_write(dev, &pkt)) {
+		/* Fix: unlink and free the dead session instead of leaking
+		 * it on the device list when the queue write fails. */
+		list_del(&new_session->list);
+		kfree(new_session);
+		return NULL;
+	}
+	return (void *) new_session;
+}
+
+/*
+ * vidc_hal_send_session_cmd - build a bare session command packet of the
+ * given type and queue it to firmware.
+ * @session_id: opaque struct hal_session handle
+ * @pkt_type:   HFI command to send
+ *
+ * Returns 0 on success, -ENODEV for a NULL session, -ENOTEMPTY if the
+ * command queue write fails.
+ */
+static int vidc_hal_send_session_cmd(void *session_id,
+	 enum HFI_COMMAND pkt_type)
+{
+	struct vidc_hal_session_cmd_pkt pkt;
+	struct hal_session *session;
+
+	if (!session_id) {
+		HAL_MSG_ERROR("%s:invalid session", __func__);
+		return -ENODEV;
+	}
+	session = session_id;
+
+	pkt.size = sizeof(struct vidc_hal_session_cmd_pkt);
+	pkt.packet_type = pkt_type;
+	pkt.session_id = (u32) session;
+
+	return vidc_hal_iface_cmdq_write(session->device, &pkt) ?
+		-ENOTEMPTY : 0;
+}
+
+/* Send HFI_CMD_SYS_SESSION_END for @session. */
+int vidc_hal_session_end(void *session)
+{
+	return vidc_hal_send_session_cmd(session,
+		HFI_CMD_SYS_SESSION_END);
+}
+
+/* Send HFI_CMD_SYS_SESSION_ABORT for @session. */
+int vidc_hal_session_abort(void *session)
+{
+	return vidc_hal_send_session_cmd(session,
+		HFI_CMD_SYS_SESSION_ABORT);
+}
+
+/*
+ * vidc_hal_session_set_buffers - register a set of buffers of one type
+ * with firmware via HFI_CMD_SESSION_SET_BUFFERS.
+ * @sess:        opaque struct hal_session handle
+ * @buffer_info: type, count, sizes and device addresses of the buffers
+ *
+ * Input buffers are a no-op here (queued per-frame via etb instead).
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_set_buffers(void *sess,
+	struct vidc_buffer_addr_info *buffer_info)
+{
+	struct hfi_cmd_session_set_buffers_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+	int rc = 0;
+	u16 i;
+	struct hal_session *session;
+
+	if (!sess || !buffer_info) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		session = sess;
+	}
+
+	if (buffer_info->buffer_type == HAL_BUFFER_INPUT)
+		return 0;
+
+	pkt = (struct hfi_cmd_session_set_buffers_packet *)packet;
+
+	pkt->packet_type = HFI_CMD_SESSION_SET_BUFFERS;
+	pkt->session_id = (u32) session;
+	pkt->buffer_mode = HFI_BUFFER_MODE_STATIC;
+	pkt->buffer_size = buffer_info->buffer_size;
+	pkt->min_buffer_size = buffer_info->buffer_size;
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	/* pkt->size is set per-branch below; the old unconditional
+	 * assignment before this if/else was always overwritten. */
+	if ((buffer_info->buffer_type == HAL_BUFFER_OUTPUT) ||
+		(buffer_info->buffer_type == HAL_BUFFER_OUTPUT2)) {
+		struct hfi_buffer_info *buff;
+		pkt->extradata_size = buffer_info->extradata_size;
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) -
+			sizeof(u32) + ((buffer_info->num_buffers) *
+			sizeof(struct hfi_buffer_info));
+		buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+		/* Fix: advance buff each iteration; previously only entry 0
+		 * was ever written and entries 1..n-1 went to firmware as
+		 * uninitialized stack bytes.
+		 * NOTE(review): every entry still receives the same base
+		 * address — confirm whether buffer_info should carry
+		 * per-buffer addresses. */
+		for (i = 0; i < pkt->num_buffers; i++, buff++) {
+			buff->buffer_addr =
+				buffer_info->align_device_addr;
+			buff->extradata_addr =
+				buffer_info->extradata_addr;
+		}
+	} else {
+		pkt->extradata_size = 0;
+		pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
+			((buffer_info->num_buffers - 1) * sizeof(u32));
+		for (i = 0; i < pkt->num_buffers; i++)
+			pkt->rg_buffer_info[i] =
+				buffer_info->align_device_addr;
+	}
+
+	if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_SCRATCH)
+		pkt->buffer_type = HFI_BUFFER_INTERNAL_SCRATCH;
+	else if (buffer_info->buffer_type == HAL_BUFFER_INTERNAL_PERSIST)
+		pkt->buffer_type = HFI_BUFFER_INTERNAL_PERSIST;
+	else
+		pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
+
+	if (vidc_hal_iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+/*
+ * vidc_hal_session_release_buffers - hand previously registered buffers
+ * back to the host via HFI_CMD_SESSION_RELEASE_BUFFERS.
+ * @sess:        opaque struct hal_session handle
+ * @buffer_info: type, count, sizes and device addresses of the buffers
+ *
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_release_buffers(void *sess,
+	struct vidc_buffer_addr_info *buffer_info)
+{
+	struct hfi_cmd_session_release_buffer_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE];
+	int rc = 0;
+	u32 i;
+	struct hal_session *session;
+
+	if (!sess || !buffer_info) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		session = sess;
+	}
+
+	if (buffer_info->buffer_type == HAL_BUFFER_INPUT)
+		return 0;
+
+	pkt = (struct hfi_cmd_session_release_buffer_packet *) packet;
+	pkt->packet_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+	pkt->session_id = (u32) session;
+	pkt->buffer_type = (enum HFI_BUFFER) buffer_info->buffer_type;
+	pkt->buffer_size = buffer_info->buffer_size;
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	if ((buffer_info->buffer_type == HAL_BUFFER_OUTPUT) ||
+		(buffer_info->buffer_type == HAL_BUFFER_OUTPUT2)) {
+		struct hfi_buffer_info *buff;
+		buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
+		/* Fix: advance buff each iteration; previously only entry 0
+		 * was written and the rest of the packet held stack
+		 * garbage. */
+		for (i = 0; i < pkt->num_buffers; i++, buff++) {
+			buff->buffer_addr =
+				buffer_info->align_device_addr;
+			buff->extradata_addr =
+				buffer_info->extradata_addr;
+		}
+		pkt->extradata_size = buffer_info->extradata_size;
+		/* Fix: size was computed from the SET_BUFFERS packet struct
+		 * (copy/paste); use the release packet struct. */
+		pkt->size =
+			sizeof(struct hfi_cmd_session_release_buffer_packet) -
+			sizeof(u32) + ((buffer_info->num_buffers) *
+			sizeof(struct hfi_buffer_info));
+	} else {
+		for (i = 0; i < pkt->num_buffers; i++)
+			pkt->rg_buffer_info[i] =
+				buffer_info->align_device_addr;
+		pkt->extradata_size = 0;
+		pkt->size =
+			sizeof(struct hfi_cmd_session_release_buffer_packet) +
+			((buffer_info->num_buffers - 1) * sizeof(u32));
+	}
+
+	if (vidc_hal_iface_cmdq_write(session->device, pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+/* Send HFI_CMD_SESSION_LOAD_RESOURCES for @sess. */
+int vidc_hal_session_load_res(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_LOAD_RESOURCES);
+}
+
+/* Send HFI_CMD_SESSION_RELEASE_RESOURCES for @sess. */
+int vidc_hal_session_release_res(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_RELEASE_RESOURCES);
+}
+
+/* Send HFI_CMD_SESSION_START for @sess. */
+int vidc_hal_session_start(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_START);
+}
+
+/* Send HFI_CMD_SESSION_STOP for @sess. */
+int vidc_hal_session_stop(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_STOP);
+}
+
+/* Send HFI_CMD_SESSION_SUSPEND for @sess. */
+int vidc_hal_session_suspend(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_SUSPEND);
+}
+
+/* Send HFI_CMD_SESSION_RESUME for @sess. */
+int vidc_hal_session_resume(void *sess)
+{
+	return vidc_hal_send_session_cmd(sess,
+		HFI_CMD_SESSION_RESUME);
+}
+
+/*
+ * vidc_hal_session_etb - "empty this buffer": queue one input frame to
+ * firmware. Decoder sessions send the compressed-buffer packet variant,
+ * encoder sessions the uncompressed-plane0 variant; the payload fields
+ * are otherwise filled identically from @input_frame.
+ *
+ * NOTE(review): the "### Q ... INPUT BUFFER ###" traces use
+ * HAL_MSG_ERROR, so they print at error level on every frame — these look
+ * like debug leftovers; consider HAL_MSG_INFO/LOW.
+ *
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_etb(void *sess, struct vidc_frame_data *input_frame)
+{
+	int rc = 0;
+	struct hal_session *session;
+
+	if (!sess || !input_frame) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		session = sess;
+	}
+
+	if (session->is_decoder) {
+		struct hfi_cmd_session_empty_buffer_compressed_packet pkt;
+		pkt.size = sizeof(
+			struct hfi_cmd_session_empty_buffer_compressed_packet);
+		pkt.packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+		pkt.session_id = (u32) session;
+		/* 64-bit timestamp split across two 32-bit packet fields */
+		pkt.timestamp_hi = (int) (((u64)input_frame->timestamp) >> 32);
+		pkt.timestamp_lo = (int) input_frame->timestamp;
+		pkt.flags = input_frame->flags;
+		pkt.mark_target = input_frame->mark_target;
+		pkt.mark_data = input_frame->mark_data;
+		pkt.offset = input_frame->offset;
+		pkt.alloc_len = input_frame->alloc_len;
+		pkt.filled_len = input_frame->filled_len;
+		pkt.input_tag = input_frame->clnt_data;
+		pkt.packet_buffer = (u8 *) input_frame->device_addr;
+		HAL_MSG_ERROR("### Q DECODER INPUT BUFFER ###");
+		if (vidc_hal_iface_cmdq_write(session->device, &pkt))
+			rc = -ENOTEMPTY;
+	} else {
+		struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
+			pkt;
+		pkt.size = sizeof(struct
+			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
+		pkt.packet = HFI_CMD_SESSION_EMPTY_BUFFER;
+		pkt.session_id = (u32) session;
+		pkt.view_id = 0;
+		pkt.timestamp_hi = (u32) (((u64)input_frame->timestamp) >> 32);
+		pkt.timestamp_lo = (u32) input_frame->timestamp;
+		pkt.flags = input_frame->flags;
+		pkt.mark_target = input_frame->mark_target;
+		pkt.mark_data = input_frame->mark_data;
+		pkt.offset = input_frame->offset;
+		pkt.alloc_len = input_frame->alloc_len;
+		pkt.filled_len = input_frame->filled_len;
+		pkt.input_tag = input_frame->clnt_data;
+		pkt.packet_buffer = (u8 *) input_frame->device_addr;
+		HAL_MSG_ERROR("### Q ENCODER INPUT BUFFER ###");
+		if (vidc_hal_iface_cmdq_write(session->device, &pkt))
+			rc = -ENOTEMPTY;
+	}
+	return rc;
+}
+
+/*
+ * vidc_hal_session_ftb - "fill this buffer": queue one output buffer to
+ * firmware via HFI_CMD_SESSION_FILL_BUFFER.
+ * @sess:         opaque struct hal_session handle
+ * @output_frame: buffer type, device address and extradata address
+ *
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_ftb(void *sess,
+	struct vidc_frame_data *output_frame)
+{
+	struct hfi_cmd_session_fill_buffer_packet pkt;
+	int rc = 0;
+	struct hal_session *session;
+
+	if (!sess || !output_frame) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	} else {
+		session = sess;
+	}
+
+	pkt.size = sizeof(struct hfi_cmd_session_fill_buffer_packet);
+	pkt.packet_type = HFI_CMD_SESSION_FILL_BUFFER;
+	pkt.session_id = (u32) session;
+	/* Fix: stream_id was left uninitialized (stack garbage sent to
+	 * firmware) whenever buffer_type was neither OUTPUT nor OUTPUT2;
+	 * default to stream 0 and only switch for OUTPUT2. */
+	if (output_frame->buffer_type == HAL_BUFFER_OUTPUT2)
+		pkt.stream_id = 1;
+	else
+		pkt.stream_id = 0;
+	pkt.packet_buffer = (u8 *) output_frame->device_addr;
+	pkt.extra_data_buffer =
+		(u8 *) output_frame->extradata_addr;
+
+	HAL_MSG_INFO("### Q OUTPUT BUFFER ###");
+	if (vidc_hal_iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+	return rc;
+}
+
+/*
+ * vidc_hal_session_parse_seq_hdr - ask firmware to parse the sequence
+ * header supplied in @seq_hdr for this session.
+ *
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_parse_seq_hdr(void *sess,
+	struct vidc_seq_hdr *seq_hdr)
+{
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hfi_cmd_session_parse_sequence_header_packet *pkt;
+	struct hal_session *session;
+
+	if (!sess || !seq_hdr) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+	session = sess;
+
+	pkt = (struct hfi_cmd_session_parse_sequence_header_packet *) packet;
+	pkt->size = sizeof(struct hfi_cmd_session_parse_sequence_header_packet);
+	pkt->packet_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
+	pkt->session_id = (u32) session;
+	pkt->header_len = seq_hdr->seq_hdr_len;
+	pkt->packet_buffer = seq_hdr->seq_hdr;
+
+	return vidc_hal_iface_cmdq_write(session->device, pkt) ?
+		-ENOTEMPTY : 0;
+}
+
+/*
+ * vidc_hal_session_get_seq_hdr - ask firmware to write this session's
+ * sequence header into the buffer described by @seq_hdr.
+ *
+ * Returns 0, -EINVAL on bad arguments, -ENOTEMPTY on queue-write failure.
+ */
+int vidc_hal_session_get_seq_hdr(void *sess,
+	struct vidc_seq_hdr *seq_hdr)
+{
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	struct hfi_cmd_session_get_sequence_header_packet *pkt;
+	struct hal_session *session;
+
+	if (!sess || !seq_hdr) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return -EINVAL;
+	}
+	session = sess;
+
+	pkt = (struct hfi_cmd_session_get_sequence_header_packet *) packet;
+	pkt->size = sizeof(struct hfi_cmd_session_get_sequence_header_packet);
+	pkt->packet_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
+	pkt->session_id = (u32) session;
+	pkt->buffer_len = seq_hdr->seq_hdr_len;
+	pkt->packet_buffer = seq_hdr->seq_hdr;
+
+	return vidc_hal_iface_cmdq_write(session->device, pkt) ?
+		-ENOTEMPTY : 0;
+}
+
+/*
+ * vidc_hal_session_get_buf_req - query firmware for this session's buffer
+ * requirements (GET_PROPERTY / CONFIG_BUFFER_REQUIREMENTS).
+ *
+ * Returns 0, -ENODEV for a NULL session, -ENOTEMPTY on queue-write
+ * failure.
+ */
+int vidc_hal_session_get_buf_req(void *sess)
+{
+	struct hfi_cmd_session_get_property_packet pkt;
+	struct hal_session *session;
+
+	if (!sess) {
+		HAL_MSG_ERROR("%s:invalid session", __func__);
+		return -ENODEV;
+	}
+	session = sess;
+
+	pkt.size = sizeof(struct hfi_cmd_session_get_property_packet);
+	pkt.packet_type = HFI_CMD_SESSION_GET_PROPERTY;
+	pkt.session_id = (u32) session;
+	pkt.num_properties = 1;
+	pkt.rg_property_data[0] = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
+
+	return vidc_hal_iface_cmdq_write(session->device, &pkt) ?
+		-ENOTEMPTY : 0;
+}
+
+/*
+ * vidc_hal_session_flush - request a flush of the given type on this
+ * session via HFI_CMD_SESSION_FLUSH.
+ *
+ * Returns 0, -ENODEV for a NULL session, -ENOTEMPTY on queue-write
+ * failure.
+ */
+int vidc_hal_session_flush(void *sess, enum hal_flush flush_mode)
+{
+	struct hfi_cmd_session_flush_packet pkt;
+	struct hal_session *session;
+
+	if (!sess) {
+		HAL_MSG_ERROR("%s:invalid session", __func__);
+		return -ENODEV;
+	}
+	session = sess;
+
+	pkt.size = sizeof(struct hfi_cmd_session_flush_packet);
+	pkt.packet_type = HFI_CMD_SESSION_FLUSH;
+	pkt.session_id = (u32) session;
+	pkt.flush_type = flush_mode;
+
+	return vidc_hal_iface_cmdq_write(session->device, &pkt) ?
+		-ENOTEMPTY : 0;
+}
+
+/*
+ * vidc_hal_check_core_registered - check whether a core with the given
+ * IRQ and overlapping firmware/register ranges is already registered.
+ *
+ * Returns 0 if a matching/overlapping device exists, -EINVAL otherwise
+ * (callers treat -EINVAL as "safe to add").
+ *
+ * Fix: the loop used to return -EINVAL as soon as the FIRST list entry
+ * failed to match, so devices after the first were never examined; now
+ * the whole list is scanned before declaring "not registered".
+ */
+static int vidc_hal_check_core_registered(
+	struct hal_device_data core, u32 fw_addr,
+	u32 reg_addr, u32 reg_size, u32 irq)
+{
+	struct hal_device *device;
+	struct list_head *curr, *next;
+
+	if (!core.dev_count) {
+		HAL_MSG_INFO("no device Registered");
+		return -EINVAL;
+	}
+
+	list_for_each_safe(curr, next, &core.dev_head) {
+		device = list_entry(curr, struct hal_device, list);
+		if (device && device->hal_data->irq == irq &&
+			(CONTAINS((u32)device->hal_data->
+					device_base_addr,
+					FIRMWARE_SIZE, fw_addr) ||
+			CONTAINS(fw_addr, FIRMWARE_SIZE,
+					(u32)device->hal_data->
+					device_base_addr) ||
+			CONTAINS((u32)device->hal_data->
+					register_base_addr,
+					reg_size, reg_addr) ||
+			CONTAINS(reg_addr, reg_size,
+					(u32)device->hal_data->
+					register_base_addr) ||
+			OVERLAPS((u32)device->hal_data->
+					register_base_addr,
+					reg_size, reg_addr, reg_size) ||
+			OVERLAPS(reg_addr, reg_size,
+					(u32)device->hal_data->
+					register_base_addr, reg_size) ||
+			OVERLAPS((u32)device->hal_data->
+					device_base_addr,
+					FIRMWARE_SIZE, fw_addr,
+					FIRMWARE_SIZE) ||
+			OVERLAPS(fw_addr, FIRMWARE_SIZE,
+					(u32)device->hal_data->
+					device_base_addr,
+					FIRMWARE_SIZE)))
+			return 0;
+	}
+	HAL_MSG_INFO("Device not registered");
+	return -EINVAL;
+}
+
+/*
+ * vidc_hal_core_work_handler - bottom half of the core interrupt: ack the
+ * hardware, dispatch responses, then re-enable the IRQ line that
+ * vidc_hal_isr() disabled.
+ *
+ * NOTE(review): operates on the first device on the global list only —
+ * confirm single-core assumption.
+ */
+static void vidc_hal_core_work_handler(struct work_struct *work)
+{
+	struct hal_device *device = list_first_entry(
+		&hal_ctxt.dev_head, struct hal_device, list);
+
+	HAL_MSG_INFO(" GOT INTERRUPT %s() ", __func__);
+	if (!device->callback) {
+		HAL_MSG_ERROR("No callback function "
+					  "to process interrupt: %p\n", device);
+		/* Fix: still ack the hardware and re-arm the line; the bare
+		 * early return left the IRQ disabled (and the interrupt
+		 * pending) forever. */
+		vidc_hal_core_clear_interrupt(device);
+		enable_irq(device->hal_data->irq);
+		return;
+	}
+	vidc_hal_core_clear_interrupt(device);
+	vidc_hal_response_handler(device);
+	enable_irq(device->hal_data->irq);
+}
+static DECLARE_WORK(vidc_hal_work, vidc_hal_core_work_handler);
+
+/*
+ * vidc_hal_isr - top-half interrupt handler: mask the line and defer all
+ * processing to vidc_hal_core_work_handler(), which re-enables it.
+ * NOTE(review): the entry and exit trace messages are identical strings;
+ * consider distinguishing them.
+ */
+static irqreturn_t vidc_hal_isr(int irq, void *dev)
+{
+	struct hal_device *device = dev;
+	HAL_MSG_MEDIUM("\n vidc_hal_isr() %d ", irq);
+	/* nosync variant: safe in hard-irq context */
+	disable_irq_nosync(irq);
+	queue_work(device->vidc_workq, &vidc_hal_work);
+	HAL_MSG_MEDIUM("\n vidc_hal_isr() %d ", irq);
+	return IRQ_HANDLED;
+}
+
+/*
+ * vidc_hal_add_device - map a video core's firmware and register regions,
+ * register it on the global device list, create its workqueue and claim
+ * its IRQ (left disabled until the core is brought up).
+ * @device_id:   must be 0 (any other value is rejected below)
+ * @fw_base_addr: physical base of the firmware region
+ * @reg_base:    physical base of the register region
+ * @reg_size:    size of the register region
+ * @irq:         interrupt line for the core
+ * @callback:    response callback invoked by the work handler
+ *
+ * Returns an opaque struct hal_device handle, or NULL on failure.
+ */
+void *vidc_hal_add_device(u32 device_id, u32 fw_base_addr, u32 reg_base,
+		u32 reg_size, u32 irq,
+		void (*callback) (enum command_response cmd, void *data))
+{
+	struct hal_device *hdevice = NULL;
+	struct hal_data *hal = NULL;
+	int rc = 0;
+
+	if (device_id || !fw_base_addr || !reg_base || !reg_size ||
+			!irq || !callback) {
+		HAL_MSG_ERROR("Invalid Paramters");
+		return NULL;
+	} else {
+		HAL_MSG_INFO("entered %s, device_id: %d", __func__, device_id);
+	}
+
+	if (vidc_hal_check_core_registered(hal_ctxt, fw_base_addr,
+						reg_base, reg_size, irq)) {
+		HAL_MSG_LOW("HAL_DATA will be assigned now");
+		hal = (struct hal_data *)
+			kzalloc(sizeof(struct hal_data), GFP_KERNEL);
+		if (!hal) {
+			HAL_MSG_ERROR("Failed to alloc");
+			return NULL;
+		}
+		hal->irq = irq;
+		hal->device_base_addr =
+			ioremap_nocache(fw_base_addr, FIRMWARE_SIZE);
+		if (!hal->device_base_addr) {
+			HAL_MSG_ERROR("could not map fw addr %d of size %d",
+						  fw_base_addr, FIRMWARE_SIZE);
+			goto err_map;
+		}
+		hal->register_base_addr =
+			ioremap_nocache(reg_base, reg_size);
+		if (!hal->register_base_addr) {
+			HAL_MSG_ERROR("could not map reg addr %d of size %d",
+						  reg_base, reg_size);
+			goto err_map;
+		}
+		/* Fix: initialise the global list only once; unconditional
+		 * re-initialisation here orphaned already-registered
+		 * devices. */
+		if (!hal_ctxt.dev_count)
+			INIT_LIST_HEAD(&hal_ctxt.dev_head);
+	} else {
+		HAL_MSG_ERROR("Core present/Already added");
+		return NULL;
+	}
+
+	hdevice = (struct hal_device *)
+		kzalloc(sizeof(struct hal_device), GFP_KERNEL);
+	if (!hdevice) {
+		HAL_MSG_ERROR("failed to allocate new device");
+		goto err_map;
+	}
+
+	INIT_LIST_HEAD(&hdevice->list);
+	list_add_tail(&hdevice->list, &hal_ctxt.dev_head);
+	hal_ctxt.dev_count++;
+	hdevice->device_id = device_id;
+	hdevice->hal_data = hal;
+	hdevice->callback = callback;
+
+	hdevice->vidc_workq = create_singlethread_workqueue(
+		"msm_vidc_workerq");
+	if (!hdevice->vidc_workq) {
+		HAL_MSG_ERROR("%s: create workq failed\n", __func__);
+		goto error_createq;
+	}
+
+	rc = request_irq(irq, vidc_hal_isr, IRQF_TRIGGER_HIGH,
+			"msm_vidc", hdevice);
+	if (unlikely(rc)) {
+		HAL_MSG_ERROR("%s() :request_irq failed\n", __func__);
+		goto error_irq_fail;
+	}
+	disable_irq_nosync(irq);
+	return (void *) hdevice;
+error_irq_fail:
+	destroy_workqueue(hdevice->vidc_workq);
+error_createq:
+	/* Fix: unlink and free the new device; the old code deleted the
+	 * list *head* (corrupting the list) and leaked hdevice. */
+	hal_ctxt.dev_count--;
+	list_del(&hdevice->list);
+	kfree(hdevice);
+err_map:
+	/* Fix: undo the ioremap()s, previously leaked on every error. */
+	if (hal->register_base_addr)
+		iounmap(hal->register_base_addr);
+	if (hal->device_base_addr)
+		iounmap(hal->device_base_addr);
+	kfree(hal);
+	return NULL;
+}
+
+/*
+ * vidc_hal_delete_device - tear down the registered core that matches
+ * @device's IRQ: release the IRQ, unlink it from the global list, destroy
+ * its workqueue, unmap its regions and free its memory.
+ */
+void vidc_hal_delete_device(void *device)
+{
+	struct hal_device *close, *dev;
+
+	if (!device)
+		return;
+
+	dev = (struct hal_device *) device;
+	list_for_each_entry(close, &hal_ctxt.dev_head, list) {
+		if (close->hal_data->irq == dev->hal_data->irq) {
+			hal_ctxt.dev_count--;
+			/* Fix: the IRQ was requested with the hal_device as
+			 * dev_id; free_irq() must be passed the same cookie
+			 * (NULL does not match). */
+			free_irq(close->hal_data->irq, close);
+			list_del(&close->list);
+			destroy_workqueue(close->vidc_workq);
+			/* Fix: release the ioremap()ed regions, previously
+			 * leaked on teardown. */
+			iounmap(close->hal_data->register_base_addr);
+			iounmap(close->hal_data->device_base_addr);
+			kfree(close->hal_data);
+			kfree(close);
+			break;
+		}
+	}
+}
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
new file mode 100644
index 0000000..166ed0d
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -0,0 +1,1618 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDC_HAL_H__
+#define __VIDC_HAL_H__
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "vidc_hal_api.h"
+#include "msm_smem.h"
+
+#ifdef HAL_MSG_LOG
+#define HAL_MSG_LOW(x...) pr_debug(x)	/* pr_debug supplies its own level */
+#define HAL_MSG_MEDIUM(x...) pr_debug(x)
+#define HAL_MSG_HIGH(x...) pr_debug(x)
+#else
+#define HAL_MSG_LOW(x...)
+#define HAL_MSG_MEDIUM(x...)
+#define HAL_MSG_HIGH(x...)
+#endif
+
+#define HAL_MSG_ERROR(x...) pr_err(x)	/* was pr_err(KERN_INFO x): pr_err already prepends KERN_ERR, */
+#define HAL_MSG_FATAL(x...) pr_err(x)	/* so the extra KERN_INFO bytes leaked into the log text */
+#define HAL_MSG_INFO(x...) pr_info(x)
+
+#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q 0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q 0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q 0x02
+#define HFI_MASK_QHDR_STATUS 0x000000FF
+
+#define VIDC_MAX_UNCOMPRESSED_FMT_PLANES 3
+
+#define VIDC_IFACEQ_NUMQ 3
+#define VIDC_IFACEQ_CMDQ_IDX 0
+#define VIDC_IFACEQ_MSGQ_IDX 1
+#define VIDC_IFACEQ_DBGQ_IDX 2
+
+#define VIDC_IFACEQ_MAX_PKT_SIZE 1024
+#define VIDC_IFACEQ_MED_PKT_SIZE 768
+#define VIDC_IFACEQ_MIN_PKT_SIZE 8
+#define VIDC_IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define VIDC_IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define VIDC_IFACEQ_MAX_BUF_COUNT 50
+#define VIDC_IFACE_MAX_PARALLEL_CLNTS 16
+#define VIDC_IFACEQ_DFLT_QHDR 0x01010000
+
+struct hfi_queue_table_header {
+ u32 qtbl_version;
+ u32 qtbl_size;
+ u32 qtbl_qhdr0_offset;
+ u32 qtbl_qhdr_size;
+ u32 qtbl_num_q;
+ u32 qtbl_num_active_q;
+};
+
+struct hfi_queue_header {
+ u32 qhdr_status;
+ u32 qhdr_start_addr;
+ u32 qhdr_type;
+ u32 qhdr_q_size;
+ u32 qhdr_pkt_size;
+ u32 qhdr_pkt_drop_cnt;
+ u32 qhdr_rx_wm;
+ u32 qhdr_tx_wm;
+ u32 qhdr_rx_req;
+ u32 qhdr_tx_req;
+ u32 qhdr_rx_irq_status;
+ u32 qhdr_tx_irq_status;
+ u32 qhdr_read_idx;
+ u32 qhdr_write_idx;
+};
+
+#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
+ + sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
+
+#define VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \
+ VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
+
+/* NOTE(review): the (u32)ptr cast assumes a 32-bit kernel pointer size —
+ * confirm; also 'ptr' and 'i' are expanded unparenthesized, so callers
+ * must pass simple expressions. */
+#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
+	(void *)((((u32)ptr) + sizeof(struct hfi_queue_table_header)) + \
+		(i * sizeof(struct hfi_queue_header)))
+
+enum vidc_hw_reg {
+ VIDC_HWREG_CTRL_STATUS = 0x1,
+ VIDC_HWREG_QTBL_INFO = 0x2,
+ VIDC_HWREG_QTBL_ADDR = 0x3,
+ VIDC_HWREG_CTRLR_RESET = 0x4,
+ VIDC_HWREG_IFACEQ_FWRXREQ = 0x5,
+ VIDC_HWREG_IFACEQ_FWTXREQ = 0x6,
+ VIDC_HWREG_VHI_SOFTINTEN = 0x7,
+ VIDC_HWREG_VHI_SOFTINTSTATUS = 0x8,
+ VIDC_HWREG_VHI_SOFTINTCLR = 0x9,
+ VIDC_HWREG_HVI_SOFTINTEN = 0xA,
+};
+
+enum HFI_EVENT {
+ HFI_EVENT_SYS_ERROR,
+ HFI_EVENT_SESSION_ERROR,
+ HFI_EVENT_SESSION_SEQUENCE_CHANGED,
+ HFI_EVENT_SESSION_PROPERTY_CHANGED,
+ HFI_UNUSED_EVENT = 0x10000000,
+};
+
+enum HFI_EVENT_DATA_SEQUENCE_CHANGED {
+ HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES,
+ HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES,
+ HFI_UNUSED_SEQCHG = 0x10000000,
+};
+
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_STARTTIME 0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY 0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA 0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HFI_BUFFERFLAG_READONLY 0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+
+enum HFI_ERROR {
+ HFI_ERR_NONE = 0,
+ HFI_ERR_SYS_UNKNOWN = 0x80000001,
+ HFI_ERR_SYS_FATAL = 0x80000002,
+ HFI_ERR_SYS_INVALID_PARAMETER = 0x80000003,
+ HFI_ERR_SYS_VERSION_MISMATCH = 0x80000004,
+ HFI_ERR_SYS_INSUFFICIENT_RESOURCES = 0x80000005,
+ HFI_ERR_SYS_MAX_SESSIONS_REACHED = 0x80000006,
+ HFI_ERR_SYS_UNSUPPORTED_CODEC = 0x80000007,
+ HFI_ERR_SYS_SESSION_IN_USE = 0x80000008,
+ HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE = 0x80000009,
+ HFI_ERR_SYS_UNSUPPORTED_DOMAIN = 0x8000000A,
+ HFI_ERR_SESSION_START_UNUSED = 0x80001000,
+ HFI_ERR_SESSION_UNKNOWN = 0x80001001,
+ HFI_ERR_SESSION_FATAL = 0x80001002,
+ HFI_ERR_SESSION_INVALID_PARAMETER = 0x80001003,
+ HFI_ERR_SESSION_BAD_POINTER = 0x80001004,
+ HFI_ERR_SESSION_INVALID_SESSION_ID = 0x80001005,
+ HFI_ERR_SESSION_INVALID_STREAM_ID = 0x80001006,
+ HFI_ERR_SESSION_INCORRECT_STATE_OPERATION = 0x80001007,
+ HFI_ERR_SESSION_UNSUPPORTED_PROPERTY = 0x80001008,
+ HFI_ERR_SESSION_UNSUPPORTED_SETTING = 0x80001009,
+ HFI_ERR_SESSION_INSUFFICIENT_RESOURCES = 0x8000100A,
+ HFI_ERR_SESSION_STREAM_CORRUPT = 0x8000100B,
+ HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED = 0x8000100C,
+ HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED = 0x8000100D,
+ HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING = 0x8000100E,
+ HFI_ERR_SESSION_SAME_STATE_OPERATION = 0x8000100F,
+ HFI_UNUSED_ERR = 0x10000000,
+};
+
+enum HFI_DOMAIN {
+ HFI_VIDEO_DOMAIN_VPE,
+ HFI_VIDEO_DOMAIN_ENCODER,
+ HFI_VIDEO_DOMAIN_DECODER,
+ HFI_UNUSED_DOMAIN = 0x10000000,
+};
+
+enum HFI_VIDEO_CODEC {
+ HFI_VIDEO_CODEC_UNKNOWN = 0x00000000,
+ HFI_VIDEO_CODEC_H264 = 0x00000002,
+ HFI_VIDEO_CODEC_H263 = 0x00000004,
+ HFI_VIDEO_CODEC_MPEG1 = 0x00000008,
+ HFI_VIDEO_CODEC_MPEG2 = 0x00000010,
+ HFI_VIDEO_CODEC_MPEG4 = 0x00000020,
+ HFI_VIDEO_CODEC_DIVX_311 = 0x00000040,
+ HFI_VIDEO_CODEC_DIVX = 0x00000080,
+ HFI_VIDEO_CODEC_VC1 = 0x00000100,
+ HFI_VIDEO_CODEC_SPARK = 0x00000200,
+ HFI_VIDEO_CODEC_VP6 = 0x00000400,
+ HFI_VIDEO_CODEC_VP7 = 0x00000800,
+ HFI_VIDEO_CODEC_VP8 = 0x00001000,
+ HFI_UNUSED_CODEC = 0x10000000,
+};
+
+enum HFI_H263_PROFILE {
+ HFI_H263_PROFILE_BASELINE = 0x00000001,
+ HFI_H263_PROFILE_H320CODING = 0x00000002,
+ HFI_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
+ HFI_H263_PROFILE_ISWV2 = 0x00000008,
+ HFI_H263_PROFILE_ISWV3 = 0x00000010,
+ HFI_H263_PROFILE_HIGHCOMPRESSION = 0x00000020,
+ HFI_H263_PROFILE_INTERNET = 0x00000040,
+ HFI_H263_PROFILE_INTERLACE = 0x00000080,
+ HFI_H263_PROFILE_HIGHLATENCY = 0x00000100,
+ HFI_UNUSED_H263_PROFILE = 0x10000000,
+};
+
+enum HFI_H263_LEVEL {
+ HFI_H263_LEVEL_10 = 0x00000001,
+ HFI_H263_LEVEL_20 = 0x00000002,
+ HFI_H263_LEVEL_30 = 0x00000004,
+ HFI_H263_LEVEL_40 = 0x00000008,
+ HFI_H263_LEVEL_45 = 0x00000010,
+ HFI_H263_LEVEL_50 = 0x00000020,
+ HFI_H263_LEVEL_60 = 0x00000040,
+ HFI_H263_LEVEL_70 = 0x00000080,
+ HFI_UNUSED_H263_LEVEL = 0x10000000,
+};
+
+enum HFI_MPEG2_PROFILE {
+ HFI_MPEG2_PROFILE_SIMPLE = 0x00000001,
+ HFI_MPEG2_PROFILE_MAIN = 0x00000002,
+ HFI_MPEG2_PROFILE_422 = 0x00000004,
+ HFI_MPEG2_PROFILE_SNR = 0x00000008,
+ HFI_MPEG2_PROFILE_SPATIAL = 0x00000010,
+ HFI_MPEG2_PROFILE_HIGH = 0x00000020,
+ HFI_UNUSED_MPEG2_PROFILE = 0x10000000,
+};
+
+enum HFI_MPEG2_LEVEL {
+	HFI_MPEG2_LEVEL_LL = 0x00000001,
+	HFI_MPEG2_LEVEL_ML = 0x00000002,
+	HFI_MPEG2_LEVEL_H14 = 0x00000004,
+	HFI_MPEG2_LEVEL_HL = 0x00000008,
+	HFI_UNUSED_MEPG2_LEVEL = 0x10000000,	/* NOTE(review): "MEPG2" typo for MPEG2 */
+};
+
+enum HFI_MPEG4_PROFILE {
+ HFI_MPEG4_PROFILE_SIMPLE = 0x00000001,
+ HFI_MPEG4_PROFILE_SIMPLESCALABLE = 0x00000002,
+ HFI_MPEG4_PROFILE_CORE = 0x00000004,
+ HFI_MPEG4_PROFILE_MAIN = 0x00000008,
+ HFI_MPEG4_PROFILE_NBIT = 0x00000010,
+ HFI_MPEG4_PROFILE_SCALABLETEXTURE = 0x00000020,
+ HFI_MPEG4_PROFILE_SIMPLEFACE = 0x00000040,
+ HFI_MPEG4_PROFILE_SIMPLEFBA = 0x00000080,
+ HFI_MPEG4_PROFILE_BASICANIMATED = 0x00000100,
+ HFI_MPEG4_PROFILE_HYBRID = 0x00000200,
+ HFI_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
+ HFI_MPEG4_PROFILE_CORESCALABLE = 0x00000800,
+ HFI_MPEG4_PROFILE_ADVANCEDCODING = 0x00001000,
+ HFI_MPEG4_PROFILE_ADVANCEDCORE = 0x00002000,
+ HFI_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
+ HFI_MPEG4_PROFILE_ADVANCEDSIMPLE = 0x00008000,
+ HFI_UNUSED_MPEG4_PROFILE = 0x10000000,
+};
+
+enum HFI_MPEG4_LEVEL {
+ HFI_MPEG4_LEVEL_0 = 0x00000001,
+ HFI_MPEG4_LEVEL_0b = 0x00000002,
+ HFI_MPEG4_LEVEL_1 = 0x00000004,
+ HFI_MPEG4_LEVEL_2 = 0x00000008,
+ HFI_MPEG4_LEVEL_3 = 0x00000010,
+ HFI_MPEG4_LEVEL_4 = 0x00000020,
+ HFI_MPEG4_LEVEL_4a = 0x00000040,
+ HFI_MPEG4_LEVEL_5 = 0x00000080,
+ HFI_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
+ HFI_MPEG4_LEVEL_6 = 0x7F000001,
+ HFI_MPEG4_LEVEL_7 = 0x7F000002,
+ HFI_MPEG4_LEVEL_8 = 0x7F000003,
+ HFI_MPEG4_LEVEL_9 = 0x7F000004,
+ HFI_MPEG4_LEVEL_3b = 0x7F000005,
+ HFI_UNUSED_MPEG4_LEVEL = 0x10000000,
+};
+
+enum HFI_H264_PROFILE {
+ HFI_H264_PROFILE_BASELINE = 0x00000001,
+ HFI_H264_PROFILE_MAIN = 0x00000002,
+ HFI_H264_PROFILE_EXTENDED = 0x00000004,
+ HFI_H264_PROFILE_HIGH = 0x00000008,
+ HFI_H264_PROFILE_HIGH10 = 0x00000010,
+ HFI_H264_PROFILE_HIGH422 = 0x00000020,
+ HFI_H264_PROFILE_HIGH444 = 0x00000040,
+ HFI_H264_PROFILE_STEREO_HIGH = 0x00000080,
+ HFI_H264_PROFILE_MV_HIGH = 0x00000100,
+ HFI_UNUSED_H264_PROFILE = 0x10000000,
+};
+
+enum HFI_H264_LEVEL {
+ HFI_H264_LEVEL_1 = 0x00000001,
+ HFI_H264_LEVEL_1b = 0x00000002,
+ HFI_H264_LEVEL_11 = 0x00000004,
+ HFI_H264_LEVEL_12 = 0x00000008,
+ HFI_H264_LEVEL_13 = 0x00000010,
+ HFI_H264_LEVEL_2 = 0x00000020,
+ HFI_H264_LEVEL_21 = 0x00000040,
+ HFI_H264_LEVEL_22 = 0x00000080,
+ HFI_H264_LEVEL_3 = 0x00000100,
+ HFI_H264_LEVEL_31 = 0x00000200,
+ HFI_H264_LEVEL_32 = 0x00000400,
+ HFI_H264_LEVEL_4 = 0x00000800,
+ HFI_H264_LEVEL_41 = 0x00001000,
+ HFI_H264_LEVEL_42 = 0x00002000,
+ HFI_H264_LEVEL_5 = 0x00004000,
+ HFI_H264_LEVEL_51 = 0x00008000,
+ HFI_UNUSED_H264_LEVEL = 0x10000000,
+};
+
+enum HFI_VPX_PROFILE {
+ HFI_VPX_PROFILE_SIMPLE = 0x00000001,
+ HFI_VPX_PROFILE_ADVANCED = 0x00000002,
+ HFI_VPX_PROFILE_VERSION_0 = 0x00000004,
+ HFI_VPX_PROFILE_VERSION_1 = 0x00000008,
+ HFI_VPX_PROFILE_VERSION_2 = 0x00000010,
+ HFI_VPX_PROFILE_VERSION_3 = 0x00000020,
+ HFI_VPX_PROFILE_UNUSED = 0x10000000,
+};
+
+enum HFI_VC1_PROFILE {
+ HFI_VC1_PROFILE_SIMPLE = 0x00000001,
+ HFI_VC1_PROFILE_MAIN = 0x00000002,
+ HFI_VC1_PROFILE_ADVANCED = 0x00000004,
+ HFI_UNUSED_VC1_PROFILE = 0x10000000,
+};
+
+enum HFI_VC1_LEVEL {
+ HFI_VC1_LEVEL_LOW = 0x00000001,
+ HFI_VC1_LEVEL_MEDIUM = 0x00000002,
+ HFI_VC1_LEVEL_HIGH = 0x00000004,
+ HFI_VC1_LEVEL_0 = 0x00000008,
+ HFI_VC1_LEVEL_1 = 0x00000010,
+ HFI_VC1_LEVEL_2 = 0x00000020,
+ HFI_VC1_LEVEL_3 = 0x00000040,
+ HFI_VC1_LEVEL_4 = 0x00000080,
+ HFI_UNUSED_VC1_LEVEL = 0x10000000,
+};
+
+enum HFI_DIVX_FORMAT {
+ HFI_DIVX_FORMAT_4,
+ HFI_DIVX_FORMAT_5,
+ HFI_DIVX_FORMAT_6,
+ HFI_UNUSED_DIVX_FORMAT = 0x10000000,
+};
+
+enum HFI_DIVX_PROFILE {
+ HFI_DIVX_PROFILE_QMOBILE = 0x00000001,
+ HFI_DIVX_PROFILE_MOBILE = 0x00000002,
+ HFI_DIVX_PROFILE_MT = 0x00000004,
+ HFI_DIVX_PROFILE_HT = 0x00000008,
+ HFI_DIVX_PROFILE_HD = 0x00000010,
+ HFI_UNUSED_DIVX_PROFILE = 0x10000000,
+};
+
+enum HFI_BUFFER {
+ HFI_BUFFER_INPUT,
+ HFI_BUFFER_OUTPUT,
+ HFI_BUFFER_OUTPUT2,
+ HFI_BUFFER_EXTRADATA_INPUT,
+ HFI_BUFFER_EXTRADATA_OUTPUT,
+ HFI_BUFFER_EXTRADATA_OUTPUT2,
+ HFI_BUFFER_INTERNAL_SCRATCH = 0x7F000001,
+ HFI_BUFFER_INTERNAL_PERSIST = 0x7F000002,
+ HFI_UNUSED_BUFFER = 0x10000000,
+};
+
+enum HFI_BUFFER_MODE {
+ HFI_BUFFER_MODE_STATIC,
+ HFI_BUFFER_MODE_RING,
+ HFI_UNUSED_BUFFER_MODE = 0x10000000,
+};
+
+enum HFI_FLUSH {
+ HFI_FLUSH_INPUT,
+ HFI_FLUSH_OUTPUT,
+ HFI_FLUSH_OUTPUT2,
+ HFI_FLUSH_ALL,
+ HFI_UNUSED_FLUSH = 0x10000000,
+};
+
+enum HFI_EXTRADATA {
+ HFI_EXTRADATA_NONE = 0x00000000,
+ HFI_EXTRADATA_MB_QUANTIZATION = 0x00000001,
+ HFI_EXTRADATA_INTERLACE_VIDEO = 0x00000002,
+ HFI_EXTRADATA_VC1_FRAMEDISP = 0x00000003,
+ HFI_EXTRADATA_VC1_SEQDISP = 0x00000004,
+ HFI_EXTRADATA_TIMESTAMP = 0x00000005,
+ HFI_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
+ HFI_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
+ HFI_EXTRADATA_INDEX = 0x7F100002,
+ HFI_EXTRADATA_METADATA_FILLER = 0x7FE00002,
+ HFI_UNUSED_EXTRADATA = 0x10000000,
+};
+
+enum HFI_EXTRADATA_INDEX_TYPE {
+ HFI_INDEX_EXTRADATA_INPUT_CROP = 0x0700000E,
+ HFI_INDEX_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
+ HFI_INDEX_EXTRADATA_ASPECT_RATIO = 0x7F100003,
+};
+
+/* Header preceding each extradata blob; rg_data[] carries data_size bytes. */
+struct hfi_extradata_header {
+	u32 size;
+	u32 version;
+	u32 port_tndex;	/* NOTE(review): likely a typo for port_index */
+	enum HFI_EXTRADATA type;
+	u32 data_size;
+	u8 rg_data[1];
+};
+
+enum HFI_INTERLACE_FORMAT {
+ HFI_INTERLACE_FRAME_PROGRESSIVE = 0x01,
+ HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST = 0x02,
+ HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
+ HFI_INTERLACE_FRAME_TOPFIELDFIRST = 0x08,
+ HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST = 0x10,
+ HFI_UNUSED_INTERLACE = 0x10000000,
+};
+
+enum HFI_PROPERTY {
+ HFI_PROPERTY_SYS_UNUSED = 0x08000000,
+ HFI_PROPERTY_SYS_IDLE_INDICATOR,
+ HFI_PROPERTY_SYS_DEBUG_CONFIG,
+ HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO,
+ HFI_PROPERTY_PARAM_UNUSED = 0x04000000,
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ HFI_PROPERTY_PARAM_FRAME_SIZE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED,
+ HFI_PROPERTY_PARAM_CHROMA_SITE,
+ HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG,
+ HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED,
+ HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED,
+ HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
+ HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT,
+ HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED,
+ HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE,
+ HFI_PROPERTY_PARAM_CODEC_SUPPORTED,
+ HFI_PROPERTY_PARAM_DIVX_FORMAT,
+
+ HFI_PROPERTY_CONFIG_UNUSED = 0x02000000,
+ HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS,
+ HFI_PROPERTY_CONFIG_REALTIME,
+ HFI_PROPERTY_CONFIG_PRIORITY,
+ HFI_PROPERTY_CONFIG_BATCH_INFO,
+ HFI_PROPERTY_CONFIG_FRAME_RATE,
+
+ HFI_PROPERTY_PARAM_VDEC_UNUSED = 0x01000000,
+ HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
+ HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
+ HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT,
+ HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE,
+ HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER,
+ HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION,
+ HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB,
+ HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING,
+ HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+
+ HFI_PROPERTY_CONFIG_VDEC_UNUSED = 0x00800000,
+ HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+ HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
+ HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP,
+
+ HFI_PROPERTY_PARAM_VENC_UNUSED = 0x00400000,
+ HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
+ HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
+ HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL,
+ HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
+ HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
+ HFI_PROPERTY_PARAM_VENC_SESSION_QP,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION,
+ HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO,
+ HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH,
+ HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL,
+
+ HFI_PROPERTY_CONFIG_VENC_UNUSED = 0x00200000,
+ HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
+ HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD,
+ HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD,
+ HFI_PROPERTY_CONFIG_VENC_REQUEST_IFRAME,
+ HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE,
+ HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL,
+ HFI_PROPERTY_PARAM_VENC_ADVANCED,
+
+ HFI_PROPERTY_PARAM_VPE_UNUSED = 0x00100000,
+
+ HFI_PROPERTY_CONFIG_VPE_UNUSED = 0x00080000,
+ HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
+ HFI_PROPERTY_CONFIG_VPE_OPERATIONS,
+ HFI_PROPERTY_UNUSED = 0x10000000,
+};
+
+struct hfi_batch_info {
+ u32 input_batch_count;
+ u32 output_batch_count;
+};
+
+struct hfi_bitrate {
+ u32 bit_rate;
+};
+
+struct hfi_buffer_count_actual {
+ enum HFI_BUFFER buffer;
+ u32 buffer_count_actual;
+};
+
+struct hfi_buffer_requirements {
+ enum HFI_BUFFER buffer;
+ u32 buffer_size;
+ u32 buffer_region_size;
+ u32 buffer_hold_count;
+ u32 buffer_count_min;
+ u32 buffer_count_actual;
+ u32 contiguous;
+ u32 buffer_alignment;
+};
+
+enum HFI_CAPABILITY {
+ HFI_CAPABILITY_FRAME_WIDTH,
+ HFI_CAPABILITY_FRAME_HEIGHT,
+ HFI_CAPABILITY_MBS_PER_FRAME,
+ HFI_CAPABILITY_MBS_PER_SECOND,
+ HFI_CAPABILITY_FRAMERATE,
+ HFI_CAPABILITY_SCALE_X,
+ HFI_CAPABILITY_SCALE_Y,
+ HFI_CAPABILITY_BITRATE,
+ HFI_UNUSED_CAPABILITY = 0x10000000,
+};
+
+struct hfi_capability_supported {
+ enum HFI_CAPABILITY eCapabilityType;
+ u32 min;
+ u32 max;
+ u32 step_size;
+};
+
+struct hfi_capability_supported_INFO {
+ u32 num_capabilities;
+ struct hfi_capability_supported rg_data[1];
+};
+
+enum HFI_CHROMA_SITE {
+ HFI_CHROMA_SITE_0,
+ HFI_CHROMA_SITE_1,
+ HFI_UNUSED_CHROMA = 0x10000000,
+};
+
+struct hfi_data_payload {
+ u32 size;
+ u8 rg_data[1];
+};
+
+struct hfi_seq_header_info {
+ u32 max_header_len;
+};
+
+struct hfi_enable_picture {
+ u32 picture_type;
+};
+
+struct hfi_display_picture_buffer_count {
+ int enable;
+ u32 count;
+};
+
+struct hfi_enable {
+ int enable;
+};
+
+enum HFI_H264_DB_MODE {
+ HFI_H264_DB_MODE_DISABLE,
+ HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
+ HFI_H264_DB_MODE_ALL_BOUNDARY,
+ HFI_UNUSED_H264_DB = 0x10000000,
+};
+
+struct hfi_h264_db_control {
+ enum HFI_H264_DB_MODE mode;
+ int slice_alpha_offset;
+ int slice_beta_offset;
+};
+
+enum HFI_H264_ENTROPY {
+ HFI_H264_ENTROPY_CAVLC,
+ HFI_H264_ENTROPY_CABAC,
+ HFI_UNUSED_ENTROPY = 0x10000000,
+};
+
+enum HFI_H264_CABAC_MODEL {
+ HFI_H264_CABAC_MODEL_0,
+ HFI_H264_CABAC_MODEL_1,
+ HFI_H264_CABAC_MODEL_2,
+ HFI_UNUSED_CABAC = 0x10000000,
+};
+
+struct hfi_h264_entropy_control {
+ enum HFI_H264_ENTROPY entropy_mode;
+ enum HFI_H264_CABAC_MODEL cabac_model;
+};
+
+struct hfi_extra_data_header_config {
+ u32 type;
+ enum HFI_BUFFER buffer_type;
+ u32 version;
+ u32 port_index;
+ u32 client_extradata_id;
+};
+
+struct hfi_frame_rate {
+ enum HFI_BUFFER buffer_type;
+ u32 frame_rate;
+};
+
+struct hfi_interlace_format_supported {
+ enum HFI_BUFFER buffer;
+ enum HFI_INTERLACE_FORMAT format;
+};
+
+enum hfi_intra_refresh_mode {
+ HFI_INTRA_REFRESH_NONE,
+ HFI_INTRA_REFRESH_CYCLIC,
+ HFI_INTRA_REFRESH_ADAPTIVE,
+ HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE,
+ HFI_INTRA_REFRESH_RANDOM,
+ HFI_UNUSED_INTRA = 0x10000000,
+};
+
+struct hfi_intra_refresh {
+ enum hfi_intra_refresh_mode mode;
+ u32 air_mbs;
+ u32 air_ref;
+ u32 cir_mbs;
+};
+
+struct hfi_idr_period {
+ u32 idr_period;
+};
+
+struct hfi_intra_period {
+ u32 pframes;
+ u32 bframes;
+};
+
+struct hfi_timestamp_scale {
+ u32 time_stamp_scale;
+};
+
+struct hfi_mb_error_map {
+ u32 error_map_size;
+ u8 rg_error_map[1];
+};
+
+struct hfi_metadata_pass_through {
+ int enable;
+ u32 size;
+};
+
+struct hfi_mpeg4_header_extension {
+ u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+ u32 time_increment_resolution;
+};
+
+enum HFI_MULTI_SLICE {
+ HFI_MULTI_SLICE_OFF,
+ HFI_MULTI_SLICE_BY_MB_COUNT,
+ HFI_MULTI_SLICE_BY_BYTE_COUNT,
+ HFI_MULTI_SLICE_GOB,
+ HFI_UNUSED_SLICE = 0x10000000,
+};
+
+struct hfi_multi_slice_control {
+ enum HFI_MULTI_SLICE multi_slice;
+ u32 slice_size;
+};
+
+struct hfi_multi_stream {
+ enum HFI_BUFFER buffer;
+ u32 enable;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_multi_view_format {
+ u32 views;
+ u32 rg_view_order[1];
+};
+
+struct hfi_multi_view_select {
+ u32 view_index;
+};
+
+enum HFI_NAL_STREAM_FORMAT {
+ HFI_NAL_FORMAT_STARTCODES = 0x00000001,
+ HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
+ HFI_NAL_FORMAT_ONE_BYTE_LENGTH = 0x00000004,
+ HFI_NAL_FORMAT_TWO_BYTE_LENGTH = 0x00000008,
+ HFI_NAL_FORMAT_FOUR_BYTE_LENGTH = 0x00000010,
+ HFI_UNUSED_NAL = 0x10000000,
+};
+
+struct hfi_nal_stream_format_supported {
+ u32 nal_stream_format_supported;
+};
+
+enum HFI_PICTURE {
+ HFI_PICTURE_I = 0x01,
+ HFI_PICTURE_P = 0x02,
+ HFI_PICTURE_B = 0x04,
+ HFI_PICTURE_IDR = 0x7F001000,
+ HFI_UNUSED_PICT = 0x10000000,
+};
+
+enum HFI_PRIORITY {
+ HFI_PRIORITY_LOW = 10,
+ HFI_PRIOIRTY_MEDIUM = 20,
+ HFI_PRIORITY_HIGH = 30,
+ HFI_UNUSED_PRIORITY = 0x10000000,
+};
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+struct hfi_profile_level_supported {
+ u32 profile_count;
+ struct hfi_profile_level rg_profile_level[1];
+};
+
+enum HFI_ROTATE {
+ HFI_ROTATE_NONE,
+ HFI_ROTATE_90,
+ HFI_ROTATE_180,
+ HFI_ROTATE_270,
+ HFI_UNUSED_ROTATE = 0x10000000,
+};
+
+enum HFI_FLIP {
+ HFI_FLIP_NONE,
+ HFI_FLIP_HORIZONTAL,
+ HFI_FLIP_VERTICAL,
+ HFI_UNUSED_FLIP = 0x10000000,
+};
+
+struct hfi_operations {
+ enum HFI_ROTATE rotate;
+ enum HFI_FLIP flip;
+};
+
+enum HFI_OUTPUT_ORDER {
+ HFI_OUTPUT_ORDER_DISPLAY,
+ HFI_OUTPUT_ORDER_DECODE,
+ HFI_UNUSED_OUTPUT = 0x10000000,
+};
+
+struct hfi_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+};
+
+enum HFI_RATE_CONTROL {
+ HFI_RATE_CONTROL_OFF,
+ HFI_RATE_CONTROL_VBR_VFR,
+ HFI_RATE_CONTROL_VBR_CFR,
+ HFI_RATE_CONTROL_CBR_VFR,
+ HFI_RATE_CONTROL_CBR_CFR,
+ HFI_UNUSED_RC = 0x10000000,
+};
+
+struct hfi_slice_delivery_mode {
+ int enable;
+};
+
+struct hfi_temporal_spatial_tradeoff {
+ u32 ts_factor;
+};
+
+struct hfi_frame_size {
+ enum HFI_BUFFER buffer;
+ u32 width;
+ u32 height;
+};
+
+enum HFI_UNCOMPRESSED_FORMAT {
+ HFI_COLOR_FORMAT_MONOCHROME,
+ HFI_COLOR_FORMAT_NV12,
+ HFI_COLOR_FORMAT_NV21,
+ HFI_COLOR_FORMAT_NV12_4x4TILE,
+ HFI_COLOR_FORMAT_NV21_4x4TILE,
+ HFI_COLOR_FORMAT_YUYV,
+ HFI_COLOR_FORMAT_YVYU,
+ HFI_COLOR_FORMAT_UYVY,
+ HFI_COLOR_FORMAT_VYUY,
+ HFI_COLOR_FORMAT_RGB565,
+ HFI_COLOR_FORMAT_BGR565,
+ HFI_COLOR_FORMAT_RGB888,
+ HFI_COLOR_FORMAT_BGR888,
+ HFI_UNUSED_COLOR = 0x10000000,
+};
+
+struct hfi_uncompressed_format_select {
+ enum HFI_BUFFER buffer;
+ enum HFI_UNCOMPRESSED_FORMAT format;
+};
+
+struct hfi_uncompressed_format_supported {
+ enum HFI_BUFFER buffer;
+ u32 format_entries;
+ u32 rg_format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ enum HFI_BUFFER buffer;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ enum HFI_UNCOMPRESSED_FORMAT format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+ enum HFI_BUFFER buffer;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hfi_codec_supported {
+ u32 decoder_codec_supported;
+ u32 encoder_codec_supported;
+};
+
+enum HFI_DEBUG_MSG {
+ HFI_DEBUG_MSG_LOW = 0x00000001,
+ HFI_DEBUG_MSG_MEDIUM = 0x00000002,
+ HFI_DEBUG_MSG_HIGH = 0x00000004,
+ HFI_DEBUG_MSG_ERROR = 0x00000008,
+ HFI_DEBUG_MSG_FATAL = 0x00000010,
+ HFI_UNUSED_DEBUG_MSG = 0x10000000,
+};
+
+struct hfi_debug_config {
+ u32 debug_config;
+};
+
+struct hfi_properties_supported {
+ u32 num_properties;
+ u32 rg_properties[1];
+};
+
+enum HFI_RESOURCE {
+ HFI_RESOURCE_OCMEM = 0x00000001,
+ HFI_UNUSED_RESOURCE = 0x10000000,
+};
+
+struct hfi_resource_ocmem_type {
+ u32 size;
+ u8 *mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+ enum HFI_DOMAIN session_domain;
+ u32 width;
+ u32 height;
+ u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+ u32 num_entries;
+ struct hfi_resource_ocmem_requirement rg_requirements[1];
+};
+
+struct hfi_venc_config_advanced {
+ u8 pipe2d;
+ u8 hw_mode;
+ u8 low_delay_enforce;
+ int h264_constrain_intra_pred;
+ int h264_transform_8x8_flag;
+ int mpeg4_qpel_enable;
+ int multi_refP_en;
+ int qmatrix_en;
+ u8 vpp_info_packet_mode;
+ u8 ref_tile_mode;
+ u8 bitstream_flush_mode;
+ u32 ds_display_frame_width;
+ u32 ds_display_frame_height;
+ u32 perf_tune_param_ptr;
+};
+
+enum HFI_COMMAND {
+ HFI_CMD_SYS_UNUSED = 0x01000000,
+ HFI_CMD_SYS_INIT,
+ HFI_CMD_SYS_SESSION_INIT,
+ HFI_CMD_SYS_SESSION_END,
+ HFI_CMD_SYS_SESSION_ABORT,
+ HFI_CMD_SYS_SET_RESOURCE,
+ HFI_CMD_SYS_RELEASE_RESOURCE,
+ HFI_CMD_SYS_PING,
+ HFI_CMD_SYS_PC_PREP,
+ HFI_CMD_SYS_SET_PROPERTY,
+ HFI_CMD_SYS_GET_PROPERTY,
+
+ HFI_CMD_SESSION_UNUSED = 0x02000000,
+ HFI_CMD_SESSION_LOAD_RESOURCES,
+ HFI_CMD_SESSION_START,
+ HFI_CMD_SESSION_STOP,
+ HFI_CMD_SESSION_EMPTY_BUFFER,
+ HFI_CMD_SESSION_FILL_BUFFER,
+ HFI_CMD_SESSION_FLUSH,
+ HFI_CMD_SESSION_SUSPEND,
+ HFI_CMD_SESSION_RESUME,
+ HFI_CMD_SESSION_SET_PROPERTY,
+ HFI_CMD_SESSION_GET_PROPERTY,
+ HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER,
+ HFI_CMD_SESSION_GET_SEQUENCE_HEADER,
+ HFI_CMD_SESSION_SET_BUFFERS,
+ HFI_CMD_SESSION_RELEASE_BUFFERS,
+ HFI_CMD_SESSION_RELEASE_RESOURCES,
+
+ HFI_CMD_UNUSED = 0x10000000,
+};
+
+enum HFI_MESSAGE {
+ HFI_MSG_SYS_UNUSED = 0x01000000,
+ HFI_MSG_SYS_IDLE,
+ HFI_MSG_SYS_PC_PREP_DONE,
+ HFI_MSG_SYS_RELEASE_RESOURCE,
+ HFI_MSG_SYS_PING_ACK,
+ HFI_MSG_SYS_DEBUG,
+ HFI_MSG_SYS_INIT_DONE,
+ HFI_MSG_SYS_PROPERTY_INFO,
+ HFI_MSG_SESSION_UNUSED = 0x02000000,
+ HFI_MSG_EVENT_NOTIFY,
+ HFI_MSG_SYS_SESSION_INIT_DONE,
+ HFI_MSG_SYS_SESSION_END_DONE,
+ HFI_MSG_SYS_SESSION_ABORT_DONE,
+ HFI_MSG_SESSION_LOAD_RESOURCES_DONE,
+ HFI_MSG_SESSION_START_DONE,
+ HFI_MSG_SESSION_STOP_DONE,
+ HFI_MSG_SESSION_SUSPEND_DONE,
+ HFI_MSG_SESSION_RESUME_DONE,
+ HFI_MSG_SESSION_EMPTY_BUFFER_DONE,
+ HFI_MSG_SESSION_FILL_BUFFER_DONE,
+ HFI_MSG_SESSION_FLUSH_DONE,
+ HFI_MSG_SESSION_PROPERTY_INFO,
+ HFI_MSG_SESSION_RELEASE_RESOURCES_DONE,
+ HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER_DONE,
+ HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE,
+ HFI_MSG_UNUSED = 0x10000000,
+};
+
+struct vidc_hal_msg_pkt_hdr {
+ u32 size;
+ enum HFI_MESSAGE packet;
+};
+
+struct vidc_hal_session_cmd_pkt {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+enum HFI_STATUS {
+ HFI_FAIL = 0,
+ HFI_SUCCESS,
+ HFI_UNUSED_STATUS = 0x10000000,
+};
+
+struct hfi_cmd_sys_init_packet {
+ u32 size;
+ enum HFI_COMMAND packet;
+};
+
+struct hfi_cmd_sys_session_init_packet {
+ u32 size;
+ enum HFI_COMMAND packet;
+ u32 session_id;
+ enum HFI_DOMAIN session_domain;
+ enum HFI_VIDEO_CODEC session_codec;
+};
+
+struct hfi_cmd_sys_session_end_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_sys_session_abort_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_sys_pc_prep_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+};
+
+struct hfi_cmd_sys_set_resource_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 resource_handle;
+ enum HFI_RESOURCE resource_type;
+ u32 rg_resource_data[1];
+};
+
+struct hfi_cmd_sys_release_resource_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ enum HFI_RESOURCE resource_type;
+ u32 resource_handle;
+};
+
+struct hfi_cmd_sys_ping_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 client_data;
+};
+
+struct hfi_cmd_sys_set_property_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_cmd_sys_get_property_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 num_properties;
+ enum HFI_PROPERTY rg_property_data[1];
+};
+
+struct hfi_cmd_session_load_resources_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_start_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_stop_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_empty_buffer_compressed_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u8 *packet_buffer;
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet {
+ u32 size;
+ enum HFI_COMMAND packet;
+ u32 session_id;
+ u32 view_id;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 input_tag;
+ u8 *packet_buffer;
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u8 *packet_buffer2;
+};
+
+struct hfi_cmd_session_empty_buffer_uncompressed_plane2_packet {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u8 *packet_buffer3;
+};
+
+struct hfi_cmd_session_fill_buffer_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 stream_id;
+ u8 *packet_buffer;
+ u8 *extra_data_buffer;
+};
+
+struct hfi_cmd_session_flush_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ enum HFI_FLUSH flush_type;
+};
+
+struct hfi_cmd_session_suspend_packet {
+ u32 size;
+ enum HFI_COMMAND packet;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_resume_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_set_property_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_cmd_session_get_property_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 num_properties;
+ enum HFI_PROPERTY rg_property_data[1];
+};
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extradata_addr;
+};
+
+struct hfi_cmd_session_set_buffers_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ enum HFI_BUFFER buffer_type;
+ enum HFI_BUFFER_MODE buffer_mode;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_release_buffer_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ enum HFI_BUFFER buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 num_buffers;
+ u32 rg_buffer_info[1];
+};
+
+struct hfi_cmd_session_release_resources_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+};
+
+struct hfi_cmd_session_parse_sequence_header_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 header_len;
+ u8 *packet_buffer;
+};
+
+struct hfi_cmd_session_get_sequence_header_packet {
+ u32 size;
+ enum HFI_COMMAND packet_type;
+ u32 session_id;
+ u32 buffer_len;
+ u8 *packet_buffer;
+};
+
+struct hfi_msg_event_notify_packet { /* HFI message: asynchronous event from firmware */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_EVENT event_id;
+ u32 event_data1; /* event-specific payload words */
+ u32 event_data2;
+ u32 rg_ext_event_data[1]; /* variable-length extended event data */
+};
+
+struct hfi_msg_sys_init_done_packet { /* HFI message: SYS_INIT completed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ enum HFI_ERROR error_type;
+ u32 num_properties;
+ u32 rg_property_data[1]; /* variable-length property TLVs */
+};
+
+struct hfi_msg_sys_session_init_done_packet { /* HFI message: session init completed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_msg_sys_session_end_done_packet { /* HFI message: session end completed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_sys_session_abort_done_packet { /* HFI message: session abort completed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_sys_idle_packet { /* HFI message: firmware is idle */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+};
+
+struct hfi_msg_sys_pc_prep_done_packet { /* HFI message: power-collapse preparation done */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_packet { /* HFI message: resource release done */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 resource_handle;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_sys_ping_ack_packet { /* HFI message: ping acknowledgement */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 client_data; /* echoed back from the ping command */
+};
+
+struct hfi_msg_sys_debug_packet { /* HFI message: firmware debug/log output */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ enum HFI_DEBUG_MSG msg_type;
+ u32 msg_size; /* bytes of rg_msg_data[] */
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u8 rg_msg_data[1]; /* variable-length message text */
+};
+
+struct hfi_msg_sys_property_info_packet { /* HFI message: property query response */
+ u32 nsize; /* NOTE(review): named "nsize" unlike "size" in every sibling packet; confirm against the firmware spec before renaming */
+ enum HFI_MESSAGE packet_type;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_msg_session_load_resources_done_packet { /* HFI message: session resources loaded */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_session_start_done_packet { /* HFI message: session started */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_session_stop_done_packet { /* HFI message: session stopped */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_session_suspend_done_packet { /* HFI message: session suspended */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_session_resume_done_packet { /* HFI message: session resumed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_msg_session_empty_buffer_done_packet { /* HFI message: input (ETB) buffer consumed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+ u32 offset;
+ u32 filled_len;
+ u32 input_tag; /* client tag associated with the input buffer */
+ u8 *packet_buffer;
+};
+
+struct hfi_msg_session_fill_buffer_done_compressed_packet { /* HFI message: output (FTB) done, compressed stream */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ enum HFI_ERROR error_type;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ enum HFI_PICTURE picture_type;
+ u8 *packet_buffer;
+ u8 *extra_data_buffer;
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_packet { /* HFI message: FTB done, uncompressed frame, first plane */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ u32 stream_id;
+ u32 view_id; /* view index for multi-view (MVC) streams */
+ enum HFI_ERROR error_type;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 oofset; /* NOTE(review): almost certainly a typo for "offset"; must stay in sync with firmware spec and all users before fixing */
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_xCoord;
+ u32 start_yCoord;
+ u32 input_tag;
+ u32 input_tag1;
+ enum HFI_PICTURE picture_type;
+ u8 *packet_buffer;
+ u8 *extra_data_buffer;
+};
+
+struct hfi_msg_session_fill_buffer_done_uncompressed_plane1_packet { /* second plane of an uncompressed FBD */
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u8 *packet_buffer;
+};
+
+struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet { /* third plane of an uncompressed FBD */
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u8 *packet_buffer;
+};
+
+struct hfi_msg_session_flush_done_packet { /* HFI message: flush completed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+ enum HFI_FLUSH flush_type; /* which port(s) were flushed */
+};
+
+struct hfi_msg_session_parse_sequence_header_done_packet { /* HFI message: sequence header parsed */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+ u32 num_properties;
+ u32 rg_property_data[1]; /* properties extracted from the header */
+};
+
+struct hfi_msg_session_get_sequence_header_done_packet { /* HFI message: sequence header retrieved */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+ u32 header_len;
+ u8 *sequence_header;
+};
+
+struct hfi_msg_session_property_info_packet { /* HFI message: session property query response */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ u32 num_properties;
+ u32 rg_property_data[1];
+};
+
+struct hfi_msg_session_release_resources_done_packet { /* HFI message: session resources released */
+ u32 size;
+ enum HFI_MESSAGE packet_type;
+ u32 session_id;
+ enum HFI_ERROR error_type;
+};
+
+struct hfi_extradata_mb_quantization_payload { /* per-macroblock QP values, variable length */
+ u8 rg_mb_qp[1];
+};
+
+struct hfi_extradata_vc1_pswnd { /* VC-1 pan/scan window */
+ u32 ps_wnd_h_offset;
+ u32 ps_wndv_offset; /* NOTE(review): inconsistent with ps_wnd_h_offset — likely meant ps_wnd_v_offset; verify against users before renaming */
+ u32 ps_wnd_width;
+ u32 ps_wnd_height;
+};
+
+struct hfi_extradata_vc1_framedisp_payload { /* VC-1 frame display metadata */
+ u32 res_pic;
+ u32 ref;
+ u32 range_map_present;
+ u32 range_map_y;
+ u32 range_map_uv;
+ u32 num_pan_scan_wnds;
+ struct hfi_extradata_vc1_pswnd rg_ps_wnd[1]; /* num_pan_scan_wnds entries */
+};
+
+struct hfi_extradata_vc1_seqdisp_payload { /* VC-1 sequence display metadata */
+ u32 prog_seg_frm;
+ u32 uv_sampling_fmt;
+ u32 color_fmt_flag;
+ u32 color_primaries;
+ u32 transfer_char;
+ u32 mat_coeff;
+ u32 aspect_ratio;
+ u32 aspect_horiz;
+ u32 aspect_vert;
+};
+
+struct hfi_extradata_timestamp_payload { /* 64-bit timestamp split into two words */
+ u32 timestamp_low;
+ u32 timestamp_high;
+};
+
+struct hfi_extradata_interlace_video_payload { /* interlacing mode of the frame */
+ enum HFI_INTERLACE_FORMAT format;
+};
+
+enum HFI_S3D_FP_LAYOUT { /* stereoscopic 3D frame-packing layout */
+ HFI_S3D_FP_LAYOUT_NONE,
+ HFI_S3D_FP_LAYOUT_INTRLV_CHECKERBOARD,
+ HFI_S3D_FP_LAYOUT_INTRLV_COLUMN,
+ HFI_S3D_FP_LAYOUT_INTRLV_ROW,
+ HFI_S3D_FP_LAYOUT_SIDEBYSIDE,
+ HFI_S3D_FP_LAYOUT_TOPBOTTOM,
+ HFI_S3D_FP_LAYOUT_UNUSED = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum HFI_S3D_FP_VIEW_ORDER { /* which eye's view comes first */
+ HFI_S3D_FP_LEFTVIEW_FIRST,
+ HFI_S3D_FP_RIGHTVIEW_FIRST,
+ HFI_S3D_FP_UNKNOWN,
+ HFI_S3D_FP_VIEWORDER_UNUSED = 0x10000000,
+};
+
+enum HFI_S3D_FP_FLIP { /* per-view flip applied to the packed frame */
+ HFI_S3D_FP_FLIP_NONE,
+ HFI_S3D_FP_FLIP_LEFT_HORIZ,
+ HFI_S3D_FP_FLIP_LEFT_VERT,
+ HFI_S3D_FP_FLIP_RIGHT_HORIZ,
+ HFI_S3D_FP_FLIP_RIGHT_VERT,
+ HFI_S3D_FP_FLIP_UNUSED = 0x10000000,
+};
+
+struct hfi_extradata_s3d_frame_packing_payload { /* stereo 3D frame-packing description */
+ enum HFI_S3D_FP_LAYOUT eLayout;
+ enum HFI_S3D_FP_VIEW_ORDER eOrder;
+ enum HFI_S3D_FP_FLIP eFlip;
+ int bQuinCunx; /* boolean: quincunx (checkerboard) sampling */
+ u32 nLeftViewLumaSiteX;
+ u32 nLeftViewLumaSiteY;
+ u32 nRightViewLumaSiteX;
+ u32 nRightViewLumaSiteY;
+};
+
+struct hfi_extradata_num_concealed_mb_payload { /* error-concealment statistic */
+ u32 num_mb_concealed;
+};
+
+struct hfi_extradata_sliceinfo { /* one slice's position within the bitstream */
+ u32 offset_in_stream;
+ u32 slice_length;
+};
+
+struct hfi_extradata_multislice_info_payload { /* slice map for a multi-slice frame */
+ u32 num_slices;
+ struct hfi_extradata_sliceinfo rg_slice_info[1]; /* num_slices entries */
+};
+
+struct hfi_index_extradata_input_crop_payload { /* input crop rectangle */
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_index_extradata_digital_zoom_payload { /* digital zoom factors; signed per the HFI definition */
+ u32 size;
+ u32 version;
+ u32 port_index;
+ int width;
+ int height;
+};
+
+struct vidc_mem_addr { /* one shared-memory allocation: device + kernel views */
+ u8 *align_device_addr; /* device (IOMMU) address */
+ u8 *align_virtual_addr; /* kernel virtual address */
+ u32 mem_size;
+ struct msm_smem *mem_data; /* backing smem handle */
+};
+
+struct vidc_iface_q_info { /* one host/firmware interface queue */
+ void *q_hdr; /* queue header within the shared table */
+ struct vidc_mem_addr q_array; /* backing memory for queue entries */
+};
+
+/* Internal data used in vidc_hal not exposed to msm_vidc*/
+
+struct hal_data { /* per-device register/interrupt bookkeeping */
+ u32 irq;
+ u8 *device_base_addr;
+ u8 *register_base_addr; /* ioremapped register window */
+};
+
+struct hal_device { /* one video core instance */
+ struct list_head list; /* linkage in hal_device_data.dev_head */
+ struct list_head sess_head; /* open hal_session list */
+ u32 intr_status;
+ u32 device_id;
+ spinlock_t read_lock; /* serializes message-queue reads */
+ spinlock_t write_lock; /* serializes command-queue writes */
+ void (*callback) (u32 response, void *callback); /* NOTE(review): vidc_hal_add_device() registers a (enum command_response, void *) callback — the u32 here relies on enum/u32 compatibility; also the second parameter is confusingly named "callback" but carries data */
+ struct vidc_mem_addr iface_q_table;
+ struct vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ];
+ struct smem_client *hal_client;
+ struct hal_data *hal_data;
+ struct workqueue_struct *vidc_workq;
+ int spur_count; /* spurious interrupt counter */
+ int reg_count; /* serviced interrupt counter */
+};
+
+struct hal_session { /* one codec session on a device */
+ struct list_head list; /* linkage in hal_device.sess_head */
+ u32 session_id;
+ u32 is_decoder; /* boolean: decode vs encode session */
+ struct hal_device *device; /* owning device */
+};
+
+struct hal_device_data { /* global registry of video devices */
+ struct list_head dev_head;
+ int dev_count;
+};
+
+extern struct hal_device_data hal_ctxt;
+
+int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt);
+int vidc_hal_iface_dbgq_read(struct hal_device *device, void *pkt);
+
+/* Interrupt Processing:*/
+void vidc_hal_response_handler(struct hal_device *device);
+
+#endif /*__VIDC_HAL_H__ */
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
new file mode 100644
index 0000000..036091b
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -0,0 +1,975 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDC_HAL_API_H__
+#define __VIDC_HAL_API_H__
+
+#include <linux/types.h>
+
+#define CONTAINS(__a, __sz, __t) ({ /* true iff __t is in [__a, __a + __sz) */ \
+	int __rc = (__t) >= (__a) && \
+		(__t) < (__a) + (__sz); \
+	__rc; /* args are parenthesized: safe for expression arguments */ \
+})
+
+#define OVERLAPS(__t, __tsz, __a, __asz) ({ /* true iff [__a, __a+__asz) is contained in [__t, __t+__tsz) */ \
+	int __rc = (__t) <= (__a) && \
+		(__t) + (__tsz) >= (__a) + (__asz); \
+	__rc; /* NOTE: tests containment, not mere overlap, despite the name */ \
+})
+
+#define HAL_BUFFERFLAG_EOS 0x00000001 /* buffer carries end-of-stream */
+#define HAL_BUFFERFLAG_STARTTIME 0x00000002
+#define HAL_BUFFERFLAG_DECODEONLY 0x00000004 /* decode but do not display */
+#define HAL_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HAL_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define HAL_BUFFERFLAG_SYNCFRAME 0x00000020 /* keyframe / random access point */
+#define HAL_BUFFERFLAG_EXTRADATA 0x00000040
+#define HAL_BUFFERFLAG_CODECCONFIG 0x00000080 /* buffer holds codec config (e.g. SPS/PPS), not frame data */
+#define HAL_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HAL_BUFFERFLAG_READONLY 0x00000200
+#define HAL_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+
+enum vidc_status { /* HAL-level status codes returned to msm_vidc */
+ VIDC_ERR_NONE = 0x0,
+ VIDC_ERR_FAIL = 0x80000000, /* generic errors auto-increment from here */
+ VIDC_ERR_ALLOC_FAIL,
+ VIDC_ERR_ILLEGAL_OP,
+ VIDC_ERR_BAD_PARAM,
+ VIDC_ERR_BAD_HANDLE,
+ VIDC_ERR_NOT_SUPPORTED,
+ VIDC_ERR_BAD_STATE,
+ VIDC_ERR_MAX_CLIENT,
+ VIDC_ERR_IFRAME_EXPECTED,
+ VIDC_ERR_HW_FATAL,
+ VIDC_ERR_BITSTREAM_ERR,
+ VIDC_ERR_INDEX_NOMORE,
+ VIDC_ERR_SEQHDR_PARSE_FAIL,
+ VIDC_ERR_SEQHDR_PARSE_FAIL == is not defined here; see firmware mapping in the interrupt handler
+ VIDC_ERR_INSUFFICIENT_BUFFER,
+ VIDC_ERR_BAD_POWER_STATE,
+ VIDC_ERR_NO_VALID_SESSION,
+ VIDC_ERR_TIMEOUT,
+ VIDC_ERR_CMDQFULL,
+ VIDC_ERR_CLIENT_PRESENT = 0x90000001, /* client-level errors start here */
+ VIDC_ERR_CLIENT_FATAL,
+ VIDC_ERR_CMD_QUEUE_FULL,
+ VIDC_ERR_UNUSED = 0x10000000
+};
+
+enum hal_property { /* property IDs for {get,set}_property; values auto-increment from 0x04000001 — NOTE(review): inserting in the middle silently renumbers everything after it */
+ HAL_CONFIG_FRAME_RATE = 0x04000001,
+ HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
+ HAL_PARAM_FRAME_SIZE,
+ HAL_CONFIG_REALTIME,
+ HAL_PARAM_BUFFER_COUNT_ACTUAL,
+ HAL_PARAM_NAL_STREAM_FORMAT_SELECT,
+ HAL_PARAM_VDEC_OUTPUT_ORDER,
+ HAL_PARAM_VDEC_PICTURE_TYPE_DECODE,
+ HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
+ HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+ HAL_PARAM_VDEC_MULTI_STREAM,
+ HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
+ HAL_PARAM_DIVX_FORMAT,
+ HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
+ HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
+ HAL_CONFIG_VDEC_MB_ERROR_MAP,
+ HAL_CONFIG_VENC_REQUEST_IFRAME,
+ HAL_PARAM_VENC_MPEG4_SHORT_HEADER,
+ HAL_PARAM_VENC_MPEG4_AC_PREDICTION,
+ HAL_CONFIG_VENC_TARGET_BITRATE,
+ HAL_PARAM_PROFILE_LEVEL_CURRENT,
+ HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
+ HAL_PARAM_VENC_RATE_CONTROL,
+ HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION,
+ HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION,
+ HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
+ HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
+ HAL_PARAM_VENC_SESSION_QP,
+ HAL_CONFIG_VENC_INTRA_PERIOD,
+ HAL_CONFIG_VENC_IDR_PERIOD,
+ HAL_CONFIG_VPE_OPERATIONS,
+ HAL_PARAM_VENC_INTRA_REFRESH,
+ HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
+ HAL_CONFIG_VPE_DEINTERLACE,
+ HAL_SYS_DEBUG_CONFIG,
+ HAL_CONFIG_BUFFER_REQUIREMENTS,
+ HAL_CONFIG_PRIORITY,
+ HAL_CONFIG_BATCH_INFO,
+ HAL_PARAM_METADATA_PASS_THROUGH,
+ HAL_SYS_IDLE_INDICATOR,
+ HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED,
+ HAL_PARAM_INTERLACE_FORMAT_SUPPORTED,
+ HAL_PARAM_CHROMA_SITE,
+ HAL_PARAM_PROPERTIES_SUPPORTED,
+ HAL_PARAM_PROFILE_LEVEL_SUPPORTED,
+ HAL_PARAM_CAPABILITY_SUPPORTED,
+ HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED,
+ HAL_PARAM_MULTI_VIEW_FORMAT,
+ HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE,
+ HAL_PARAM_CODEC_SUPPORTED,
+ HAL_PARAM_VDEC_MULTI_VIEW_SELECT,
+ HAL_PARAM_VDEC_MB_QUANTIZATION,
+ HAL_PARAM_VDEC_NUM_CONCEALED_MB,
+ HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING,
+ HAL_PARAM_VENC_SLICE_DELIVERY_MODE,
+ HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING,
+ HAL_CONFIG_BUFFER_COUNT_ACTUAL,
+ HAL_CONFIG_VDEC_MULTI_STREAM,
+ HAL_PARAM_VENC_MULTI_SLICE_INFO,
+ HAL_CONFIG_VENC_TIMESTAMP_SCALE,
+ HAL_PARAM_VENC_LOW_LATENCY,
+};
+
+enum hal_domain { /* which video engine a session targets */
+ HAL_VIDEO_DOMAIN_VPE,
+ HAL_VIDEO_DOMAIN_ENCODER,
+ HAL_VIDEO_DOMAIN_DECODER,
+ HAL_UNUSED_DOMAIN = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum hal_video_codec { /* bitmask: one bit per supported codec */
+ HAL_VIDEO_CODEC_UNKNOWN = 0x00000000,
+ HAL_VIDEO_CODEC_MVC = 0x00000001,
+ HAL_VIDEO_CODEC_H264 = 0x00000002,
+ HAL_VIDEO_CODEC_H263 = 0x00000004,
+ HAL_VIDEO_CODEC_MPEG1 = 0x00000008,
+ HAL_VIDEO_CODEC_MPEG2 = 0x00000010,
+ HAL_VIDEO_CODEC_MPEG4 = 0x00000020,
+ HAL_VIDEO_CODEC_DIVX_311 = 0x00000040,
+ HAL_VIDEO_CODEC_DIVX = 0x00000080,
+ HAL_VIDEO_CODEC_VC1 = 0x00000100,
+ HAL_VIDEO_CODEC_SPARK = 0x00000200,
+ HAL_VIDEO_CODEC_VP6 = 0x00000400,
+ HAL_VIDEO_CODEC_VP7 = 0x00000800,
+ HAL_VIDEO_CODEC_VP8 = 0x00001000,
+ HAL_UNUSED_CODEC = 0x10000000,
+};
+
+enum hal_h263_profile { /* bitmask of H.263 profiles */
+ HAL_H263_PROFILE_BASELINE = 0x00000001,
+ HAL_H263_PROFILE_H320CODING = 0x00000002,
+ HAL_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
+ HAL_H263_PROFILE_ISWV2 = 0x00000008,
+ HAL_H263_PROFILE_ISWV3 = 0x00000010,
+ HAL_H263_PROFILE_HIGHCOMPRESSION = 0x00000020,
+ HAL_H263_PROFILE_INTERNET = 0x00000040,
+ HAL_H263_PROFILE_INTERLACE = 0x00000080,
+ HAL_H263_PROFILE_HIGHLATENCY = 0x00000100,
+ HAL_UNUSED_H263_PROFILE = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum hal_h263_level { /* bitmask of H.263 levels */
+ HAL_H263_LEVEL_10 = 0x00000001,
+ HAL_H263_LEVEL_20 = 0x00000002,
+ HAL_H263_LEVEL_30 = 0x00000004,
+ HAL_H263_LEVEL_40 = 0x00000008,
+ HAL_H263_LEVEL_45 = 0x00000010,
+ HAL_H263_LEVEL_50 = 0x00000020,
+ HAL_H263_LEVEL_60 = 0x00000040,
+ HAL_H263_LEVEL_70 = 0x00000080,
+ HAL_UNUSED_H263_LEVEL = 0x10000000,
+};
+
+enum hal_mpeg2_profile { /* bitmask of MPEG-2 profiles */
+ HAL_MPEG2_PROFILE_SIMPLE = 0x00000001,
+ HAL_MPEG2_PROFILE_MAIN = 0x00000002,
+ HAL_MPEG2_PROFILE_422 = 0x00000004,
+ HAL_MPEG2_PROFILE_SNR = 0x00000008,
+ HAL_MPEG2_PROFILE_SPATIAL = 0x00000010,
+ HAL_MPEG2_PROFILE_HIGH = 0x00000020,
+ HAL_UNUSED_MPEG2_PROFILE = 0x10000000,
+};
+
+enum hal_mpeg2_level { /* bitmask of MPEG-2 levels */
+ HAL_MPEG2_LEVEL_LL = 0x00000001,
+ HAL_MPEG2_LEVEL_ML = 0x00000002,
+ HAL_MPEG2_LEVEL_H14 = 0x00000004,
+ HAL_MPEG2_LEVEL_HL = 0x00000008,
+ HAL_UNUSED_MEPG2_LEVEL = 0x10000000, /* NOTE(review): "MEPG2" is a typo for "MPEG2"; harmless but worth fixing with its users */
+};
+
+enum hal_mpeg4_profile { /* bitmask of MPEG-4 profiles */
+ HAL_MPEG4_PROFILE_SIMPLE = 0x00000001,
+ HAL_MPEG4_PROFILE_SIMPLESCALABLE = 0x00000002,
+ HAL_MPEG4_PROFILE_CORE = 0x00000004,
+ HAL_MPEG4_PROFILE_MAIN = 0x00000008,
+ HAL_MPEG4_PROFILE_NBIT = 0x00000010,
+ HAL_MPEG4_PROFILE_SCALABLETEXTURE = 0x00000020,
+ HAL_MPEG4_PROFILE_SIMPLEFACE = 0x00000040,
+ HAL_MPEG4_PROFILE_SIMPLEFBA = 0x00000080,
+ HAL_MPEG4_PROFILE_BASICANIMATED = 0x00000100,
+ HAL_MPEG4_PROFILE_HYBRID = 0x00000200,
+ HAL_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
+ HAL_MPEG4_PROFILE_CORESCALABLE = 0x00000800,
+ HAL_MPEG4_PROFILE_ADVANCEDCODING = 0x00001000,
+ HAL_MPEG4_PROFILE_ADVANCEDCORE = 0x00002000,
+ HAL_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
+ HAL_MPEG4_PROFILE_ADVANCEDSIMPLE = 0x00008000,
+ HAL_UNUSED_MPEG4_PROFILE = 0x10000000,
+};
+
+enum hal_mpeg4_level { /* bitmask of MPEG-4 levels; vendor extensions from 0x7F000000 */
+ HAL_MPEG4_LEVEL_0 = 0x00000001,
+ HAL_MPEG4_LEVEL_0b = 0x00000002,
+ HAL_MPEG4_LEVEL_1 = 0x00000004,
+ HAL_MPEG4_LEVEL_2 = 0x00000008,
+ HAL_MPEG4_LEVEL_3 = 0x00000010,
+ HAL_MPEG4_LEVEL_4 = 0x00000020,
+ HAL_MPEG4_LEVEL_4a = 0x00000040,
+ HAL_MPEG4_LEVEL_5 = 0x00000080,
+ HAL_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
+ HAL_MPEG4_LEVEL_6 = 0x7F000001,
+ HAL_MPEG4_LEVEL_7 = 0x7F000002,
+ HAL_MPEG4_LEVEL_8 = 0x7F000003,
+ HAL_MPEG4_LEVEL_9 = 0x7F000004,
+ HAL_MPEG4_LEVEL_3b = 0x7F000005,
+ HAL_UNUSED_MPEG4_LEVEL = 0x10000000,
+};
+
+enum hal_h264_profile { /* bitmask of H.264 profiles */
+ HAL_H264_PROFILE_BASELINE = 0x00000001,
+ HAL_H264_PROFILE_MAIN = 0x00000002,
+ HAL_H264_PROFILE_EXTENDED = 0x00000004,
+ HAL_H264_PROFILE_HIGH = 0x00000008,
+ HAL_H264_PROFILE_HIGH10 = 0x00000010,
+ HAL_H264_PROFILE_HIGH422 = 0x00000020,
+ HAL_H264_PROFILE_HIGH444 = 0x00000040,
+ HAL_UNUSED_H264_PROFILE = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum hal_h264_level { /* bitmask of H.264 levels */
+ HAL_H264_LEVEL_1 = 0x00000001,
+ HAL_H264_LEVEL_1b = 0x00000002,
+ HAL_H264_LEVEL_11 = 0x00000004,
+ HAL_H264_LEVEL_12 = 0x00000008,
+ HAL_H264_LEVEL_13 = 0x00000010,
+ HAL_H264_LEVEL_2 = 0x00000020,
+ HAL_H264_LEVEL_21 = 0x00000040,
+ HAL_H264_LEVEL_22 = 0x00000080,
+ HAL_H264_LEVEL_3 = 0x00000100,
+ HAL_H264_LEVEL_31 = 0x00000200,
+ HAL_H264_LEVEL_32 = 0x00000400,
+ HAL_H264_LEVEL_4 = 0x00000800,
+ HAL_H264_LEVEL_41 = 0x00001000,
+ HAL_H264_LEVEL_42 = 0x00002000,
+ HAL_H264_LEVEL_5 = 0x00004000,
+ HAL_H264_LEVEL_51 = 0x00008000,
+ HAL_UNUSED_H264_LEVEL = 0x10000000,
+};
+
+enum hal_vpx_profile { /* bitmask of VP6/7/8 profiles and versions */
+ HAL_VPX_PROFILE_SIMPLE = 0x00000001,
+ HAL_VPX_PROFILE_ADVANCED = 0x00000002,
+ HAL_VPX_PROFILE_VERSION_0 = 0x00000004,
+ HAL_VPX_PROFILE_VERSION_1 = 0x00000008,
+ HAL_VPX_PROFILE_VERSION_2 = 0x00000010,
+ HAL_VPX_PROFILE_VERSION_3 = 0x00000020,
+ HAL_VPX_PROFILE_UNUSED = 0x10000000,
+};
+
+enum hal_vc1_profile { /* bitmask of VC-1 profiles */
+ HAL_VC1_PROFILE_SIMPLE = 0x00000001,
+ HAL_VC1_PROFILE_MAIN = 0x00000002,
+ HAL_VC1_PROFILE_ADVANCED = 0x00000004,
+ HAL_UNUSED_VC1_PROFILE = 0x10000000,
+};
+
+enum hal_vc1_level { /* bitmask of VC-1 levels (named + numeric) */
+ HAL_VC1_LEVEL_LOW = 0x00000001,
+ HAL_VC1_LEVEL_MEDIUM = 0x00000002,
+ HAL_VC1_LEVEL_HIGH = 0x00000004,
+ HAL_VC1_LEVEL_0 = 0x00000008,
+ HAL_VC1_LEVEL_1 = 0x00000010,
+ HAL_VC1_LEVEL_2 = 0x00000020,
+ HAL_VC1_LEVEL_3 = 0x00000040,
+ HAL_VC1_LEVEL_4 = 0x00000080,
+ HAL_UNUSED_VC1_LEVEL = 0x10000000,
+};
+
+enum hal_divx_format { /* DivX container/bitstream version */
+ HAL_DIVX_FORMAT_4,
+ HAL_DIVX_FORMAT_5,
+ HAL_DIVX_FORMAT_6,
+ HAL_UNUSED_DIVX_FORMAT = 0x10000000,
+};
+
+enum hal_divx_profile { /* bitmask of DivX profiles */
+ HAL_DIVX_PROFILE_QMOBILE = 0x00000001,
+ HAL_DIVX_PROFILE_MOBILE = 0x00000002,
+ HAL_DIVX_PROFILE_MT = 0x00000004,
+ HAL_DIVX_PROFILE_HT = 0x00000008,
+ HAL_DIVX_PROFILE_HD = 0x00000010,
+ HAL_UNUSED_DIVX_PROFILE = 0x10000000,
+};
+
+enum hal_mvc_profile { /* bitmask of MVC profiles */
+ HAL_MVC_PROFILE_STEREO_HIGH = 0x00000001,
+ HAL_MVC_PROFILE_MV_HIGH = 0x00000002,
+ HAL_UNUSED_MVC_PROFILE = 0x10000000,
+};
+
+enum hal_mvc_level { /* bitmask of MVC levels (mirrors hal_h264_level) */
+ HAL_MVC_LEVEL_1 = 0x00000001,
+ HAL_MVC_LEVEL_1b = 0x00000002,
+ HAL_MVC_LEVEL_11 = 0x00000004,
+ HAL_MVC_LEVEL_12 = 0x00000008,
+ HAL_MVC_LEVEL_13 = 0x00000010,
+ HAL_MVC_LEVEL_2 = 0x00000020,
+ HAL_MVC_LEVEL_21 = 0x00000040,
+ HAL_MVC_LEVEL_22 = 0x00000080,
+ HAL_MVC_LEVEL_3 = 0x00000100,
+ HAL_MVC_LEVEL_31 = 0x00000200,
+ HAL_MVC_LEVEL_32 = 0x00000400,
+ HAL_MVC_LEVEL_4 = 0x00000800,
+ HAL_MVC_LEVEL_41 = 0x00001000,
+ HAL_MVC_LEVEL_42 = 0x00002000,
+ HAL_MVC_LEVEL_5 = 0x00004000,
+ HAL_MVC_LEVEL_51 = 0x00008000,
+ HAL_UNUSED_MVC_LEVEL = 0x10000000,
+};
+
+enum hal_buffer { /* buffer/port identifiers used throughout the HAL */
+ HAL_BUFFER_INPUT,
+ HAL_BUFFER_OUTPUT,
+ HAL_BUFFER_OUTPUT2, /* secondary output (split/multi-stream) */
+ HAL_BUFFER_EXTRADATA_INPUT,
+ HAL_BUFFER_EXTRADATA_OUTPUT,
+ HAL_BUFFER_EXTRADATA_OUTPUT2,
+ HAL_BUFFER_INTERNAL_SCRATCH, /* firmware-internal working memory */
+ HAL_BUFFER_INTERNAL_PERSIST, /* firmware-internal memory kept across frames */
+ HAL_UNUSED_BUFFER = 0x10000000, /* forces 32-bit enum width */
+};
+
+struct hal_frame_rate { /* HAL_CONFIG_FRAME_RATE payload */
+ enum hal_buffer buffer_type;
+ u32 frame_rate;
+};
+
+enum hal_uncompressed_format { /* raw pixel formats */
+ HAL_COLOR_FORMAT_MONOCHROME,
+ HAL_COLOR_FORMAT_NV12,
+ HAL_COLOR_FORMAT_NV21,
+ HAL_COLOR_FORMAT_NV12_4x4TILE,
+ HAL_COLOR_FORMAT_NV21_4x4TILE,
+ HAL_COLOR_FORMAT_YUYV,
+ HAL_COLOR_FORMAT_YVYU,
+ HAL_COLOR_FORMAT_UYVY,
+ HAL_COLOR_FORMAT_VYUY,
+ HAL_COLOR_FORMAT_RGB565,
+ HAL_COLOR_FORMAT_BGR565,
+ HAL_COLOR_FORMAT_RGB888,
+ HAL_COLOR_FORMAT_BGR888,
+ HAL_UNUSED_COLOR = 0x10000000,
+};
+
+struct hal_uncompressed_format_select { /* HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT payload */
+ enum hal_buffer buffer_type;
+ enum hal_uncompressed_format format;
+};
+
+struct hal_uncompressed_plane_actual { /* per-plane geometry actually used */
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hal_uncompressed_plane_actual_info { /* HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO payload */
+ enum hal_buffer buffer_type;
+ u32 num_planes;
+ struct hal_uncompressed_plane_actual rg_plane_format[1]; /* num_planes entries */
+};
+
+struct hal_uncompressed_plane_constraints { /* hardware alignment constraints per plane */
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hal_uncompressed_plane_actual_constraints_info { /* constraints for all planes of a port */
+ enum hal_buffer buffer_type;
+ u32 num_planes;
+ struct hal_uncompressed_plane_constraints rg_plane_format[1];
+};
+
+struct hal_extra_data_header_config { /* HAL_PARAM_EXTRA_DATA_HEADER_CONFIG payload */
+ u32 type;
+ enum hal_buffer buffer_type;
+ u32 version;
+ u32 port_index;
+ u32 client_extradata_id;
+};
+
+struct hal_frame_size { /* HAL_PARAM_FRAME_SIZE payload */
+ enum hal_buffer buffer_type;
+ u32 width;
+ u32 height;
+};
+
+struct hal_enable { /* generic boolean property payload */
+ u32 enable;
+};
+
+struct hal_buffer_count_actual { /* HAL_PARAM_BUFFER_COUNT_ACTUAL payload */
+ enum hal_buffer buffer_type;
+ u32 buffer_count_actual;
+};
+
+enum hal_nal_stream_format { /* bitmask: how NAL units are delimited in the stream */
+ HAL_NAL_FORMAT_STARTCODES = 0x00000001,
+ HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER = 0x00000002,
+ HAL_NAL_FORMAT_ONE_BYTE_LENGTH = 0x00000004,
+ HAL_NAL_FORMAT_TWO_BYTE_LENGTH = 0x00000008,
+ HAL_NAL_FORMAT_FOUR_BYTE_LENGTH = 0x00000010,
+};
+
+enum hal_output_order { /* decoder output ordering */
+ HAL_OUTPUT_ORDER_DISPLAY,
+ HAL_OUTPUT_ORDER_DECODE,
+ HAL_UNUSED_OUTPUT = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum hal_picture { /* bitmask of picture types; vendor values from 0x7F000000 */
+ HAL_PICTURE_I = 0x01,
+ HAL_PICTURE_P = 0x02,
+ HAL_PICTURE_B = 0x04,
+ HAL_PICTURE_IDR = 0x7F001000,
+ HAL_FRAME_NOTCODED = 0x7F002000,
+ HAL_FRAME_YUV = 0x7F004000,
+ HAL_UNUSED_PICT = 0x10000000,
+};
+
+struct hal_enable_picture { /* HAL_PARAM_VDEC_PICTURE_TYPE_DECODE payload */
+ u32 picture_type; /* bitmask of enum hal_picture values */
+};
+
+struct hal_multi_stream { /* HAL_PARAM_VDEC_MULTI_STREAM payload */
+ enum hal_buffer buffer_type;
+ u32 enable;
+ u32 width;
+ u32 height;
+};
+
+struct hal_display_picture_buffer_count { /* HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT payload */
+ u32 enable;
+ u32 count;
+};
+
+struct hal_mb_error_map { /* HAL_CONFIG_VDEC_MB_ERROR_MAP payload */
+ u32 error_map_size;
+ u8 rg_error_map[1]; /* variable-length bitmap of errored MBs */
+};
+
+struct hal_request_iframe { /* HAL_CONFIG_VENC_REQUEST_IFRAME payload */
+ u32 enable;
+};
+
+struct hal_bitrate { /* HAL_CONFIG_VENC_TARGET_BITRATE payload */
+ u32 bit_rate;
+};
+
+struct hal_profile_level { /* HAL_PARAM_PROFILE_LEVEL_CURRENT payload */
+ u32 profile;
+ u32 level;
+};
+/*
+struct hal_profile_level_range {
+ u32 profile;
+ u32 min_level;
+ u32 max_level;
+}
+
+struct hal_profile_level_supported {
+ u32 profile_count;
+ struct hal_profile_level_range profile_level[1];
+};
+*/
+enum hal_h264_entropy { /* H.264 entropy coding selection */
+ HAL_H264_ENTROPY_CAVLC,
+ HAL_H264_ENTROPY_CABAC,
+ HAL_UNUSED_ENTROPY = 0x10000000,
+};
+
+enum hal_h264_cabac_model { /* CABAC initialization model */
+ HAL_H264_CABAC_MODEL_0,
+ HAL_H264_CABAC_MODEL_1,
+ HAL_H264_CABAC_MODEL_2,
+ HAL_UNUSED_CABAC = 0x10000000,
+};
+
+struct hal_h264_entropy_control { /* HAL_PARAM_VENC_H264_ENTROPY_CONTROL payload */
+ enum hal_h264_entropy entropy_mode;
+ enum hal_h264_cabac_model cabac_model; /* only meaningful for CABAC */
+};
+
+enum hal_rate_control { /* encoder rate-control mode (VBR/CBR x VFR/CFR) */
+ HAL_RATE_CONTROL_OFF,
+ HAL_RATE_CONTROL_VBR_VFR,
+ HAL_RATE_CONTROL_VBR_CFR,
+ HAL_RATE_CONTROL_CBR_VFR,
+ HAL_RATE_CONTROL_CBR_CFR,
+ HAL_UNUSED_RC = 0x10000000,
+};
+
+struct hal_mpeg4_time_resolution { /* HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION payload */
+ u32 time_increment_resolution;
+};
+
+struct hal_mpeg4_header_extension { /* HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION payload */
+ u32 header_extension;
+};
+
+enum hal_h264_db_mode { /* H.264 deblocking filter mode */
+ HAL_H264_DB_MODE_DISABLE,
+ HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
+ HAL_H264_DB_MODE_ALL_BOUNDARY,
+ HAL_UNUSED_H264_DB = 0x10000000,
+};
+
+struct hal_h264_db_control { /* HAL_PARAM_VENC_H264_DEBLOCK_CONTROL payload */
+ enum hal_h264_db_mode mode;
+ int slice_alpha_offset;
+ int slicebeta_offset;
+};
+
+struct hal_temporal_spatial_tradeoff { /* HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF payload */
+ u32 ts_factor;
+};
+
+struct hal_quantization { /* HAL_PARAM_VENC_SESSION_QP payload: per-frame-type QPs */
+ u32 qpi;
+ u32 qpp;
+ u32 qpb;
+};
+
+struct hal_intra_period { /* HAL_CONFIG_VENC_INTRA_PERIOD payload: GOP structure */
+ u32 pframes;
+ u32 bframes;
+};
+
+struct hal_idr_period { /* HAL_CONFIG_VENC_IDR_PERIOD payload */
+ u32 idr_period;
+};
+
+enum hal_rotate { /* VPE rotation in degrees */
+ HAL_ROTATE_NONE,
+ HAL_ROTATE_90,
+ HAL_ROTATE_180,
+ HAL_ROTATE_270,
+ HAL_UNUSED_ROTATE = 0x10000000, /* forces 32-bit enum width */
+};
+
+enum hal_flip { /* VPE mirroring */
+ HAL_FLIP_NONE,
+ HAL_FLIP_HORIZONTAL,
+ HAL_FLIP_VERTICAL,
+ HAL_UNUSED_FLIP = 0x10000000,
+};
+
+struct hal_operations { /* HAL_CONFIG_VPE_OPERATIONS payload */
+ enum hal_rotate rotate;
+ enum hal_flip flip;
+};
+
+enum hal_intra_refresh_mode { /* encoder intra-refresh strategy */
+ HAL_INTRA_REFRESH_NONE,
+ HAL_INTRA_REFRESH_CYCLIC,
+ HAL_INTRA_REFRESH_ADAPTIVE,
+ HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE,
+ HAL_INTRA_REFRESH_RANDOM,
+ HAL_UNUSED_INTRA = 0x10000000,
+};
+
+struct hal_intra_refresh { /* HAL_PARAM_VENC_INTRA_REFRESH payload */
+ enum hal_intra_refresh_mode mode;
+ u32 air_mbs; /* adaptive intra-refresh MB count */
+ u32 air_ref;
+ u32 cir_mbs; /* cyclic intra-refresh MB count */
+};
+
+enum hal_multi_slice { /* encoder slicing mode */
+ HAL_MULTI_SLICE_OFF,
+ HAL_MULTI_SLICE_BY_MB_COUNT,
+ HAL_MULTI_SLICE_BY_BYTE_COUNT,
+ HAL_MULTI_SLICE_GOB,
+ HAL_UNUSED_SLICE = 0x10000000,
+};
+
+struct hal_multi_slice_control { /* HAL_PARAM_VENC_MULTI_SLICE_CONTROL payload */
+ enum hal_multi_slice multi_slice;
+ u32 slice_size; /* MBs or bytes depending on multi_slice mode */
+};
+
+struct hal_debug_config { /* HAL_SYS_DEBUG_CONFIG payload */
+ u32 debug_config;
+};
+
+struct hal_buffer_requirements { /* HAL_CONFIG_BUFFER_REQUIREMENTS payload */
+ enum hal_buffer buffer_type;
+ u32 buffer_size;
+ u32 buffer_region_size;
+ u32 buffer_hold_count;
+ u32 buffer_count_min;
+ u32 buffer_count_actual;
+ u32 contiguous; /* boolean: physically contiguous memory required */
+ u32 buffer_alignment;
+};
+
+enum hal_priority {/* Priority increases with number */
+ HAL_PRIORITY_LOW = 10,
+ HAL_PRIOIRTY_MEDIUM = 20, /* NOTE(review): "PRIOIRTY" is a typo for "PRIORITY"; fix together with any users */
+ HAL_PRIORITY_HIGH = 30,
+ HAL_UNUSED_PRIORITY = 0x10000000,
+};
+
+struct hal_batch_info { /* HAL_CONFIG_BATCH_INFO payload */
+ u32 input_batch_count;
+ u32 output_batch_count;
+};
+
+struct hal_metadata_pass_through { /* HAL_PARAM_METADATA_PASS_THROUGH payload */
+ u32 enable;
+ u32 size;
+};
+
+struct hal_uncompressed_format_supported { /* HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED payload */
+ enum hal_buffer buffer_type;
+ u32 format_entries;
+ u32 rg_format_info[1]; /* format_entries entries */
+};
+
+enum hal_interlace_format { /* bitmask of interlacing modes */
+ HAL_INTERLACE_FRAME_PROGRESSIVE = 0x01,
+ HAL_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST = 0x02,
+ HAL_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST = 0x04,
+ HAL_INTERLACE_FRAME_TOPFIELDFIRST = 0x08,
+ HAL_INTERLACE_FRAME_BOTTOMFIELDFIRST = 0x10,
+ HAL_UNUSED_INTERLACE = 0x10000000, /* forces 32-bit enum width */
+};
+
+struct hal_interlace_format_supported { /* HAL_PARAM_INTERLACE_FORMAT_SUPPORTED payload */
+ enum hal_buffer buffer_type;
+ enum hal_interlace_format format;
+};
+
+enum hal_chroma_site { /* chroma siting selection */
+ HAL_CHROMA_SITE_0,
+ HAL_CHROMA_SITE_1,
+ HAL_UNUSED_CHROMA = 0x10000000,
+};
+
+struct hal_properties_supported { /* HAL_PARAM_PROPERTIES_SUPPORTED payload */
+ u32 num_properties;
+ u32 rg_properties[1]; /* num_properties entries */
+};
+
+enum hal_capability { /* dimension being described by hal_capability_supported */
+ HAL_CAPABILITY_FRAME_WIDTH,
+ HAL_CAPABILITY_FRAME_HEIGHT,
+ HAL_CAPABILITY_MBS_PER_FRAME,
+ HAL_CAPABILITY_MBS_PER_SECOND,
+ HAL_CAPABILITY_FRAMERATE,
+ HAL_CAPABILITY_SCALE_X,
+ HAL_CAPABILITY_SCALE_Y,
+ HAL_CAPABILITY_BITRATE,
+ HAL_UNUSED_CAPABILITY = 0x10000000,
+};
+
+struct hal_capability_supported { /* one capability's [min, max] range and granularity */
+ enum hal_capability capability_type;
+ u32 min;
+ u32 max;
+ u32 step_size;
+};
+
+struct hal_capability_supported_info { /* HAL_PARAM_CAPABILITY_SUPPORTED payload */
+ u32 num_capabilities;
+ struct hal_capability_supported rg_data[1];
+};
+
+struct hal_nal_stream_format_supported { /* bitmask of enum hal_nal_stream_format */
+ u32 nal_stream_format_supported;
+};
+
+struct hal_multi_view_format { /* HAL_PARAM_MULTI_VIEW_FORMAT payload */
+ u32 views;
+ u32 rg_view_order[1]; /* views entries */
+};
+
+struct hal_seq_header_info { /* HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE payload */
+ u32 nax_header_len; /* NOTE(review): likely a typo for "max_header_len"; fix together with any users */
+};
+
+struct hal_codec_supported { /* HAL_PARAM_CODEC_SUPPORTED payload: enum hal_video_codec bitmasks */
+ u32 decoder_codec_supported;
+ u32 encoder_codec_supported;
+};
+
+struct hal_multi_view_select { /* HAL_PARAM_VDEC_MULTI_VIEW_SELECT payload */
+ u32 view_index;
+};
+
+struct hal_timestamp_scale { /* HAL_CONFIG_VENC_TIMESTAMP_SCALE payload */
+ u32 time_stamp_scale;
+};
+
+enum vidc_resource_id { /* external resources the video core can claim */
+ VIDC_RESOURCE_OCMEM = 0x00000001, /* on-chip memory */
+ VIDC_UNUSED_RESORUCE = 0x10000000, /* NOTE(review): "RESORUCE" is a typo for "RESOURCE"; fix together with any users */
+};
+
+struct vidc_resource_hdr { /* describes one resource for set/release_resource */
+ enum vidc_resource_id resource_id;
+ u32 resource_handle;
+ u32 size;
+};
+
+struct vidc_buffer_addr_info { /* describes buffers passed to set/release_buffers */
+ enum hal_buffer buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 align_device_addr; /* device (IOMMU) address of first buffer */
+ u32 extradata_size;
+ u32 extradata_addr;
+};
+
+struct vidc_frame_plane_config { /* geometry of one plane of a raw frame */
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 scan_lines;
+};
+
+struct vidc_uncompressed_frame_config { /* luma + chroma plane geometry */
+ struct vidc_frame_plane_config luma_plane;
+ struct vidc_frame_plane_config chroma_plane;
+};
+
+struct vidc_frame_data { /* one ETB/FTB buffer descriptor */
+ enum hal_buffer buffer_type;
+ u32 device_addr;
+ u32 extradata_addr;
+ int64_t timestamp; /* microseconds (presumably; confirm against callers) */
+ u32 flags; /* HAL_BUFFERFLAG_* bits */
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 mark_target;
+ u32 mark_data;
+ u32 clnt_data; /* opaque client cookie, returned in the done message */
+};
+
+struct vidc_seq_hdr { /* sequence header blob for parse/get_seq_hdr */
+ u8 *seq_hdr;
+ u32 seq_hdr_len;
+};
+
+enum hal_flush { /* which port(s) a flush applies to */
+ HAL_FLUSH_INPUT,
+ HAL_FLUSH_OUTPUT,
+ HAL_FLUSH_OUTPUT2,
+ HAL_FLUSH_ALL,
+ HAL_UNUSED_FLUSH = 0x10000000, /* forces 32-bit enum width */
+};
+
+/* HAL Response */
+
+enum command_response { /* response codes delivered to the msm_vidc callback */
+/* SYSTEM COMMANDS_DONE*/
+ VIDC_EVENT_CHANGE,
+ SYS_INIT_DONE,
+ SET_RESOURCE_DONE,
+ RELEASE_RESOURCE_DONE,
+ PING_ACK_DONE,
+ PC_PREP_DONE,
+ SYS_IDLE,
+ SYS_DEBUG,
+/* SESSION COMMANDS_DONE */
+ SESSION_LOAD_RESOURCE_DONE,
+ SESSION_INIT_DONE,
+ SESSION_END_DONE,
+ SESSION_ABORT_DONE,
+ SESSION_START_DONE,
+ SESSION_STOP_DONE,
+ SESSION_ETB_DONE,
+ SESSION_FTB_DONE,
+ SESSION_FLUSH_DONE,
+ SESSION_SUSPEND_DONE,
+ SESSION_RESUME_DONE,
+ SESSION_SET_PROP_DONE,
+ SESSION_GET_PROP_DONE,
+ SESSION_PARSE_SEQ_HDR_DONE,
+ SESSION_GET_SEQ_HDR_DONE,
+ SESSION_RELEASE_BUFFER_DONE,
+ SESSION_RELEASE_RESOURCE_DONE,
+ SESSION_PROPERTY_INFO,
+ RESPONSE_UNUSED = 0x10000000, /* forces 32-bit enum width */
+};
+
+/* Command Callback structure */
+
+struct msm_vidc_cb_cmd_done { /* payload for command-completion callbacks */
+ u32 device_id;
+ u32 session_id;
+ u32 status; /* enum vidc_status value */
+ u32 size; /* bytes at data */
+ void *data; /* response-specific payload */
+};
+
+struct msm_vidc_cb_event { /* payload for VIDC_EVENT_CHANGE callbacks */
+ u32 device_id;
+ u32 session_id;
+ u32 status;
+ u32 height;
+ u32 width;
+};
+
+/* Data callback structure */
+
+struct vidc_hal_ebd { /* empty-buffer-done (input consumed) details */
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ enum hal_picture picture_type;
+ u8 *packet_buffer;
+ u8 *extra_data_buffer;
+};
+
+struct vidc_hal_fbd { /* fill-buffer-done (output produced) details; up to three planes */
+ u32 stream_id;
+ u32 view_id;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
+ u32 flags1; /* plane-0 fields carry a "1" suffix */
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len1;
+ u32 filled_len1;
+ u32 offset1;
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_xCoord;
+ u32 start_yCoord;
+ u32 input_tag;
+ u32 input_tag1;
+ enum hal_picture picture_type;
+ u8 *packet_buffer1;
+ u8 *extra_data_buffer;
+ u32 flags2; /* plane-1 */
+ u32 alloc_len2;
+ u32 filled_len2;
+ u32 offset2;
+ u8 *packet_buffer2;
+ u32 flags3; /* plane-2 */
+ u32 alloc_len3;
+ u32 filled_len3;
+ u32 offset3;
+ u8 *packet_buffer3;
+ enum hal_buffer buffer_type;
+};
+
+struct msm_vidc_cb_data_done { /* payload for ETB/FTB-done callbacks */
+ u32 device_id;
+ u32 session_id;
+ u32 status;
+ u32 size;
+ void *clnt_data; /* client cookie from vidc_frame_data */
+ union {
+ struct vidc_hal_ebd input_done;
+ struct vidc_hal_fbd output_done;
+ };
+};
+
+struct vidc_hal_sys_init_done { /* parsed SYS_INIT_DONE: codec bitmasks (enum hal_video_codec) */
+ u32 enc_codec_supported;
+ u32 dec_codec_supported;
+};
+
+struct vidc_hal_session_init_done { /* parsed SESSION_INIT_DONE: capability ranges */
+ struct hal_capability_supported width;
+ struct hal_capability_supported height;
+ struct hal_capability_supported mbs_per_frame;
+ struct hal_capability_supported mbs_per_sec;
+ struct hal_capability_supported frame_rate;
+ struct hal_capability_supported scale_x;
+ struct hal_capability_supported scale_y;
+ struct hal_capability_supported bitrate;
+ struct hal_uncompressed_format_supported uncomp_format;
+ struct hal_interlace_format_supported HAL_format;
+ struct hal_nal_stream_format_supported nal_stream_format;
+/* struct hal_profile_level_supported profile_level;
+ // allocate and released memory for above. */
+ struct hal_intra_refresh intra_refresh;
+ struct hal_seq_header_info seq_hdr_info;
+};
+
+struct buffer_requirements { /* requirements for each buffer type (indexed by enum hal_buffer, 8 entries) */
+ struct hal_buffer_requirements buffer[8];
+};
+
+/* VIDC_HAL CORE API's */
+int vidc_hal_core_init(void *device);
+int vidc_hal_core_release(void *device);
+int vidc_hal_core_pc_prep(void *device); /* prepare core for power collapse */
+int vidc_hal_core_set_resource(void *device,
+	struct vidc_resource_hdr *resource_hdr, void *resource_value);
+int vidc_hal_core_release_resource(void *device,
+	struct vidc_resource_hdr *resource_hdr);
+int vidc_hal_core_ping(void *device);
+
+/* VIDC_HAL SESSION API's */
+void *vidc_hal_session_init(void *device, u32 session_id,
+	enum hal_domain session_type, enum hal_video_codec codec_type);
+int vidc_hal_session_end(void *session);
+int vidc_hal_session_abort(void *session);
+int vidc_hal_session_set_buffers(void *sess,
+	struct vidc_buffer_addr_info *buffer_info);
+int vidc_hal_session_release_buffers(void *sess,
+	struct vidc_buffer_addr_info *buffer_info);
+int vidc_hal_session_load_res(void *sess);
+int vidc_hal_session_release_res(void *sess);
+int vidc_hal_session_start(void *sess);
+int vidc_hal_session_stop(void *sess);
+int vidc_hal_session_suspend(void *sess);
+int vidc_hal_session_resume(void *sess);
+int vidc_hal_session_etb(void *sess,
+	struct vidc_frame_data *input_frame); /* queue an input (empty-this-buffer) */
+int vidc_hal_session_ftb(void *sess,
+	struct vidc_frame_data *output_frame); /* queue an output (fill-this-buffer) */
+int vidc_hal_session_parse_seq_hdr(void *sess,
+	struct vidc_seq_hdr *seq_hdr);
+int vidc_hal_session_get_seq_hdr(void *sess,
+	struct vidc_seq_hdr *seq_hdr);
+int vidc_hal_session_get_buf_req(void *sess);
+int vidc_hal_session_flush(void *sess, enum hal_flush flush_mode);
+int vidc_hal_session_set_property(void *sess, enum hal_property ptype,
+	void *pdata);
+int vidc_hal_session_get_property(void *sess, enum hal_property ptype,
+	void *pdata);
+void *vidc_hal_add_device(u32 device_id, u32 base_addr,
+	u32 reg_base, u32 reg_size, u32 irq,
+	void (*callback) (enum command_response cmd, void *data));
+void vidc_hal_delete_device(void *device);
+
+#endif /*__VIDC_HAL_API_H__ */
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
new file mode 100644
index 0000000..cb44d3a
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -0,0 +1,781 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include "vidc_hal.h"
+
+/*
+ * Translate a firmware (HFI) error code into the driver-facing
+ * vidc_status space so upper layers never handle raw HFI values.
+ * Unrecognised codes collapse to VIDC_ERR_FAIL.
+ */
+static enum vidc_status vidc_map_hal_err_status(enum HFI_ERROR hfi_err)
+{
+	enum vidc_status vidc_err;
+	switch (hfi_err) {
+	case HFI_ERR_NONE:
+	case HFI_ERR_SESSION_SAME_STATE_OPERATION:
+		vidc_err = VIDC_ERR_NONE;
+		break;
+	case HFI_ERR_SYS_FATAL:
+		vidc_err = VIDC_ERR_HW_FATAL;
+		break;
+	case HFI_ERR_SYS_VERSION_MISMATCH:
+	case HFI_ERR_SYS_INVALID_PARAMETER:
+	case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
+	case HFI_ERR_SESSION_INVALID_PARAMETER:
+	case HFI_ERR_SESSION_INVALID_SESSION_ID:
+	case HFI_ERR_SESSION_INVALID_STREAM_ID:
+		vidc_err = VIDC_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
+	case HFI_ERR_SYS_UNSUPPORTED_DOMAIN:
+	case HFI_ERR_SYS_UNSUPPORTED_CODEC:
+	case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
+	case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+	case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+		vidc_err = VIDC_ERR_NOT_SUPPORTED;
+		break;
+	case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
+		vidc_err = VIDC_ERR_MAX_CLIENT;
+		break;
+	case HFI_ERR_SYS_SESSION_IN_USE:
+		vidc_err = VIDC_ERR_CLIENT_PRESENT;
+		break;
+	case HFI_ERR_SESSION_FATAL:
+		vidc_err = VIDC_ERR_CLIENT_FATAL;
+		break;
+	case HFI_ERR_SESSION_BAD_POINTER:
+		vidc_err = VIDC_ERR_BAD_PARAM;
+		break;
+	case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION:
+		vidc_err = VIDC_ERR_BAD_STATE;
+		break;
+	case HFI_ERR_SESSION_STREAM_CORRUPT:
+	case HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED:
+		vidc_err = VIDC_ERR_BITSTREAM_ERR;
+		break;
+	case HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED:
+		vidc_err = VIDC_ERR_IFRAME_EXPECTED;
+		break;
+	case HFI_ERR_SYS_UNKNOWN:
+	case HFI_ERR_SESSION_UNKNOWN:
+	case HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING:
+	default:
+		/* anything we do not recognise is a generic failure */
+		vidc_err = VIDC_ERR_FAIL;
+		break;
+	}
+	return vidc_err;
+}
+
+/*
+ * SEQUENCE_CHANGED event: the firmware detected new stream properties
+ * (only frame size is parsed today) and we report them to the client
+ * through the VIDC_EVENT_CHANGE callback.
+ */
+void hal_process_sess_evt_seq_changed(struct hal_device *device,
+	struct hfi_msg_event_notify_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	struct msm_vidc_cb_event event_notify;
+	int num_properties_changed;
+	struct hfi_frame_size frame_sz;
+	u8 *data_ptr;
+	enum HFI_PROPERTY prop_id;
+	HAL_MSG_LOW("RECEIVED:EVENT_NOTIFY");
+	if (sizeof(struct hfi_msg_event_notify_packet)
+		> pkt->size) {
+		HAL_MSG_ERROR("hal_process_sess_evt_seq_changed:"
+			"bad_pkt_size");
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	memset(&event_notify, 0, sizeof(struct
+			msm_vidc_cb_event));
+
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id = ((struct hal_session *) pkt->session_id)->
+		session_id;
+	cmd_done.status = VIDC_ERR_NONE;
+	cmd_done.size = sizeof(struct msm_vidc_cb_event);
+	num_properties_changed = pkt->event_data2;
+	if (num_properties_changed) {
+		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
+		do {
+			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			switch (prop_id) {
+			case HFI_PROPERTY_PARAM_FRAME_SIZE:
+				frame_sz.buffer =
+					(enum HFI_BUFFER)
+						*((((u32 *)data_ptr)+1));
+				frame_sz.width =
+					event_notify.width =
+						*((((u32 *)data_ptr)+2));
+				frame_sz.height =
+					event_notify.height =
+						*((((u32 *)data_ptr)+3));
+				/* 4 u32 words consumed (prop id + buffer +
+				 * width + height); the old "data_ptr += 4"
+				 * advanced the byte pointer past the prop id
+				 * only, misparsing any following property */
+				data_ptr += 4 * sizeof(u32);
+				break;
+			default:
+				/* NOTE(review): unknown properties carry no
+				 * length here, so they cannot be skipped;
+				 * later entries may be misread */
+				break;
+			}
+			num_properties_changed--;
+		} while (num_properties_changed > 0);
+	}
+	cmd_done.data = &event_notify;
+	device->callback(VIDC_EVENT_CHANGE, &cmd_done);
+}
+
+/*
+ * Entry point for EVENT_NOTIFY packets: dispatch sequence-change events
+ * to their handler; SYS/SESSION errors are only logged for now.
+ */
+static void hal_process_event_notify(struct hal_device *device,
+	struct hfi_msg_event_notify_packet *pkt)
+{
+	HAL_MSG_LOW("RECVD:EVENT_NOTIFY");
+
+	if (!device || !pkt ||
+		pkt->size < sizeof(struct hfi_msg_event_notify_packet)) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return;
+	}
+
+	switch (pkt->event_id) {
+	case HFI_EVENT_SYS_ERROR:
+		HAL_MSG_INFO("HFI_EVENT_SYS_ERROR");
+		break;
+	case HFI_EVENT_SESSION_ERROR:
+		HAL_MSG_INFO("HFI_EVENT_SESSION_ERROR");
+		break;
+	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+		HAL_MSG_INFO("HFI_EVENT_SESSION_SEQUENCE_CHANGED");
+		hal_process_sess_evt_seq_changed(device, pkt);
+		break;
+	case HFI_EVENT_SESSION_PROPERTY_CHANGED:
+		HAL_MSG_INFO("HFI_EVENT_SESSION_PROPERTY_CHANGED");
+		break;
+	default:
+		/* typo fix: message previously read "unkown_event_id" */
+		HAL_MSG_INFO("hal_process_event_notify:unknown_event_id");
+		break;
+	}
+}
+
+/*
+ * SYS_INIT_DONE: validate the packet, walk the trailing property list
+ * (only CODEC_SUPPORTED is understood today) and report the result via
+ * the device callback.
+ */
+static void hal_process_sys_init_done(struct hal_device *device,
+	struct hfi_msg_sys_init_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	struct vidc_hal_sys_init_done sys_init_done;
+	u32 rem_bytes, bytes_read = 0, num_properties;
+	u8 *data_ptr;
+	enum HFI_PROPERTY prop_id;
+	enum vidc_status status = VIDC_ERR_NONE;
+
+	HAL_MSG_LOW("RECEIVED:SYS_INIT_DONE");
+	if (sizeof(struct hfi_msg_sys_init_done_packet) > pkt->size) {
+		HAL_MSG_ERROR("hal_process_sys_init_done:bad_pkt_size: %d",
+			pkt->size);
+		return;
+	}
+
+	/* Zero unconditionally: every early-exit path below reaches
+	 * err_no_prop, which reports both structures to the callback.
+	 * The old code only cleared them after the first goto, handing
+	 * uninitialized stack data to the client on error paths. */
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	memset(&sys_init_done, 0, sizeof(struct
+		vidc_hal_sys_init_done));
+
+	status = vidc_map_hal_err_status((u32)pkt->error_type);
+
+	if (!status) {
+		if (pkt->num_properties == 0) {
+			HAL_MSG_ERROR("hal_process_sys_init_done:"
+						"no_properties");
+			status = VIDC_ERR_FAIL;
+			goto err_no_prop;
+		}
+
+		/* +sizeof(u32): rg_property_data[0] is inside the packet
+		 * struct but is also the first property word */
+		rem_bytes = pkt->size - sizeof(struct
+			hfi_msg_sys_init_done_packet) + sizeof(u32);
+
+		if (rem_bytes == 0) {
+			HAL_MSG_ERROR("hal_process_sys_init_done:"
+						"missing_prop_info");
+			status = VIDC_ERR_FAIL;
+			goto err_no_prop;
+		}
+
+		data_ptr = (u8 *) &pkt->rg_property_data[0];
+		num_properties = pkt->num_properties;
+
+		/* Bail out of the walk on the first parse error: the old
+		 * loop never advanced on error and spun forever. */
+		while (status == VIDC_ERR_NONE && num_properties &&
+				rem_bytes >= sizeof(u32)) {
+			prop_id = (enum HFI_PROPERTY) *((u32 *)data_ptr);
+			data_ptr = data_ptr + 4;
+			bytes_read = 0;
+
+			switch (prop_id) {
+			case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+			{
+				struct hfi_codec_supported *prop =
+					(struct hfi_codec_supported *) data_ptr;
+				if (rem_bytes < sizeof(struct
+						hfi_codec_supported)) {
+					status = VIDC_ERR_BAD_PARAM;
+					break;
+				}
+				sys_init_done.dec_codec_supported =
+					prop->decoder_codec_supported;
+				sys_init_done.enc_codec_supported =
+					prop->encoder_codec_supported;
+				/* consume the payload so the next property
+				 * is read at the right offset; bytes_read
+				 * was never set in the original code */
+				bytes_read =
+					sizeof(struct hfi_codec_supported);
+				break;
+			}
+			default:
+				HAL_MSG_ERROR("hal_process_sys_init_done:"
+							"bad_prop_id");
+				status = VIDC_ERR_BAD_PARAM;
+				break;
+			}
+			if (!status) {
+				rem_bytes -= bytes_read;
+				data_ptr += bytes_read;
+				num_properties--;
+			}
+		}
+	}
+err_no_prop:
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id = 0;
+	cmd_done.status = (u32) status;
+	cmd_done.size = sizeof(struct vidc_hal_sys_init_done);
+	cmd_done.data = (void *) &sys_init_done;
+	device->callback(SYS_INIT_DONE, &cmd_done);
+}
+
+/*
+ * Placeholder: parsing of the SESSION_INIT_DONE property payload is not
+ * implemented yet, so the packet is accepted unconditionally.
+ */
+enum vidc_status vidc_hal_process_sess_init_done_prop_read(
+	struct hfi_msg_sys_session_init_done_packet *pkt,
+	struct msm_vidc_cb_cmd_done *cmddone)
+{
+	return VIDC_ERR_NONE;
+}
+
+/*
+ * Translate the firmware's buffer-requirement records into the HAL's
+ * fixed buffreq->buffer[] slots.  Records with an unknown buffer type
+ * are logged and skipped; suspicious records (count_min > count_actual,
+ * zero alignment/size) are logged but still copied, preserving the
+ * original behaviour.
+ */
+static void hal_process_sess_get_prop_buf_req(
+	struct hfi_msg_session_property_info_packet *prop,
+	struct buffer_requirements *buffreq)
+{
+	struct hfi_buffer_requirements *hfi_buf_req;
+	u32 req_bytes;
+
+	HAL_MSG_LOW("Entered %s", __func__);
+	req_bytes = prop->size - sizeof(
+		struct hfi_msg_session_property_info_packet);
+
+	/* payload must be a whole number of requirement records */
+	if (req_bytes == 0 || (req_bytes % sizeof(
+		struct hfi_buffer_requirements))) {
+		HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:bad_pkt_size:"
+			" %d", req_bytes);
+		return;
+	}
+
+	hfi_buf_req = (struct hfi_buffer_requirements *)
+		&prop->rg_property_data[1];
+
+	while (req_bytes != 0) {
+		int idx = -1;
+		u32 hal_type = 0;
+
+		if ((hfi_buf_req->buffer_count_min > hfi_buf_req->
+			buffer_count_actual)
+			|| (hfi_buf_req->buffer_alignment == 0)
+			|| (hfi_buf_req->buffer_size == 0)) {
+			HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:"
+				"bad_buf_req");
+		}
+		HAL_MSG_LOW("got buffer requirements for: %d",
+			hfi_buf_req->buffer);
+		/* map HFI buffer id -> (slot index, HAL buffer type);
+		 * replaces eight copy-pasted memcpy arms */
+		switch (hfi_buf_req->buffer) {
+		case HFI_BUFFER_INPUT:
+			idx = 0;
+			hal_type = HAL_BUFFER_INPUT;
+			break;
+		case HFI_BUFFER_OUTPUT:
+			idx = 1;
+			hal_type = HAL_BUFFER_OUTPUT;
+			break;
+		case HFI_BUFFER_OUTPUT2:
+			idx = 2;
+			hal_type = HAL_BUFFER_OUTPUT2;
+			break;
+		case HFI_BUFFER_EXTRADATA_INPUT:
+			idx = 3;
+			hal_type = HAL_BUFFER_EXTRADATA_INPUT;
+			break;
+		case HFI_BUFFER_EXTRADATA_OUTPUT:
+			idx = 4;
+			hal_type = HAL_BUFFER_EXTRADATA_OUTPUT;
+			break;
+		case HFI_BUFFER_EXTRADATA_OUTPUT2:
+			idx = 5;
+			hal_type = HAL_BUFFER_EXTRADATA_OUTPUT2;
+			break;
+		case HFI_BUFFER_INTERNAL_SCRATCH:
+			idx = 6;
+			hal_type = HAL_BUFFER_INTERNAL_SCRATCH;
+			break;
+		case HFI_BUFFER_INTERNAL_PERSIST:
+			idx = 7;
+			hal_type = HAL_BUFFER_INTERNAL_PERSIST;
+			break;
+		default:
+			HAL_MSG_ERROR("hal_process_sess_get_prop_buf_req:"
+				"bad_buffer_type: %d", hfi_buf_req->buffer);
+			break;
+		}
+		if (idx >= 0) {
+			memcpy(&buffreq->buffer[idx], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[idx].buffer_type = hal_type;
+		}
+		req_bytes -= sizeof(struct hfi_buffer_requirements);
+		hfi_buf_req++;
+	}
+}
+
+/*
+ * SESSION_PROPERTY_INFO: only buffer-requirement payloads are decoded
+ * today; any other property id is logged and dropped.
+ */
+static void hal_process_session_prop_info(struct hal_device *device,
+	struct hfi_msg_session_property_info_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	struct buffer_requirements buff_req;
+
+	HAL_MSG_INFO("Received SESSION_PROPERTY_INFO");
+
+	if (pkt->size < sizeof(struct hfi_msg_session_property_info_packet)) {
+		HAL_MSG_ERROR("hal_process_session_prop_info:bad_pkt_size");
+		return;
+	}
+
+	if (pkt->num_properties == 0) {
+		HAL_MSG_ERROR("hal_process_session_prop_info:no_properties");
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	memset(&buff_req, 0, sizeof(struct buffer_requirements));
+
+	/* first property word selects the payload layout */
+	switch (pkt->rg_property_data[0]) {
+	case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+		hal_process_sess_get_prop_buf_req(pkt, &buff_req);
+		cmd_done.device_id = device->device_id;
+		cmd_done.session_id =
+			((struct hal_session *) pkt->session_id)->session_id;
+		cmd_done.status = VIDC_ERR_NONE;
+		/* buff_req is stack-local; the callback must consume it
+		 * synchronously */
+		cmd_done.data = &buff_req;
+		cmd_done.size = sizeof(struct buffer_requirements);
+		device->callback(SESSION_PROPERTY_INFO, &cmd_done);
+		break;
+	default:
+		HAL_MSG_ERROR("hal_process_session_prop_info:"
+			"unknown_prop_id: %d",
+			pkt->rg_property_data[0]);
+		break;
+	}
+}
+
+/*
+ * SESSION_INIT_DONE: map the firmware status and, on success, parse the
+ * trailing property payload (currently a stub) before notifying the
+ * client.
+ */
+static void hal_process_session_init_done(struct hal_device *device,
+	struct hfi_msg_sys_session_init_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	struct vidc_hal_session_init_done session_init_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_INIT_DONE");
+	if (sizeof(struct hfi_msg_sys_session_init_done_packet)
+		> pkt->size) {
+		HAL_MSG_ERROR("hal_process_session_init_done:bad_pkt_size");
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	memset(&session_init_done, 0, sizeof(struct
+		vidc_hal_session_init_done));
+
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.data = &session_init_done;
+	/* only parse properties if the firmware reported success */
+	if (!cmd_done.status) {
+		cmd_done.status = vidc_hal_process_sess_init_done_prop_read(
+			pkt, &cmd_done);
+	}
+	cmd_done.size = sizeof(struct vidc_hal_session_init_done);
+	device->callback(SESSION_INIT_DONE, &cmd_done);
+}
+
+/*
+ * LOAD_RESOURCES_DONE: validate the packet and forward the firmware's
+ * status to the client; this response carries no payload.
+ */
+static void hal_process_session_load_res_done(struct hal_device *device,
+	struct hfi_msg_session_load_resources_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done rsp;
+	HAL_MSG_LOW("RECEIVED:SESSION_LOAD_RESOURCES_DONE");
+
+	if (pkt->size !=
+		sizeof(struct hfi_msg_session_load_resources_done_packet)) {
+		HAL_MSG_ERROR("hal_process_session_load_res_done:"
+			" bad packet size: %d", pkt->size);
+		return;
+	}
+
+	memset(&rsp, 0, sizeof(rsp));
+
+	rsp.device_id = device->device_id;
+	rsp.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	rsp.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	rsp.data = NULL;
+	rsp.size = 0;
+	device->callback(SESSION_LOAD_RESOURCE_DONE, &rsp);
+}
+
+/*
+ * FLUSH_DONE: report which flush (input/output) completed.  The flush
+ * type is passed by value, smuggled through the data pointer, with
+ * size advertising a u32 accordingly.
+ */
+static void hal_process_session_flush_done(struct hal_device *device,
+	struct hfi_msg_session_flush_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_FLUSH_DONE");
+
+	if (sizeof(struct hfi_msg_session_flush_done_packet) != pkt->size) {
+		HAL_MSG_ERROR("hal_process_session_flush_done: "
+			"bad packet size: %d", pkt->size);
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	/* value-in-pointer: not a dereferenceable address */
+	cmd_done.data = (void *) pkt->flush_type;
+	cmd_done.size = sizeof(u32);
+	device->callback(SESSION_FLUSH_DONE, &cmd_done);
+}
+
+/*
+ * EMPTY_BUFFER_DONE (an input buffer was consumed): echo the firmware
+ * status plus the input buffer's identity and offsets back to the
+ * client.
+ */
+static void hal_process_session_etb_done(struct hal_device *device,
+	struct hfi_msg_session_empty_buffer_done_packet *pkt)
+{
+	struct msm_vidc_cb_data_done data_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_ETB_DONE");
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_empty_buffer_done_packet)) {
+		HAL_MSG_ERROR("hal_process_session_etb_done:bad_pkt_size");
+		return;
+	}
+
+	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
+
+	data_done.device_id = device->device_id;
+	data_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	data_done.status = vidc_map_hal_err_status((u32) pkt->error_type);
+	data_done.size = sizeof(struct msm_vidc_cb_data_done);
+	/* input_tag round-trips the client cookie attached at ETB time */
+	data_done.clnt_data = (void *)pkt->input_tag;
+	data_done.input_done.offset = pkt->offset;
+	data_done.input_done.filled_len = pkt->filled_len;
+	data_done.input_done.packet_buffer = pkt->packet_buffer;
+	device->callback(SESSION_ETB_DONE, &data_done);
+}
+
+/*
+ * FILL_BUFFER_DONE: the packet layout differs between encoder output
+ * (compressed) and decoder output (uncompressed, plane 0), selected by
+ * the session's is_decoder flag.
+ */
+static void hal_process_session_ftb_done(struct hal_device *device,
+	void *msg_hdr)
+{
+	struct msm_vidc_cb_data_done data_done;
+	struct hfi_msg_session_fill_buffer_done_compressed_packet *pack;
+	u32 is_decoder;
+	struct hal_session *session;
+
+	/* Check before touching the packet: the old code dereferenced
+	 * msg_hdr ahead of this NULL test. */
+	if (!msg_hdr) {
+		HAL_MSG_ERROR("Invalid Params in %s", __func__);
+		return;
+	}
+
+	pack = (struct hfi_msg_session_fill_buffer_done_compressed_packet *)
+		msg_hdr;
+	is_decoder = ((struct hal_session *)pack->session_id)->is_decoder;
+	session = (struct hal_session *)
+		((struct hal_session *) pack->session_id)->session_id;
+	/* log at LOW like every sibling handler; this was ERROR level */
+	HAL_MSG_LOW("RECEIVED:SESSION_FTB_DONE");
+
+	memset(&data_done, 0, sizeof(struct msm_vidc_cb_data_done));
+
+	if (is_decoder == 0) {
+		/* encoder session: compressed output packet */
+		struct hfi_msg_session_fill_buffer_done_compressed_packet *pkt =
+		(struct hfi_msg_session_fill_buffer_done_compressed_packet *)
+		msg_hdr;
+		if (sizeof(struct
+			hfi_msg_session_fill_buffer_done_compressed_packet)
+			!= pkt->size) {
+			HAL_MSG_ERROR("hal_process_session_ftb_done:"
+				"bad_pkt_size");
+			return;
+		}
+
+		data_done.device_id = device->device_id;
+		data_done.session_id = (u32) session;
+		data_done.status = vidc_map_hal_err_status((u32)
+			pkt->error_type);
+		data_done.size = sizeof(struct msm_vidc_cb_data_done);
+		data_done.clnt_data = (void *) pkt->input_tag;
+
+		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
+		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.flags1 = pkt->flags;
+		data_done.output_done.mark_target = pkt->mark_target;
+		data_done.output_done.mark_data = pkt->mark_data;
+		data_done.output_done.stats = pkt->stats;
+		data_done.output_done.offset1 = pkt->offset;
+		data_done.output_done.alloc_len1 = pkt->alloc_len;
+		data_done.output_done.filled_len1 = pkt->filled_len;
+		data_done.output_done.picture_type = pkt->picture_type;
+		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
+		data_done.output_done.extra_data_buffer =
+			pkt->extra_data_buffer;
+	} else if (is_decoder == 1) {
+		/* decoder session: uncompressed plane-0 packet */
+		struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt =
+		(struct hfi_msg_session_fbd_uncompressed_plane0_packet *)
+		msg_hdr;
+		if (sizeof(struct
+			hfi_msg_session_fbd_uncompressed_plane0_packet)
+			> pkt->size) {
+			HAL_MSG_ERROR("hal_process_session_ftb_done:"
+				"bad_pkt_size");
+			return;
+		}
+
+		data_done.device_id = device->device_id;
+		data_done.session_id = (u32) session;
+		data_done.status = vidc_map_hal_err_status((u32)
+			pkt->error_type);
+		data_done.size = sizeof(struct msm_vidc_cb_data_done);
+		data_done.clnt_data = (void *)pkt->input_tag;
+
+		data_done.output_done.stream_id = pkt->stream_id;
+		data_done.output_done.view_id = pkt->view_id;
+		data_done.output_done.timestamp_hi = pkt->timestamp_hi;
+		data_done.output_done.timestamp_lo = pkt->timestamp_lo;
+		data_done.output_done.flags1 = pkt->flags;
+		data_done.output_done.mark_target = pkt->mark_target;
+		data_done.output_done.mark_data = pkt->mark_data;
+		data_done.output_done.stats = pkt->stats;
+		data_done.output_done.alloc_len1 = pkt->alloc_len;
+		data_done.output_done.filled_len1 = pkt->filled_len;
+		/* NOTE(review): "oofset" matches the HFI struct's field
+		 * name as declared elsewhere -- do not "fix" here */
+		data_done.output_done.offset1 = pkt->oofset;
+		data_done.output_done.frame_width = pkt->frame_width;
+		data_done.output_done.frame_height = pkt->frame_height;
+		data_done.output_done.start_xCoord = pkt->start_xCoord;
+		data_done.output_done.start_yCoord = pkt->start_yCoord;
+		data_done.output_done.input_tag1 = pkt->input_tag1;
+		data_done.output_done.picture_type = pkt->picture_type;
+		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
+		data_done.output_done.extra_data_buffer =
+			pkt->extra_data_buffer;
+
+		/* stream 0 is the primary output port, stream 1 secondary */
+		if (pkt->stream_id == 0)
+			data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT;
+		else if (pkt->stream_id == 1)
+			data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT2;
+	}
+	device->callback(SESSION_FTB_DONE, &data_done);
+}
+
+/*
+ * START_DONE: validate the packet and forward the firmware status to
+ * the client; no payload.
+ */
+static void hal_process_session_start_done(struct hal_device *device,
+	struct hfi_msg_session_start_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_START_DONE");
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_start_done_packet)) {
+		/* must not print pkt->size when pkt is NULL */
+		HAL_MSG_ERROR("hal_process_session_start_done:"
+			"bad packet/packet size: %d",
+			pkt ? pkt->size : 0);
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.data = NULL;
+	cmd_done.size = 0;
+	device->callback(SESSION_START_DONE, &cmd_done);
+}
+
+/*
+ * STOP_DONE: validate the packet and forward the firmware status to
+ * the client; no payload.
+ */
+static void hal_process_session_stop_done(struct hal_device *device,
+	struct hfi_msg_session_stop_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_STOP_DONE");
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_stop_done_packet)) {
+		/* must not print pkt->size when pkt is NULL */
+		HAL_MSG_ERROR("hal_process_session_stop_done:"
+			"bad packet/packet size: %d",
+			pkt ? pkt->size : 0);
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.data = NULL;
+	cmd_done.size = 0;
+	device->callback(SESSION_STOP_DONE, &cmd_done);
+}
+
+/*
+ * RELEASE_RESOURCES_DONE: validate the packet and forward the firmware
+ * status to the client; no payload.
+ */
+static void hal_process_session_rel_res_done(struct hal_device *device,
+	struct hfi_msg_session_release_resources_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_RELEASE_RESOURCES_DONE");
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_session_release_resources_done_packet)) {
+		/* must not print pkt->size when pkt is NULL */
+		HAL_MSG_ERROR("hal_process_session_rel_res_done:"
+			"bad packet/packet size: %d",
+			pkt ? pkt->size : 0);
+		return;
+	}
+
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.data = NULL;
+	cmd_done.size = 0;
+	device->callback(SESSION_RELEASE_RESOURCE_DONE, &cmd_done);
+}
+
+/*
+ * SESSION_END_DONE: tear down the session bookkeeping and notify the
+ * client.
+ */
+static void hal_process_session_end_done(struct hal_device *device,
+	struct hfi_msg_sys_session_end_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	struct list_head *curr, *next;
+	struct hal_session *sess_close;
+
+	HAL_MSG_LOW("RECEIVED:SESSION_END_DONE");
+
+	if (!pkt || pkt->size !=
+		sizeof(struct hfi_msg_sys_session_end_done_packet)) {
+		/* must not print pkt->size when pkt is NULL */
+		HAL_MSG_ERROR("hal_process_session_end_done: "
+			"bad packet/packet size: %d",
+			pkt ? pkt->size : 0);
+		return;
+	}
+
+	/* Build the response first: pkt->session_id points at a
+	 * hal_session that the teardown below kfree()s, so reading it
+	 * afterwards (as the old code did) was a use-after-free. */
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id =
+		((struct hal_session *) pkt->session_id)->session_id;
+	cmd_done.status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.data = NULL;
+	cmd_done.size = 0;
+
+	/* NOTE(review): this frees every session on the device, not just
+	 * the one that ended -- confirm that is the intended semantic */
+	list_for_each_safe(curr, next, &device->sess_head) {
+		sess_close = list_entry(curr, struct hal_session, list);
+		HAL_MSG_MEDIUM("deleted the session: 0x%x",
+			sess_close->session_id);
+		list_del(&sess_close->list);
+		kfree(sess_close);
+	}
+
+	device->callback(SESSION_END_DONE, &cmd_done);
+}
+
+/*
+ * Demultiplex one message-queue packet to its typed handler based on
+ * the HFI packet id.
+ */
+static void hal_process_msg_packet(struct hal_device *device,
+	struct vidc_hal_msg_pkt_hdr *msg_hdr)
+{
+	if (!device || !msg_hdr || msg_hdr->size <
+		VIDC_IFACEQ_MIN_PKT_SIZE) {
+		/* must not print msg_hdr->size when msg_hdr is NULL */
+		HAL_MSG_ERROR("hal_process_msg_packet:bad"
+			"packet/packet size: %d",
+			msg_hdr ? msg_hdr->size : 0);
+		return;
+	}
+
+	HAL_MSG_INFO("Received: 0x%x in %s", msg_hdr->packet, __func__);
+
+	switch (msg_hdr->packet) {
+	case HFI_MSG_EVENT_NOTIFY:
+		hal_process_event_notify(device,
+			(struct hfi_msg_event_notify_packet *) msg_hdr);
+		break;
+	case  HFI_MSG_SYS_INIT_DONE:
+		hal_process_sys_init_done(device,
+			(struct hfi_msg_sys_init_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SYS_SESSION_INIT_DONE:
+		hal_process_session_init_done(device,
+			(struct hfi_msg_sys_session_init_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SYS_SESSION_END_DONE:
+		hal_process_session_end_done(device,
+			(struct hfi_msg_sys_session_end_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_LOAD_RESOURCES_DONE:
+		hal_process_session_load_res_done(device,
+			(struct hfi_msg_session_load_resources_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_START_DONE:
+		hal_process_session_start_done(device,
+			(struct hfi_msg_session_start_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_STOP_DONE:
+		hal_process_session_stop_done(device,
+			(struct hfi_msg_session_stop_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
+		hal_process_session_etb_done(device,
+			(struct hfi_msg_session_empty_buffer_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
+		/* layout depends on session type; handler re-casts */
+		hal_process_session_ftb_done(device, msg_hdr);
+		break;
+	case HFI_MSG_SESSION_FLUSH_DONE:
+		hal_process_session_flush_done(device,
+			(struct hfi_msg_session_flush_done_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_PROPERTY_INFO:
+		hal_process_session_prop_info(device,
+			(struct hfi_msg_session_property_info_packet *)
+					msg_hdr);
+		break;
+	case HFI_MSG_SESSION_RELEASE_RESOURCES_DONE:
+		hal_process_session_rel_res_done(device,
+			(struct hfi_msg_session_release_resources_done_packet *)
+					msg_hdr);
+		break;
+	default:
+		HAL_MSG_ERROR("UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
+		break;
+	}
+}
+
+/*
+ * Interrupt bottom half: drain every pending message from the firmware
+ * message queue and dispatch each packet to hal_process_msg_packet().
+ */
+void vidc_hal_response_handler(struct hal_device *device)
+{
+	u8 msg_buf[VIDC_IFACEQ_MED_PKT_SIZE];
+
+	HAL_MSG_INFO("############vidc_hal_response_handler\n");
+	if (!device) {
+		HAL_MSG_ERROR("SPURIOUS_INTERRUPT");
+		return;
+	}
+	while (!vidc_hal_iface_msgq_read(device, msg_buf))
+		hal_process_msg_packet(device,
+			(struct vidc_hal_msg_pkt_hdr *) msg_buf);
+}
diff --git a/drivers/media/video/msm_vidc/vidc_hal_io.h b/drivers/media/video/msm_vidc/vidc_hal_io.h
new file mode 100644
index 0000000..05a4c60
--- /dev/null
+++ b/drivers/media/video/msm_vidc/vidc_hal_io.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDCHALIO_H__
+#define __VIDCHALIO_H__
+
+#include <linux/io.h>
+
+#define VIDC_VBIF_BASE_OFFS 0x00080000
+#define VIDC_VBIF_VERSION (VIDC_VBIF_BASE_OFFS + 0x00)
+#define VIDC_VBIF_ADDR_TRANS_EN (VIDC_VBIF_BASE_OFFS + 0x10)
+#define VIDC_VBIF_AT_OLD_BASE (VIDC_VBIF_BASE_OFFS + 0x14)
+#define VIDC_VBIF_AT_OLD_HIGH (VIDC_VBIF_BASE_OFFS + 0x18)
+#define VIDC_VBIF_AT_NEW_BASE (VIDC_VBIF_BASE_OFFS + 0x20)
+#define VIDC_VBIF_AT_NEW_HIGH (VIDC_VBIF_BASE_OFFS + 0x28)
+
+#define VIDC_CPU_BASE_OFFS 0x000C0000
+#define VIDC_CPU_CS_BASE_OFFS (VIDC_CPU_BASE_OFFS + 0x00012000)
+#define VIDC_CPU_IC_BASE_OFFS (VIDC_CPU_BASE_OFFS + 0x0001F000)
+
+#define VIDC_CPU_CS_REMAP_OFFS (VIDC_CPU_CS_BASE_OFFS + 0x00)
+#define VIDC_CPU_CS_TIMER_CONTROL (VIDC_CPU_CS_BASE_OFFS + 0x04)
+#define VIDC_CPU_CS_A2HSOFTINTEN (VIDC_CPU_CS_BASE_OFFS + 0x10)
+#define VIDC_CPU_CS_A2HSOFTINTENCLR (VIDC_CPU_CS_BASE_OFFS + 0x14)
+#define VIDC_CPU_CS_A2HSOFTINT (VIDC_CPU_CS_BASE_OFFS + 0x18)
+#define VIDC_CPU_CS_A2HSOFTINTCLR (VIDC_CPU_CS_BASE_OFFS + 0x1C)
+#define VIDC_CPU_CS_SCIACMD (VIDC_CPU_CS_BASE_OFFS + 0x48)
+
+/* HFI_CTRL_STATUS */
+#define VIDC_CPU_CS_SCIACMDARG0 (VIDC_CPU_CS_BASE_OFFS + 0x4C)
+#define VIDC_CPU_CS_SCIACMDARG0_BMSK 0xff
+#define VIDC_CPU_CS_SCIACMDARG0_SHFT 0x0
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK 0xfe
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT 0x1
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK 0x1
+#define VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT 0x0
+
+/* HFI_QTBL_INFO */
+#define VIDC_CPU_CS_SCIACMDARG1 (VIDC_CPU_CS_BASE_OFFS + 0x50)
+
+/* HFI_QTBL_ADDR */
+#define VIDC_CPU_CS_SCIACMDARG2 (VIDC_CPU_CS_BASE_OFFS + 0x54)
+
+/* HFI_VERSION_INFO */
+#define VIDC_CPU_CS_SCIACMDARG3 (VIDC_CPU_CS_BASE_OFFS + 0x58)
+#define VIDC_CPU_IC_IRQSTATUS (VIDC_CPU_IC_BASE_OFFS + 0x00)
+#define VIDC_CPU_IC_FIQSTATUS (VIDC_CPU_IC_BASE_OFFS + 0x04)
+#define VIDC_CPU_IC_RAWINTR (VIDC_CPU_IC_BASE_OFFS + 0x08)
+#define VIDC_CPU_IC_INTSELECT (VIDC_CPU_IC_BASE_OFFS + 0x0C)
+#define VIDC_CPU_IC_INTENABLE (VIDC_CPU_IC_BASE_OFFS + 0x10)
+#define VIDC_CPU_IC_INTENACLEAR (VIDC_CPU_IC_BASE_OFFS + 0x14)
+#define VIDC_CPU_IC_SOFTINT (VIDC_CPU_IC_BASE_OFFS + 0x18)
+#define VIDC_CPU_IC_SOFTINT_H2A_BMSK 0x8000
+#define VIDC_CPU_IC_SOFTINT_H2A_SHFT 0xF
+#define VIDC_CPU_IC_SOFTINTCLEAR (VIDC_CPU_IC_BASE_OFFS + 0x1C)
+
+/*---------------------------------------------------------------------------
+ * MODULE: vidc_wrapper
+ *--------------------------------------------------------------------------*/
+#define VIDC_WRAPPER_BASE_OFFS 0x000E0000
+
+#define VIDC_WRAPPER_HW_VERSION (VIDC_WRAPPER_BASE_OFFS + 0x00)
+#define VIDC_WRAPPER_CLOCK_CONFIG (VIDC_WRAPPER_BASE_OFFS + 0x04)
+
+#define VIDC_WRAPPER_INTR_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x0C)
+#define VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK 0x10
+#define VIDC_WRAPPER_INTR_STATUS_A2HWD_SHFT 0x4
+#define VIDC_WRAPPER_INTR_STATUS_A2H_BMSK 0x4
+#define VIDC_WRAPPER_INTR_STATUS_A2H_SHFT 0x2
+
+#define VIDC_WRAPPER_INTR_MASK (VIDC_WRAPPER_BASE_OFFS + 0x10)
+#define VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK 0x10
+#define VIDC_WRAPPER_INTR_MASK_A2HWD_SHFT 0x4
+#define VIDC_WRAPPER_INTR_MASK_A2H_BMSK 0x4
+#define VIDC_WRAPPER_INTR_MASK_A2H_SHFT 0x2
+
+#define VIDC_WRAPPER_INTR_CLEAR (VIDC_WRAPPER_BASE_OFFS + 0x14)
+#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_BMSK 0x10
+#define VIDC_WRAPPER_INTR_CLEAR_A2HWD_SHFT 0x4
+#define VIDC_WRAPPER_INTR_CLEAR_A2H_BMSK 0x4
+#define VIDC_WRAPPER_INTR_CLEAR_A2H_SHFT 0x2
+
+#define VIDC_WRAPPER_VBIF_XIN_SW_RESET (VIDC_WRAPPER_BASE_OFFS + 0x18)
+#define VIDC_WRAPPER_VBIF_XIN_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x1C)
+#define VIDC_WRAPPER_CPU_CLOCK_CONFIG (VIDC_WRAPPER_BASE_OFFS + 0x2000)
+#define VIDC_WRAPPER_VBIF_XIN_CPU_SW_RESET \
+ (VIDC_WRAPPER_BASE_OFFS + 0x2004)
+#define VIDC_WRAPPER_AXI_HALT (VIDC_WRAPPER_BASE_OFFS + 0x2008)
+#define VIDC_WRAPPER_AXI_HALT_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x200C)
+#define VIDC_WRAPPER_CPU_CGC_DIS (VIDC_WRAPPER_BASE_OFFS + 0x2010)
+
+#endif
+
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index deb35cc..737b726 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -958,6 +958,8 @@
case VIDIOC_G_ENC_INDEX:
case VIDIOC_ENCODER_CMD:
case VIDIOC_TRY_ENCODER_CMD:
+ case VIDIOC_DECODER_CMD:
+ case VIDIOC_TRY_DECODER_CMD:
case VIDIOC_DBG_S_REGISTER:
case VIDIOC_DBG_G_REGISTER:
case VIDIOC_DBG_G_CHIP_IDENT:
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 2412f08..f915a94 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -216,6 +216,69 @@
"75 useconds",
NULL,
};
+ static const char *const h264_video_entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL
+ };
+ static const char *const mpeg_video_slice_mode[] = {
+ "Single Slice Mode",
+ "MB Based Slice",
+ "Byte Based Slice",
+ NULL
+ };
+	/* typo fixes: "Predicitve" -> "Predictive",
+	 * "Scalable high" -> "Scalable High" (user-visible menu strings) */
+	static const char *const h264_video_profile[] = {
+		"Baseline Profile",
+		"Constrained Baseline Profile",
+		"Main Profile",
+		"Extended Profile",
+		"High Profile",
+		"High Profile 10",
+		"High Profile 422",
+		"High Profile 444 Predictive",
+		"High Profile 10 Intra",
+		"High Profile 422 Intra",
+		"High Profile 444 Intra",
+		"CAVLC Profile 444 Intra",
+		"Scalable Baseline Profile",
+		"Scalable High Profile",
+		"Scalable High Intra Profile",
+		"Stereo High Profile",
+		"Multiview High Profile",
+		NULL
+	};
+ static const char *const h264_video_level[] = {
+ "Level 1 0",
+ "Level 1 B",
+ "Level 1 1",
+ "Level 1 2",
+ "Level 1 3",
+ "Level 2 0",
+ "Level 2 1",
+ "Level 2 2",
+ "Level 3 0",
+ "Level 3 1",
+ "Level 3 2",
+ "Level 4 0",
+ "Level 4 1",
+ "Level 4 2",
+ "Level 5 0",
+ "Level 5 1",
+ NULL
+ };
+ static const char *const mpeg_video_intra_refresh_mode[] = {
+ "No Intra Refresh",
+ "AIR MBS",
+ "AIR REF",
+ "CIR MBS",
+ NULL
+ };
+	/* typo fix: "Ebnabled" -> "Enabled" (user-visible menu string) */
+	static const char *const h264_loop_filter_mode[] = {
+		"Enabled",
+		"Disabled",
+		"Disabled At Slice Boundary",
+		NULL
+	};
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -256,6 +319,18 @@
return colorfx;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_preemphasis;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return h264_video_entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_video_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return h264_video_level;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return mpeg_video_slice_mode;
+ case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+ return mpeg_video_intra_refresh_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter_mode;
default:
return NULL;
}
@@ -343,6 +418,30 @@
case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Mode";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Slice MB Size";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ return "Slice Byte Size";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "I Frame Quantization";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "P Frame Quantization";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "B Frame Quantization";
+ case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION: return "Rotation";
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL: return "Rate Control";
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL: return "CABAC Model";
+ case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+ return "Intra Refresh Mode";
+ case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS: return "Intra Refresh AIR MBS";
+ case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF: return "Intra Refresh AIR REF";
+ case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS: return "Intra Refresh CIR MBS";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return "H.264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ return "H.264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ return "H.264 Loop Filter Alpha Offset";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -452,6 +551,13 @@
case V4L2_CID_EXPOSURE_AUTO:
case V4L2_CID_COLORFX:
case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_RDS_TX_PS_NAME:
@@ -468,7 +574,12 @@
*min = *max = *step = *def = 0;
break;
case V4L2_CID_BG_COLOR:
- *type = V4L2_CTRL_TYPE_INTEGER;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ *type = V4L2_CTRL_TYPE_INTEGER;
*step = 1;
*min = 0;
/* Max is calculated as RGB888 that is 2^24 */
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 5e44c90..d64ecb5 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -244,6 +244,8 @@
[_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
[_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
+ [_IOC_NR(VIDIOC_DECODER_CMD)] = "VIDIOC_DECODER_CMD",
+ [_IOC_NR(VIDIOC_TRY_DECODER_CMD)] = "VIDIOC_TRY_DECODER_CMD",
[_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
[_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
@@ -1775,6 +1777,28 @@
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
break;
}
+ case VIDIOC_DECODER_CMD:
+ {
+ struct v4l2_decoder_cmd *p = arg;
+
+ if (!ops->vidioc_decoder_cmd)
+ break;
+ ret = ops->vidioc_decoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
+ break;
+ }
+ case VIDIOC_TRY_DECODER_CMD:
+ {
+ struct v4l2_decoder_cmd *p = arg;
+
+ if (!ops->vidioc_try_decoder_cmd)
+ break;
+ ret = ops->vidioc_try_decoder_cmd(file, fh, p);
+ if (!ret)
+ dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
+ break;
+ }
case VIDIOC_G_PARM:
{
struct v4l2_streamparm *p = arg;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 219f7a0..dd5bd0f 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -43,6 +43,7 @@
#include <media/vcap_v4l2.h>
#include <media/vcap_fmt.h>
#include "vcap_vc.h"
+#include "vcap_vp.h"
#define NUM_INPUTS 1
#define MSM_VCAP_DRV_NAME "msm_vcap"
@@ -57,6 +58,28 @@
printk(KERN_DEBUG "VCAP: " fmt, ## arg); \
} while (0)
+enum vcap_op_mode determine_mode(struct vcap_client_data *cd)
+{
+ if (cd->set_cap == 1 && cd->set_vp_o == 0 &&
+ cd->set_decode == 0)
+ return VC_VCAP_OP;
+ else if (cd->set_cap == 1 && cd->set_vp_o == 1 &&
+ cd->set_decode == 0)
+ return VC_AND_VP_VCAP_OP;
+ else if (cd->set_cap == 0 && cd->set_vp_o == 1 &&
+ cd->set_decode == 1)
+ return VP_VCAP_OP;
+ else
+ return UNKNOWN_VCAP_OP;
+}
+
+void dealloc_resources(struct vcap_client_data *cd)
+{
+ cd->set_cap = false;
+ cd->set_decode = false;
+ cd->set_vp_o = false;
+}
+
int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
struct v4l2_buffer *b)
{
@@ -103,6 +126,8 @@
&buf->paddr, (size_t *)&len);
if (rc < 0) {
pr_err("%s: Could not get phys addr\n", __func__);
+ ion_free(dev->ion_client, buf->ion_handle);
+ buf->ion_handle = NULL;
return -EFAULT;
}
@@ -148,7 +173,7 @@
return 0;
}
-/* Videobuf operations */
+/* VC Videobuf operations */
static int capture_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
unsigned int *nplanes, unsigned long sizes[],
@@ -157,7 +182,6 @@
*nbuffers += 2;
if (*nbuffers > VIDEO_MAX_FRAME)
return -EINVAL;
-
*nplanes = 1;
return 0;
}
@@ -240,6 +264,197 @@
.buf_cleanup = capture_buffer_cleanup,
};
+/* VP I/P Videobuf operations */
+
+static int vp_in_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ if (*nbuffers >= VIDEO_MAX_FRAME && *nbuffers < 5)
+ *nbuffers = 5;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vp_in_buffer_init(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static int vp_in_buffer_prepare(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_in_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vcap_client_data *cd = vb2_get_drv_priv(vb->vb2_queue);
+ struct vcap_buffer *buf = container_of(vb, struct vcap_buffer, vb);
+ struct vp_action *vp_act = &cd->vid_vp_action;
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&cd->cap_slock, flags);
+ list_add_tail(&buf->list, &vp_act->in_active);
+ spin_unlock_irqrestore(&cd->cap_slock, flags);
+
+ if (atomic_read(&cd->dev->vp_enabled) == 0) {
+ if (cd->vid_vp_action.vp_state == VP_FRAME1) {
+ if (atomic_read(&q->queued_count) > 1 &&
+ atomic_read(&cd->vp_out_vidq.queued_count) > 0)
+ /* Valid code flow for VC-VP mode */
+ kickoff_vp(cd);
+ } else {
+ /* VP has already kicked off just needs cont */
+ continue_vp(cd);
+ }
+ }
+}
+
+static int vp_in_start_streaming(struct vb2_queue *vq)
+{
+ dprintk(2, "VP IN start streaming\n");
+ return 0;
+}
+
+static int vp_in_stop_streaming(struct vb2_queue *vq)
+{
+ struct vcap_client_data *c_data = vb2_get_drv_priv(vq);
+ struct vb2_buffer *vb;
+
+ dprintk(2, "VP stop streaming\n");
+
+ while (!list_empty(&c_data->vid_vp_action.in_active)) {
+ struct vcap_buffer *buf;
+ buf = list_entry(c_data->vid_vp_action.in_active.next,
+ struct vcap_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ /* clean ion handles */
+ list_for_each_entry(vb, &vq->queued_list, queued_entry)
+ free_ion_handle_work(c_data->dev, vb);
+ return 0;
+}
+
+static int vp_in_buffer_finish(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_in_buffer_cleanup(struct vb2_buffer *vb)
+{
+}
+
+static struct vb2_ops vp_in_video_qops = {
+ .queue_setup = vp_in_queue_setup,
+ .buf_init = vp_in_buffer_init,
+ .buf_prepare = vp_in_buffer_prepare,
+ .buf_queue = vp_in_buffer_queue,
+ .start_streaming = vp_in_start_streaming,
+ .stop_streaming = vp_in_stop_streaming,
+ .buf_finish = vp_in_buffer_finish,
+ .buf_cleanup = vp_in_buffer_cleanup,
+};
+
+
+/* VP O/P Videobuf operations */
+
+static int vp_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ if (*nbuffers >= VIDEO_MAX_FRAME && *nbuffers < 3)
+ *nbuffers = 3;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vp_out_buffer_init(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static int vp_out_buffer_prepare(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_out_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vcap_client_data *cd = vb2_get_drv_priv(vb->vb2_queue);
+ struct vcap_buffer *buf = container_of(vb, struct vcap_buffer, vb);
+ struct vp_action *vp_act = &cd->vid_vp_action;
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&cd->cap_slock, flags);
+ list_add_tail(&buf->list, &vp_act->out_active);
+ spin_unlock_irqrestore(&cd->cap_slock, flags);
+
+ if (atomic_read(&cd->dev->vp_enabled) == 0) {
+ if (cd->vid_vp_action.vp_state == VP_FRAME1) {
+ if (atomic_read(&q->queued_count) > 0 &&
+ atomic_read(&
+ cd->vp_in_vidq.queued_count) > 1)
+ kickoff_vp(cd);
+ } else {
+ /* VP has already kicked off just needs cont */
+ continue_vp(cd);
+ }
+ }
+}
+
+static int vp_out_start_streaming(struct vb2_queue *vq)
+{
+ return 0;
+}
+
+static int vp_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vcap_client_data *c_data = vb2_get_drv_priv(vq);
+ struct vb2_buffer *vb;
+
+ dprintk(2, "VP out q stop streaming\n");
+ vp_stop_capture(c_data);
+
+ while (!list_empty(&c_data->vid_vp_action.out_active)) {
+ struct vcap_buffer *buf;
+ buf = list_entry(c_data->vid_vp_action.out_active.next,
+ struct vcap_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ /* clean ion handles */
+ list_for_each_entry(vb, &vq->queued_list, queued_entry)
+ free_ion_handle_work(c_data->dev, vb);
+ return 0;
+}
+
+static int vp_out_buffer_finish(struct vb2_buffer *vb)
+{
+ return 0;
+}
+
+static void vp_out_buffer_cleanup(struct vb2_buffer *vb)
+{
+}
+
+static struct vb2_ops vp_out_video_qops = {
+ .queue_setup = vp_out_queue_setup,
+ .buf_init = vp_out_buffer_init,
+ .buf_prepare = vp_out_buffer_prepare,
+ .buf_queue = vp_out_buffer_queue,
+ .start_streaming = vp_out_start_streaming,
+ .stop_streaming = vp_out_stop_streaming,
+ .buf_finish = vp_out_buffer_finish,
+ .buf_cleanup = vp_out_buffer_cleanup,
+};
+
/* IOCTL vidioc handling */
static int vidioc_querycap(struct file *file, void *priv,
@@ -279,20 +494,16 @@
struct v4l2_format *f)
{
int size;
-#ifdef NEW_S_FMT
+ struct vcap_priv_fmt *priv_fmt;
struct v4l2_format_vc_ext *vc_format;
-#endif
struct vcap_client_data *c_data = file->private_data;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-#ifdef NEW_S_FMT
- vc_format = (struct v4l2_format_vc_ext *) f->fmt.raw_data;
+ priv_fmt = (struct vcap_priv_fmt *) f->fmt.raw_data;
+
+ switch (priv_fmt->type) {
+ case VC_TYPE:
+ vc_format = (struct v4l2_format_vc_ext *) &priv_fmt->u.timing;
c_data->vc_format = *vc_format;
-#else
- c_data->vc_format =
- vcap_vc_lut[f->fmt.pix.priv];
-#endif
config_vc_format(c_data);
@@ -304,22 +515,46 @@
else
size *= 2;
-#ifndef NEW_S_FMT
- f->fmt.pix.bytesperline = size;
+ priv_fmt->u.timing.bytesperline = size;
size *= (c_data->vc_format.vactive_end -
c_data->vc_format.vactive_start);
- f->fmt.pix.sizeimage = size;
-#endif
+ priv_fmt->u.timing.sizeimage = size;
vcap_ctrl->vc_client = c_data;
+ c_data->set_cap = true;
break;
- case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
- c_data->vp_buf_type_field = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
- c_data->vp_format.field = f->fmt.pix.field;
- c_data->vp_format.height = f->fmt.pix.height;
- c_data->vp_format.width = f->fmt.pix.width;
- c_data->vp_format.pixelformat = f->fmt.pix.pixelformat;
+ case VP_IN_TYPE:
+ vcap_ctrl->vp_client = c_data;
+ c_data->vp_in_fmt.width = priv_fmt->u.pix.width;
+ c_data->vp_in_fmt.height = priv_fmt->u.pix.height;
+ c_data->vp_in_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
+
+ if (priv_fmt->u.pix.priv)
+ c_data->vid_vp_action.nr_enabled = 1;
+
+ size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+ if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ size = size * 2;
+ else
+ size = size / 2 * 3;
+ priv_fmt->u.pix.sizeimage = size;
+ c_data->set_decode = true;
break;
- case V4L2_BUF_TYPE_INTERLACED_IN_AFE:
+ case VP_OUT_TYPE:
+ vcap_ctrl->vp_client = c_data;
+ c_data->vp_out_fmt.width = priv_fmt->u.pix.width;
+ c_data->vp_out_fmt.height = priv_fmt->u.pix.height;
+ c_data->vp_out_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
+
+ if (priv_fmt->u.pix.priv)
+ c_data->vid_vp_action.nr_enabled = 1;
+
+ size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
+ if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ size = size * 2;
+ else
+ size = size / 2 * 3;
+ priv_fmt->u.pix.sizeimage = size;
+ c_data->set_vp_o = true;
break;
default:
break;
@@ -332,9 +567,55 @@
struct v4l2_requestbuffers *rb)
{
struct vcap_client_data *c_data = file->private_data;
+ int rc;
+
+ dprintk(3, "In Req Buf %08x\n", (unsigned int)rb->type);
+ c_data->op_mode = determine_mode(c_data);
+ if (c_data->op_mode == UNKNOWN_VCAP_OP) {
+ pr_err("VCAP Error: %s: VCAP in unknown mode\n", __func__);
+ return -ENOTRECOVERABLE;
+ }
+
switch (rb->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return vb2_reqbufs(&c_data->vc_vidq, rb);
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP) {
+ if (c_data->vc_format.color_space) {
+ pr_err("VCAP Err: %s: VP No RGB support\n",
+ __func__);
+ return -ENOTRECOVERABLE;
+ }
+ if (!c_data->vc_format.mode) {
+ pr_err("VCAP Err: VP No prog support\n");
+ return -ENOTRECOVERABLE;
+ }
+ if (rb->count < 6) {
+ pr_err("VCAP Err: Not enough buf for VC_VP\n");
+ return -EINVAL;
+ }
+ rc = vb2_reqbufs(&c_data->vc_vidq, rb);
+ if (rc < 0)
+ return rc;
+
+ c_data->vp_in_fmt.width =
+ (c_data->vc_format.hactive_end -
+ c_data->vc_format.hactive_start);
+ c_data->vp_in_fmt.height =
+ (c_data->vc_format.vactive_end -
+ c_data->vc_format.vactive_start);
+ /* VC outputs YCbCr 4:2:2 */
+ c_data->vp_in_fmt.pixfmt = V4L2_PIX_FMT_NV16;
+ rb->type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ rc = vb2_reqbufs(&c_data->vp_in_vidq, rb);
+ rb->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return rc;
+
+ } else {
+ return vb2_reqbufs(&c_data->vc_vidq, rb);
+ }
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ return vb2_reqbufs(&c_data->vp_in_vidq, rb);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return vb2_reqbufs(&c_data->vp_out_vidq, rb);
default:
pr_err("VCAP Error: %s: Unknown buffer type\n", __func__);
return -EINVAL;
@@ -359,16 +640,57 @@
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct vcap_client_data *c_data = file->private_data;
+ struct vb2_buffer *vb;
+ struct vb2_queue *q;
int rc;
+ dprintk(3, "In Q Buf %08x\n", (unsigned int)p->type);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (get_phys_addr(c_data->dev, &c_data->vc_vidq, p))
- return -EINVAL;
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP) {
+ /* If buffer in vp_in_q it will be coming back */
+ q = &c_data->vp_in_vidq;
+ if (p->index >= q->num_buffers) {
+ dprintk(1, "qbuf: buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[p->index];
+ if (NULL == vb) {
+ dprintk(1, "qbuf: buffer is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "qbuf: buffer already in use\n");
+ return -EINVAL;
+ }
+ }
+ rc = get_phys_addr(c_data->dev, &c_data->vc_vidq, p);
+ if (rc < 0)
+ return rc;
rc = vb2_qbuf(&c_data->vc_vidq, p);
if (rc < 0)
free_ion_handle(c_data->dev, &c_data->vc_vidq, p);
return rc;
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
+ rc = get_phys_addr(c_data->dev, &c_data->vp_in_vidq, p);
+ if (rc < 0)
+ return rc;
+ rc = vb2_qbuf(&c_data->vp_in_vidq, p);
+ if (rc < 0)
+ free_ion_handle(c_data->dev, &c_data->vp_in_vidq, p);
+ return rc;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ rc = get_phys_addr(c_data->dev, &c_data->vp_out_vidq, p);
+ if (rc < 0)
+ return rc;
+ rc = vb2_qbuf(&c_data->vp_out_vidq, p);
+ if (rc < 0)
+ free_ion_handle(c_data->dev, &c_data->vp_out_vidq, p);
+ return rc;
default:
pr_err("VCAP Error: %s: Unknown buffer type\n", __func__);
return -EINVAL;
@@ -381,12 +703,29 @@
struct vcap_client_data *c_data = file->private_data;
int rc;
+ dprintk(3, "In DQ Buf %08x\n", (unsigned int)p->type);
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
rc = vb2_dqbuf(&c_data->vc_vidq, p, file->f_flags & O_NONBLOCK);
if (rc < 0)
return rc;
return free_ion_handle(c_data->dev, &c_data->vc_vidq, p);
+ case V4L2_BUF_TYPE_INTERLACED_IN_DECODER:
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ return -EINVAL;
+ rc = vb2_dqbuf(&c_data->vp_in_vidq, p, file->f_flags &
+ O_NONBLOCK);
+ if (rc < 0)
+ return rc;
+ return free_ion_handle(c_data->dev, &c_data->vp_in_vidq, p);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ rc = vb2_dqbuf(&c_data->vp_out_vidq, p, file->f_flags &
+ O_NONBLOCK);
+ if (rc < 0)
+ return rc;
+ return free_ion_handle(c_data->dev, &c_data->vp_out_vidq, p);
default:
pr_err("VCAP Error: %s: Unknown buffer type", __func__);
return -EINVAL;
@@ -394,15 +733,153 @@
return 0;
}
+/*
+ * When calling streamon on multiple queues there is a need to first verify
+ * that the streamon will succeed on all queues, similarly for streamoff
+ */
+int streamon_validate_q(struct vb2_queue *q)
+{
+ if (q->fileio) {
+ dprintk(1, "streamon: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "streamon: already streaming\n");
+ return -EBUSY;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (list_empty(&q->queued_list)) {
+ dprintk(1, "streamon: no output buffers queued\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct vcap_client_data *c_data = file->private_data;
+ int rc;
- switch (i) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ dprintk(3, "In Stream ON\n");
+ if (determine_mode(c_data) != c_data->op_mode) {
+ pr_err("VCAP Error: %s: s_fmt called after req_buf", __func__);
+ return -ENOTRECOVERABLE;
+ }
+
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
+ c_data->dev->vc_client = c_data;
+ config_vc_format(c_data);
return vb2_streamon(&c_data->vc_vidq, i);
+ case VP_VCAP_OP:
+ rc = streamon_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ c_data->dev->vp_client = c_data;
+
+ rc = config_vp_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = init_motion_buf(c_data);
+ if (rc < 0)
+ return rc;
+ if (c_data->vid_vp_action.nr_enabled) {
+ rc = init_nr_buf(c_data);
+ if (rc < 0)
+ goto s_on_deinit_m_buf;
+ }
+
+ c_data->vid_vp_action.vp_state = VP_FRAME1;
+
+ rc = vb2_streamon(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+ return rc;
+ case VC_AND_VP_VCAP_OP:
+ rc = streamon_validate_q(&c_data->vc_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamon_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ c_data->dev->vc_client = c_data;
+ c_data->dev->vp_client = c_data;
+ c_data->dev->vc_to_vp_work.cd = c_data;
+
+ rc = config_vc_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = config_vp_format(c_data);
+ if (rc < 0)
+ return rc;
+ rc = init_motion_buf(c_data);
+ if (rc < 0)
+ return rc;
+ if (c_data->vid_vp_action.nr_enabled) {
+ rc = init_nr_buf(c_data);
+ if (rc < 0)
+ goto s_on_deinit_m_buf;
+ }
+ c_data->streaming = 1;
+
+ c_data->vid_vp_action.vp_state = VP_FRAME1;
+
+ /* These stream on calls should not fail */
+ rc = vb2_streamon(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+
+ rc = vb2_streamon(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ goto s_on_deinit_nr_buf;
+ return rc;
default:
- pr_err("VCAP Error: %s: Unknown buffer type", __func__);
+ pr_err("VCAP Error: %s: Unknown operation mode", __func__);
+ return -ENOTRECOVERABLE;
+ }
+ return 0;
+
+s_on_deinit_nr_buf:
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+s_on_deinit_m_buf:
+ deinit_motion_buf(c_data);
+ return rc;
+}
+
+int streamoff_validate_q(struct vb2_queue *q)
+{
+ if (q->fileio) {
+ dprintk(1, "streamoff: file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (!q->streaming) {
+ dprintk(1, "streamoff: not streaming\n");
return -EINVAL;
}
return 0;
@@ -413,21 +890,78 @@
struct vcap_client_data *c_data = file->private_data;
int rc;
- switch (i) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
rc = vb2_streamoff(&c_data->vc_vidq, i);
if (rc >= 0)
atomic_set(&c_data->dev->vc_enabled, 0);
return rc;
+ case VP_VCAP_OP:
+ rc = streamoff_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ /* These stream off calls should not fail */
+ rc = vb2_streamoff(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ return rc;
+
+ deinit_motion_buf(c_data);
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+ atomic_set(&c_data->dev->vp_enabled, 0);
+ return rc;
+ case VC_AND_VP_VCAP_OP:
+ rc = streamoff_validate_q(&c_data->vc_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_in_vidq);
+ if (rc < 0)
+ return rc;
+ rc = streamoff_validate_q(&c_data->vp_out_vidq);
+ if (rc < 0)
+ return rc;
+
+ /* These stream off calls should not fail */
+ c_data->streaming = 0;
+ rc = vb2_streamoff(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_in_vidq,
+ V4L2_BUF_TYPE_INTERLACED_IN_DECODER);
+ if (rc < 0)
+ return rc;
+
+ rc = vb2_streamoff(&c_data->vp_out_vidq,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (rc < 0)
+ return rc;
+
+ deinit_motion_buf(c_data);
+ if (c_data->vid_vp_action.nr_enabled)
+ deinit_nr_buf(c_data);
+ atomic_set(&c_data->dev->vc_enabled, 0);
+ atomic_set(&c_data->dev->vp_enabled, 0);
+ return rc;
default:
- pr_err("VCAP Error: %s: Unknown buffer type", __func__);
- break;
+ pr_err("VCAP Error: %s: Unknown Operation mode", __func__);
+ return -ENOTRECOVERABLE;
}
return 0;
}
/* VCAP fops */
-
static void *vcap_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
@@ -464,7 +998,7 @@
spin_lock_init(&c_data->cap_slock);
- /* initialize queue */
+ /* initialize vc queue */
q = &c_data->vc_vidq;
memset(q, 0, sizeof(c_data->vc_vidq));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -473,17 +1007,49 @@
q->buf_struct_size = sizeof(struct vcap_buffer);
q->ops = &capture_video_qops;
q->mem_ops = &vcap_mem_ops;
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ goto vc_q_failed;
+
+ /* initialize vp in queue */
+ q = &c_data->vp_in_vidq;
+ memset(q, 0, sizeof(c_data->vp_in_vidq));
+ q->type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ q->io_modes = VB2_USERPTR;
+ q->drv_priv = c_data;
+ q->buf_struct_size = sizeof(struct vcap_buffer);
+ q->ops = &vp_in_video_qops;
+ q->mem_ops = &vcap_mem_ops;
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ goto vp_in_q_failed;
+
+ /* initialize vp out queue */
+ q = &c_data->vp_out_vidq;
+ memset(q, 0, sizeof(c_data->vp_out_vidq));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_USERPTR;
+ q->drv_priv = c_data;
+ q->buf_struct_size = sizeof(struct vcap_buffer);
+ q->ops = &vp_out_video_qops;
+ q->mem_ops = &vcap_mem_ops;
ret = vb2_queue_init(q);
if (ret < 0)
- goto open_failed;
+ goto vp_out_q_failed;
INIT_LIST_HEAD(&c_data->vid_vc_action.active);
+ INIT_LIST_HEAD(&c_data->vid_vp_action.in_active);
+ INIT_LIST_HEAD(&c_data->vid_vp_action.out_active);
file->private_data = c_data;
return 0;
-open_failed:
+vp_out_q_failed:
+ vb2_queue_release(&c_data->vp_in_vidq);
+vp_in_q_failed:
+ vb2_queue_release(&c_data->vc_vidq);
+vc_q_failed:
kfree(c_data);
return ret;
}
@@ -491,6 +1057,8 @@
static int vcap_close(struct file *file)
{
struct vcap_client_data *c_data = file->private_data;
+ vb2_queue_release(&c_data->vp_out_vidq);
+ vb2_queue_release(&c_data->vp_in_vidq);
vb2_queue_release(&c_data->vc_vidq);
c_data->dev->vc_client = NULL;
c_data->dev->vp_client = NULL;
@@ -498,13 +1066,60 @@
return 0;
}
+unsigned int poll_work(struct vb2_queue *q, struct file *file,
+ poll_table *wait, bool write_q)
+{
+ unsigned long flags;
+ struct vb2_buffer *vb = NULL;
+
+ if (q->num_buffers == 0)
+ return POLLERR;
+
+ if (list_empty(&q->queued_list))
+ return POLLERR;
+
+ poll_wait(file, &q->done_wq, wait);
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (write_q) ? POLLOUT | POLLWRNORM :
+ POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
static unsigned int vcap_poll(struct file *file,
struct poll_table_struct *wait)
{
struct vcap_client_data *c_data = file->private_data;
- struct vb2_queue *q = &c_data->vc_vidq;
+ struct vb2_queue *q;
+ unsigned int mask = 0;
- return vb2_poll(q, file, wait);
+ switch (c_data->op_mode) {
+ case VC_VCAP_OP:
+ q = &c_data->vc_vidq;
+ return vb2_poll(q, file, wait);
+ case VP_VCAP_OP:
+ q = &c_data->vp_in_vidq;
+ mask = poll_work(q, file, wait, 0);
+ q = &c_data->vp_out_vidq;
+ mask |= poll_work(q, file, wait, 1);
+ return mask;
+ case VC_AND_VP_VCAP_OP:
+ q = &c_data->vp_out_vidq;
+ mask = poll_work(q, file, wait, 0);
+ return mask;
+ default:
+ pr_err("VCAP Error: %s: Unknown operation mode", __func__);
+ return POLLERR;
+ }
+ return 0;
}
/* V4L2 and video device structures */
@@ -522,6 +1137,10 @@
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_s_fmt_type_private = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_type_private = vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
@@ -537,9 +1156,9 @@
.release = video_device_release,
};
-int vcap_reg_powerup(struct vcap_dev *dev, struct device *ddev)
+int vcap_reg_powerup(struct vcap_dev *dev)
{
- dev->fs_vcap = regulator_get(ddev, "vdd");
+ dev->fs_vcap = regulator_get(NULL, "fs_vcap");
if (IS_ERR(dev->fs_vcap)) {
pr_err("%s: Regulator FS_VCAP get failed %ld\n", __func__,
PTR_ERR(dev->fs_vcap));
@@ -715,7 +1334,7 @@
{
int rc;
- rc = vcap_reg_powerup(dev, ddev);
+ rc = vcap_reg_powerup(dev);
if (rc < 0)
goto reg_failed;
rc = vcap_clk_powerup(dev, ddev);
@@ -751,6 +1370,11 @@
return 0;
}
+static irqreturn_t vcap_vp_handler(int irq_num, void *data)
+{
+ return vp_handler(vcap_ctrl);
+}
+
static irqreturn_t vcap_vc_handler(int irq_num, void *data)
{
return vc_handler(vcap_ctrl);
@@ -793,26 +1417,44 @@
goto free_resource;
}
- dev->vcapirq = platform_get_resource_byname(pdev,
- IORESOURCE_IRQ, "vcap");
- if (!dev->vcapirq) {
- pr_err("%s: no irq resource?\n", __func__);
+ dev->vcirq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vc_irq");
+ if (!dev->vcirq) {
+ pr_err("%s: no vc irq resource?\n", __func__);
+ ret = -ENODEV;
+ goto free_resource;
+ }
+ dev->vpirq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vp_irq");
+ if (!dev->vpirq) {
+ pr_err("%s: no vp irq resource?\n", __func__);
ret = -ENODEV;
goto free_resource;
}
- ret = request_irq(dev->vcapirq->start, vcap_vc_handler,
- IRQF_TRIGGER_RISING, "vcap", 0);
+
+ ret = request_irq(dev->vcirq->start, vcap_vc_handler,
+ IRQF_TRIGGER_RISING, "vc_irq", 0);
if (ret < 0) {
- pr_err("%s: irq request fail\n", __func__);
+ pr_err("%s: vc irq request fail\n", __func__);
ret = -EBUSY;
goto free_resource;
}
+ disable_irq(dev->vcirq->start);
- disable_irq(dev->vcapirq->start);
+ ret = request_irq(dev->vpirq->start, vcap_vp_handler,
+ IRQF_TRIGGER_RISING, "vp_irq", 0);
+
+ if (ret < 0) {
+ pr_err("%s: vp irq request fail\n", __func__);
+ ret = -EBUSY;
+ goto free_resource;
+ }
+ disable_irq(dev->vpirq->start);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
"%s", MSM_VCAP_DRV_NAME);
+
ret = v4l2_device_register(NULL, &dev->v4l2_dev);
if (ret)
goto free_resource;
@@ -842,17 +1484,25 @@
dev->vfd = vfd;
video_set_drvdata(vfd, dev);
- dev->ion_client = msm_ion_client_create(-1, "vcap");
- if (IS_ERR((void *)dev->ion_client)) {
- pr_err("could not get ion client");
+ dev->vcap_wq = create_workqueue("vcap");
+ if (!dev->vcap_wq) {
+ pr_err("Could not create workqueue");
goto rel_vdev;
}
+ dev->ion_client = msm_ion_client_create(-1, "vcap");
+ if (IS_ERR((void *)dev->ion_client)) {
+ pr_err("could not get ion client");
+ goto rel_vcap_wq;
+ }
+
atomic_set(&dev->vc_enabled, 0);
+ atomic_set(&dev->vp_enabled, 0);
dprintk(1, "Exit probe succesfully");
return 0;
-
+rel_vcap_wq:
+ destroy_workqueue(dev->vcap_wq);
rel_vdev:
video_device_release(vfd);
deinit_vc:
@@ -874,6 +1524,8 @@
{
struct vcap_dev *dev = vcap_ctrl;
ion_client_destroy(dev->ion_client);
+ flush_workqueue(dev->vcap_wq);
+ destroy_workqueue(dev->vcap_wq);
video_device_release(dev->vfd);
deinit_vc();
vcap_disable(dev);
diff --git a/drivers/media/video/vcap_vc.c b/drivers/media/video/vcap_vc.c
index ed0bc25..2c4a243 100644
--- a/drivers/media/video/vcap_vc.c
+++ b/drivers/media/video/vcap_vc.c
@@ -34,401 +34,6 @@
printk(KERN_DEBUG "VC: " fmt, ## arg); \
} while (0)
-struct v4l2_format_vc_ext vcap_vc_lut[] = {
- /* 1080p */
- {
- HAL_VCAP_YUV_1080p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 32, 2200, 192, 2112, 4, 24, 0, 2, 0, 44, 0, 0, 0, 0,
- 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 148.5,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2750, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 112, 2750, 192, 2112, 4, 110, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_24_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 275, 19, 211, 41, 1121, 0, 5, 0, 16, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 200, 22, 182, 41, 1121, 0, 5, 0, 16, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 148.5,
- 15, 2640, 192, 2112, 6, 13, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_25_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0,
- 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_25_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 10, 2640, 192, 2112, 4, 8, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1080p_30_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2200, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_25_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 1125, 2640, 192, 2112, 41, 1121, 0, 5, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_1080p_25_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 10, 2640, 192, 2112, 4, 8, 0, 2, 0, 44, 0, 0, 0,
- 0, 0, 0
- },
- /* 1080i */
- {
- HAL_VCAP_YUV_1080i_60_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 2200, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1100,
- 1100, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_60_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 18, 2200, 192, 2112, 3, 7, 0, 2, 0, 44, 11, 15, 1100,
- 1100, 8, 10
- },
- {
- HAL_VCAP_YUV_1080i_60_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 1125, 220, 19, 211, 20, 560, 0, 5, 0, 4, 583, 1123, 110,
- 110, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 1125, 2640, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1320,
- 1320, 563, 568
- },
- {
- HAL_VCAP_YUV_1080i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 52, 2640, 192, 2112, 4, 24, 0, 2, 0, 44, 30, 50, 1320,
- 1320, 26, 28},
- {
- HAL_VCAP_YUV_1080i_50_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 72.00,
- 1125, 264, 19, 211, 20, 560, 0, 5, 0, 4, 583, 1123, 110,
- 110, 563, 568
- },
- {
- HAL_VCAP_RGB_1080i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 72.00,
- 1125, 2640, 192, 2112, 20, 560, 0, 5, 0, 44, 583, 1123, 1320,
- 1320, 563, 568
- },
- {
- HAL_VCAP_RGB_1080i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 72.00,
- 52, 2640, 192, 2112, 4, 24, 0, 2, 0, 44, 30, 50, 1320,
- 1320, 26, 28
- },
- /* 480i */
- {
- HAL_VCAP_YUV_480i_60_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 20, 1716, 238, 1678, 3, 7, 0, 2, 0, 124, 14, 18, 820,
- 820, 10, 12
- },
- {
- HAL_VCAP_YUV_480i_60_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 1716, 238, 1678, 18, 258, 0, 3, 0, 124, 281, 521, 858,
- 858, 262, 265
- },
- {
- HAL_VCAP_YUV_480i_60_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 172, 24, 168, 18, 258, 0, 3, 0, 12, 281, 521, 86,
- 86, 262, 265
- },
- {
- HAL_VCAP_YUV_2880_480i_60_FL, HAL_VCAP_MODE_INT,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 525, 3432, 476, 3356, 18, 258, 0, 3,
- 0, 248, 281, 521, 1716, 1716, 262, 265
- },
- {
- HAL_VCAP_YUV_2880_480i_60_RH, HAL_VCAP_MODE_INT,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 32, 3432, 476, 3356, 4, 14, 0, 3, 0,
- 248, 20, 30, 1716, 1716, 16, 19
- },
- /* 480p */
- {
- HAL_VCAP_YUV_480p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 8, 858, 122, 842, 2, 5, 0, 1, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_480p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.027,
- 52, 858, 122, 842, 3, 50, 0, 2, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_480p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.027,
- 525, 858, 122, 842, 36, 516, 0, 6, 0, 62, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_480p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 858, 122, 842, 36, 516, 0, 6, 0, 62, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_480p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.027,
- 525, 86, 12, 84, 36, 516, 0, 6, 0, 6, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_480p_60_FL, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 525, 3432, 488, 3368, 36, 516, 0, 6,
- 0, 248, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_480p_60_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 25, 3432, 488, 3368, 8, 22, 0, 6, 0,
- 248, 0, 0, 0, 0, 0, 0
- },
- /* 720p */
- {
- HAL_VCAP_YUV_720p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 1650, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_720p_60_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 74.25,
- 750, 1650, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_60_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 165, 26, 154, 25, 745, 0, 5, 0, 4, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_60_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 35, 1650, 260, 1540, 5, 32, 0, 3, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 1980, 260, 1540, 25, 745, 0, 5, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 750, 198, 26, 154, 25, 745, 0, 5, 0, 4, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_720p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 74.25,
- 6, 1980, 260, 1540, 2, 5, 0, 1, 0, 40, 0, 0, 0,
- 0, 0, 0
- },
- /* 576p */
- {
- HAL_VCAP_YUV_576p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 864, 132, 852, 44, 620, 0, 5, 0, 64, 0, 0, 0,
- 0, 0, 0},
- {
- HAL_VCAP_RGB_576p_50_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 27.0,
- 625, 864, 132, 852, 44, 620, 0, 5, 0, 64, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_576p_50_RW, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 86, 13, 85, 44, 620, 0, 5, 0, 6, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_576p_50_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 25, 864, 132, 852, 4, 23, 0, 3, 0, 64, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_1440_576p_50_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 54.000, 25, 1728, 264, 1704, 6, 23, 0, 5, 0,
- 128, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_576p_50_FL, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 625, 3456, 528, 3408, 44, 620, 0, 5,
- 0, 256, 0, 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_2880_576p_50_RH, HAL_VCAP_MODE_PRO,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS,
- HAL_VCAP_YUV, 108.000, 25, 3456, 528, 3408, 6, 23, 0, 5, 0,
- 256, 0, 0, 0, 0, 0, 0
- },
- /* 576i */
- {
- HAL_VCAP_YUV_576i_50_FL, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 1728, 264, 1704, 22, 310, 0, 3, 0, 126, 335, 623, 864,
- 864, 313, 316
- },
- {
- HAL_VCAP_YUV_576i_50_RW, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 625, 172, 26, 170, 22, 310, 0, 3, 0, 13, 335, 623, 86,
- 86, 313, 316
- },
- {
- HAL_VCAP_YUV_576i_50_RH, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_NEG, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 27.0,
- 29, 1728, 264, 1704, 3, 13, 0, 1, 0, 126, 16, 26, 864, 864,
- 14, 15
- },
- /* XGA 1024x768 */
- {
- HAL_VCAP_YUV_XGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 798, 1328, 256, 1280, 27, 795, 0, 4, 0, 104, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_XGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 12, 1328, 256, 1280, 6, 10, 0, 4, 0, 104, 0, 0, 0, 0,
- 0, 0
- },
- {
- HAL_VCAP_YUV_XGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 63.5,
- 12, 1216, 112, 1136, 6, 10, 0, 4, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* SXGA 1280x1024 */
- {
- HAL_VCAP_YUV_SXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 1063, 1712, 352, 1632, 36, 1060, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_RGB_SXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 109.0,
- 1063, 1712, 352, 1632, 36, 1060, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_SXGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 17, 1712, 352, 1632, 8, 15, 0, 7, 0, 136, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_SXGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 109.0,
- 17, 1440, 112, 1392, 8, 15, 0, 7, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* UXGA 1600x1200 */
- {
- HAL_VCAP_YUV_UXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 1245, 2160, 448, 2048, 42, 1242, 0, 4, 0, 168, 0,
- 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_RGB_UXGA_FL, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_RGB, 161.0,
- 1245, 2160, 448, 2048, 42, 1242, 0, 4, 0, 168, 0,
- 0, 0, 0, 0, 0
- },
- {
- HAL_VCAP_YUV_UXGA_RH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 12, 2160, 448, 2048, 6, 10, 0, 4, 0, 168, 0, 0, 0,
- 0, 0, 0
- },
- {
- HAL_VCAP_YUV_UXGA_RB, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_NEG,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_POS, HAL_VCAP_YUV, 161.0,
- 12, 1808, 112, 1712, 6, 10, 0, 4, 0, 32, 0, 0, 0, 0,
- 0, 0
- },
- /* test odd height */
- {
- HAL_VCAP_ODD_HEIGHT, HAL_VCAP_MODE_INT, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_NEG, HAL_VCAP_YUV, 148.5,
- 65, 1728, 264, 1704, 5, 20, 0, 3, 0, 126, 25, 40, 864,
- 864, 21, 24
- },
- /* test odd width RGB only */
- {
- HAL_VCAP_ODD_WIDTH, HAL_VCAP_MODE_PRO, HAL_VCAP_POLAR_POS,
- HAL_VCAP_POLAR_POS, HAL_VCAP_POLAR_NEG, HAL_VCAP_RGB, 148.5,
- 52, 859, 122, 843, 3, 50, 0, 2, 0, 62, 0, 0, 0, 0, 0, 0
- },
-};
-
void config_buffer(struct vcap_client_data *c_data,
struct vcap_buffer *buf,
void __iomem *y_addr,
@@ -446,6 +51,61 @@
}
}
+static void mov_buf_to_vp(struct work_struct *work)
+{
+ struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+ struct v4l2_buffer p;
+ struct vb2_buffer *vb_vc;
+ struct vcap_buffer *buf_vc;
+ struct vb2_buffer *vb_vp;
+ struct vcap_buffer *buf_vp;
+
+ int rc;
+ p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ p.memory = V4L2_MEMORY_USERPTR;
+ while (1) {
+ if (!vp_work->cd->streaming)
+ return;
+ rc = vb2_dqbuf(&vp_work->cd->vc_vidq, &p, O_NONBLOCK);
+ if (rc < 0)
+ return;
+
+ vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
+ if (NULL == vb_vc) {
+ dprintk(1, "%s: buffer is NULL\n", __func__);
+ vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ return;
+ }
+ buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
+
+ vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
+ if (NULL == vb_vp) {
+ dprintk(1, "%s: buffer is NULL\n", __func__);
+ vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ return;
+ }
+ buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
+ buf_vp->ion_handle = buf_vc->ion_handle;
+ buf_vp->paddr = buf_vc->paddr;
+ buf_vc->ion_handle = NULL;
+ buf_vc->paddr = 0;
+
+ p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+
+ /* This call should not fail */
+ rc = vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ if (rc < 0) {
+ pr_err("%s: qbuf to vp_in failed\n", __func__);
+ buf_vc->ion_handle = buf_vp->ion_handle;
+ buf_vc->paddr = buf_vp->paddr;
+ buf_vp->ion_handle = NULL;
+ buf_vp->paddr = 0;
+ p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ }
+ }
+}
+
irqreturn_t vc_handler(struct vcap_dev *dev)
{
uint32_t irq, timestamp;
@@ -454,6 +114,7 @@
struct vb2_buffer *vb = NULL;
struct vcap_client_data *c_data;
+
irq = readl_relaxed(VCAP_VC_INT_STATUS);
dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
@@ -545,6 +206,10 @@
dev->vc_client->vid_vc_action.buf_ind = VC_BUF1;
irq = VC_BUF2;
}
+
+ if (c_data->op_mode == VC_AND_VP_VCAP_OP)
+ queue_work(dev->vcap_wq, &dev->vc_to_vp_work.work);
+
writel_relaxed(irq, VCAP_VC_INT_CLEAR);
return IRQ_HANDLED;
@@ -604,11 +269,11 @@
VCAP_VC_C_ADDR_2);
rc = readl_relaxed(VCAP_VC_CTRL);
- writel_relaxed(rc | 0x1, VCAP_VC_CTRL);
+ writel_iowmb(rc | 0x1, VCAP_VC_CTRL);
writel_relaxed(0x6, VCAP_VC_INT_MASK);
- enable_irq(dev->vcapirq->start);
+ enable_irq(dev->vcirq->start);
return 0;
}
@@ -618,9 +283,12 @@
int rc;
rc = readl_relaxed(VCAP_VC_CTRL);
- writel_relaxed(rc & ~(0x1), VCAP_VC_CTRL);
+ writel_iowmb(rc & ~(0x1), VCAP_VC_CTRL);
- disable_irq(c_data->dev->vcapirq->start);
+ if (atomic_read(&dev->vc_enabled) == 1)
+ disable_irq(dev->vcirq->start);
+
+ flush_workqueue(dev->vcap_wq);
}
int config_vc_format(struct vcap_client_data *c_data)
@@ -646,14 +314,16 @@
}
writel_relaxed(0x00000000, VCAP_SW_RESET_REQ);
- writel_relaxed(0x00000102, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000102, VCAP_VC_NPL_CTRL);
rc = readl_relaxed(VCAP_VC_NPL_CTRL);
rc = readl_relaxed(VCAP_VC_NPL_CTRL);
- writel_relaxed(0x00000002, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000002, VCAP_VC_NPL_CTRL);
dprintk(2, "%s: Starting VC configuration\n", __func__);
- writel_relaxed(0x00000002, VCAP_VC_NPL_CTRL);
- writel_relaxed(0x00000004 | vc_format->color_space << 1, VCAP_VC_CTRL);
+ writel_iowmb(0x00000002, VCAP_VC_NPL_CTRL);
+ writel_iowmb(0x00000004 | vc_format->color_space << 1 |
+ vc_format->mode << 3 |
+ vc_format->mode << 10, VCAP_VC_CTRL);
writel_relaxed(vc_format->h_polar << 4 |
vc_format->v_polar << 0, VCAP_VC_POLARITY);
@@ -677,7 +347,7 @@
vc_format->hsync_start), VCAP_VC_HSYNC_HPOS);
writel_relaxed(((vc_format->f2_vsync_h_end << 16) |
vc_format->f2_vsync_h_start), VCAP_VC_VSYNC_F2_HPOS);
- writel_relaxed(0x000033FF, VCAP_VC_BUF_CTRL);
+ writel_iowmb(0x000033FF, VCAP_VC_BUF_CTRL);
rc = vc_format->hactive_end - vc_format->hactive_start;
if (vc_format->color_space)
@@ -694,6 +364,7 @@
writel_relaxed(0x2f6ad272, VCAP_VC_IN_CTRL4);
writel_relaxed(0x00006b38, VCAP_VC_IN_CTRL5);
+ writel_iowmb(0x00000001 , VCAP_OFFSET(0x0d00));
dprintk(2, "%s: Done VC configuration\n", __func__);
return 0;
@@ -706,6 +377,7 @@
dprintk(1, "Hardware version: %08x\n", result);
if (result != VCAP_HARDWARE_VERSION)
return -ENODEV;
+ INIT_WORK(&dev->vc_to_vp_work.work, mov_buf_to_vp);
return 0;
}
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index e431038..57d13cd 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -13,11 +13,9 @@
#ifndef VCAP_VC_H
#define VCAP_VC_H
-/* #define NEW_S_FMT */
#include <linux/interrupt.h>
#include <media/vcap_v4l2.h>
-extern struct v4l2_format_vc_ext vcap_vc_lut[];
#define VCAP_HARDWARE_VERSION 0x10000000
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
new file mode 100644
index 0000000..f8dfdc1
--- /dev/null
+++ b/drivers/media/video/vcap_vp.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <mach/camera.h>
+#include <linux/io.h>
+#include <mach/clk.h>
+#include <linux/clk.h>
+
+#include <media/vcap_v4l2.h>
+#include <media/vcap_fmt.h>
+#include "vcap_vp.h"
+
+static unsigned debug;
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "VP: " fmt, ## arg); \
+ } while (0)
+
+void config_nr_buffer(struct vcap_client_data *c_data,
+ struct vcap_buffer *buf)
+{
+ struct vcap_dev *dev = c_data->dev;
+ int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
+
+ writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
+ writel_relaxed(buf->paddr + size, VCAP_VP_NR_T2_C_BASE_ADDR);
+}
+
+void config_in_buffer(struct vcap_client_data *c_data,
+ struct vcap_buffer *buf)
+{
+ struct vcap_dev *dev = c_data->dev;
+ int size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
+
+ writel_relaxed(buf->paddr, VCAP_VP_T2_Y_BASE_ADDR);
+ writel_relaxed(buf->paddr + size, VCAP_VP_T2_C_BASE_ADDR);
+}
+
+void config_out_buffer(struct vcap_client_data *c_data,
+ struct vcap_buffer *buf)
+{
+ struct vcap_dev *dev = c_data->dev;
+ int size;
+ size = c_data->vp_out_fmt.height * c_data->vp_out_fmt.width;
+ writel_relaxed(buf->paddr, VCAP_VP_OUT_Y_BASE_ADDR);
+ writel_relaxed(buf->paddr + size, VCAP_VP_OUT_C_BASE_ADDR);
+}
+
+int vp_setup_buffers(struct vcap_client_data *c_data)
+{
+ struct vp_action *vp_act;
+ struct vcap_dev *dev;
+ unsigned long flags = 0;
+
+ if (!c_data->streaming)
+ return -ENOEXEC;
+ dev = c_data->dev;
+ dprintk(2, "Start setup buffers\n");
+
+ /* No need to verify vp_client is not NULL caller does so */
+ vp_act = &dev->vp_client->vid_vp_action;
+
+ spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+ if (list_empty(&vp_act->in_active)) {
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+ dprintk(1, "%s: VP We have no more input buffers\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ if (list_empty(&vp_act->out_active)) {
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock,
+ flags);
+ dprintk(1, "%s: VP We have no more output buffers\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ vp_act->bufT2 = list_entry(vp_act->in_active.next,
+ struct vcap_buffer, list);
+ list_del(&vp_act->bufT2->list);
+
+ vp_act->bufOut = list_entry(vp_act->out_active.next,
+ struct vcap_buffer, list);
+ list_del(&vp_act->bufOut->list);
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
+ config_in_buffer(c_data, vp_act->bufT2);
+ config_out_buffer(c_data, vp_act->bufOut);
+ return 0;
+}
+
+static void mov_buf_to_vc(struct work_struct *work)
+{
+ struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+ struct v4l2_buffer p;
+ struct vb2_buffer *vb_vc;
+ struct vcap_buffer *buf_vc;
+ struct vb2_buffer *vb_vp;
+ struct vcap_buffer *buf_vp;
+ int rc;
+
+ p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ p.memory = V4L2_MEMORY_USERPTR;
+
+ /* This loop exits when there is no more buffers left */
+ while (1) {
+ if (!vp_work->cd->streaming)
+ return;
+ rc = vb2_dqbuf(&vp_work->cd->vp_in_vidq, &p, O_NONBLOCK);
+ if (rc < 0)
+ return;
+
+ vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
+ if (NULL == vb_vc) {
+ dprintk(1, "%s: buffer is NULL\n", __func__);
+ vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ return;
+ }
+ buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
+
+ vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
+ if (NULL == vb_vp) {
+ dprintk(1, "%s: buffer is NULL\n", __func__);
+ vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ return;
+ }
+ buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
+ buf_vc->ion_handle = buf_vp->ion_handle;
+ buf_vc->paddr = buf_vp->paddr;
+ buf_vp->ion_handle = NULL;
+ buf_vp->paddr = 0;
+
+ p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ /* This call should not fail */
+ rc = vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ if (rc < 0) {
+ dprintk(1, "%s: qbuf to vc failed\n", __func__);
+ buf_vp->ion_handle = buf_vc->ion_handle;
+ buf_vp->paddr = buf_vc->paddr;
+ buf_vc->ion_handle = NULL;
+ buf_vc->paddr = 0;
+ p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
+ vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ }
+ }
+}
+
+static void vp_wq_fnc(struct work_struct *work)
+{
+ struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
+ struct vcap_dev *dev;
+ struct vp_action *vp_act;
+ uint32_t irq;
+ int rc;
+#ifndef TOP_FIELD_FIX
+ bool top_field;
+#endif
+
+ if (vp_work && vp_work->cd && vp_work->cd->dev)
+ dev = vp_work->cd->dev;
+ else
+ return;
+
+ vp_act = &dev->vp_client->vid_vp_action;
+ irq = vp_work->irq;
+
+ rc = readl_relaxed(VCAP_OFFSET(0x048));
+ while (!(rc & 0x00000100))
+ rc = readl_relaxed(VCAP_OFFSET(0x048));
+
+ writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
+ writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+
+ /* Queue the done buffers */
+ if (vp_act->vp_state == VP_NORMAL &&
+ vp_act->bufNR.nr_pos != TM1_BUF) {
+ vb2_buffer_done(&vp_act->bufTm1->vb, VB2_BUF_STATE_DONE);
+ if (vp_work->cd->op_mode == VC_AND_VP_VCAP_OP)
+ queue_work(dev->vcap_wq, &dev->vp_to_vc_work.work);
+ }
+
+ vb2_buffer_done(&vp_act->bufOut->vb, VB2_BUF_STATE_DONE);
+
+ /* Cycle to next state */
+ if (vp_act->vp_state != VP_NORMAL)
+ vp_act->vp_state++;
+#ifdef TOP_FIELD_FIX
+ vp_act->top_field = !vp_act->top_field;
+#endif
+
+ /* Cycle Buffers*/
+ if (vp_work->cd->vid_vp_action.nr_enabled) {
+ if (vp_act->bufNR.nr_pos == TM1_BUF)
+ vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
+
+ if (vp_act->bufNR.nr_pos != BUF_NOT_IN_USE)
+ vp_act->bufNR.nr_pos++;
+
+ vp_act->bufTm1 = vp_act->bufT0;
+ vp_act->bufT0 = vp_act->bufT1;
+ vp_act->bufT1 = vp_act->bufNRT2;
+ vp_act->bufNRT2 = vp_act->bufT2;
+ config_nr_buffer(vp_work->cd, vp_act->bufNRT2);
+ } else {
+ vp_act->bufTm1 = vp_act->bufT0;
+ vp_act->bufT0 = vp_act->bufT1;
+ vp_act->bufT1 = vp_act->bufT2;
+ }
+
+ rc = vp_setup_buffers(vp_work->cd);
+ if (rc < 0) {
+ /* setup_buf failed because we are waiting for buffers */
+ writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
+ writel_iowmb(irq, VCAP_VP_INT_CLEAR);
+ atomic_set(&dev->vp_enabled, 0);
+ return;
+ }
+
+ /* Config VP */
+#ifndef TOP_FIELD_FIX
+ if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+ top_field = 1;
+#endif
+
+#ifdef TOP_FIELD_FIX
+ writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+ writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+ enable_irq(dev->vpirq->start);
+ writel_iowmb(irq, VCAP_VP_INT_CLEAR);
+}
+
+irqreturn_t vp_handler(struct vcap_dev *dev)
+{
+ struct vcap_client_data *c_data;
+ struct vp_action *vp_act;
+ uint32_t irq;
+ int rc;
+
+ irq = readl_relaxed(VCAP_VP_INT_STATUS);
+
+ dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
+ if (!irq & VP_PIC_DONE) {
+ writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+ pr_err("VP IRQ shows some error\n");
+ return IRQ_HANDLED;
+ }
+
+ if (dev->vp_client == NULL) {
+ writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+ pr_err("VC: There is no active vp client\n");
+ return IRQ_HANDLED;
+ }
+
+ vp_act = &dev->vp_client->vid_vp_action;
+ c_data = dev->vp_client;
+
+ if (vp_act->vp_state == VP_UNKNOWN) {
+ writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+ pr_err("%s: VP is in an unknown state\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
+ dev->vp_work.cd = c_data;
+ dev->vp_work.irq = irq;
+ rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
+
+ disable_irq_nosync(dev->vpirq->start);
+ return IRQ_HANDLED;
+}
+
+void vp_stop_capture(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+
+ writel_iowmb(0x00000000, VCAP_VP_CTRL);
+ flush_workqueue(dev->vcap_wq);
+
+ if (atomic_read(&dev->vp_enabled) == 1)
+ disable_irq(dev->vpirq->start);
+
+ writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
+ writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
+}
+
+int config_vp_format(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+
+ INIT_WORK(&dev->vp_to_vc_work.work, mov_buf_to_vc);
+ dev->vp_to_vc_work.cd = c_data;
+
+ /* SW restart VP */
+ writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
+ writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
+
+ /* Film Mode related settings */
+ writel_iowmb(0x00000000, VCAP_VP_FILM_PROJECTION_T0);
+ writel_relaxed(0x00000000, VCAP_VP_FILM_PROJECTION_T2);
+ writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MAX_PROJ);
+ writel_relaxed(0x00000000, VCAP_VP_FILM_PAST_MIN_PROJ);
+ writel_relaxed(0x00000000, VCAP_VP_FILM_SEQUENCE_HIST);
+ writel_relaxed(0x00000000, VCAP_VP_FILM_MODE_STATE);
+
+ writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
+ writel_relaxed(0x00000010, VCAP_VP_REDUCT_AVG_MOTION);
+ writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+ writel_relaxed(0x40000000, VCAP_VP_NR_AVG_LUMA);
+ writel_relaxed(0x40000000, VCAP_VP_NR_AVG_CHROMA);
+ writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_LUMA);
+ writel_relaxed(0x40000000, VCAP_VP_NR_CTRL_CHROMA);
+ writel_relaxed(0x00000000, VCAP_VP_BAL_AVG_BLEND);
+ writel_relaxed(0x00000000, VCAP_VP_VMOTION_HIST);
+ writel_relaxed(0x05047D19, VCAP_VP_FILM_ANALYSIS_CONFIG);
+ writel_relaxed(0x20260200, VCAP_VP_FILM_STATE_CONFIG);
+ writel_relaxed(0x23A60114, VCAP_VP_FVM_CONFIG);
+ writel_relaxed(0x03043210, VCAP_VP_FILM_ANALYSIS_CONFIG2);
+ writel_relaxed(0x04DB7A51, VCAP_VP_MIXED_ANALYSIS_CONFIG);
+ writel_relaxed(0x14224916, VCAP_VP_SPATIAL_CONFIG);
+ writel_relaxed(0x83270400, VCAP_VP_SPATIAL_CONFIG2);
+ writel_relaxed(0x0F000F92, VCAP_VP_SPATIAL_CONFIG3);
+ writel_relaxed(0x00000000, VCAP_VP_TEMPORAL_CONFIG);
+ writel_relaxed(0x00000000, VCAP_VP_PIXEL_DIFF_CONFIG);
+ writel_relaxed(0x0C090511, VCAP_VP_H_FREQ_CONFIG);
+ writel_relaxed(0x0A000000, VCAP_VP_NR_CONFIG);
+ writel_relaxed(0x008F4149, VCAP_VP_NR_LUMA_CONFIG);
+ writel_relaxed(0x008F4149, VCAP_VP_NR_CHROMA_CONFIG);
+ writel_relaxed(0x43C0FD0C, VCAP_VP_BAL_CONFIG);
+ writel_relaxed(0x00000255, VCAP_VP_BAL_MOTION_CONFIG);
+ writel_relaxed(0x24154252, VCAP_VP_BAL_LIGHT_COMB);
+ writel_relaxed(0x10024414, VCAP_VP_BAL_VMOTION_CONFIG);
+ writel_relaxed(0x00000002, VCAP_VP_NR_CONFIG2);
+ writel_relaxed((c_data->vp_out_fmt.height-1)<<16 |
+ (c_data->vp_out_fmt.width - 1), VCAP_VP_FRAME_SIZE);
+ writel_relaxed(0x00000000, VCAP_VP_SPLIT_SCRN_CTRL);
+
+ return 0;
+}
+
+int init_motion_buf(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ void *buf;
+ unsigned long motion_base_addr;
+ uint32_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
+ ((c_data->vp_out_fmt.height + 7) >> 3) * 16;
+
+ if (c_data->vid_vp_action.bufMotion) {
+ pr_err("Motion buffer has already been created");
+ return -ENOEXEC;
+ }
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ c_data->vid_vp_action.bufMotion = buf;
+ motion_base_addr = virt_to_phys(buf);
+ writel_iowmb(motion_base_addr, VCAP_VP_MOTION_EST_ADDR);
+ return 0;
+}
+
+void deinit_motion_buf(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ void *buf;
+
+ if (!c_data->vid_vp_action.bufMotion) {
+ dprintk(1, "Motion buffer has not been created");
+ return;
+ }
+
+ buf = c_data->vid_vp_action.bufMotion;
+
+ writel_iowmb(0x00000000, VCAP_VP_MOTION_EST_ADDR);
+ c_data->vid_vp_action.bufMotion = NULL;
+ kfree(buf);
+ return;
+}
+
+int init_nr_buf(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ struct nr_buffer *buf;
+ uint32_t frame_size, tot_size, rc;
+
+ if (c_data->vid_vp_action.bufNR.vaddr) {
+ pr_err("NR buffer has already been created");
+ return -ENOEXEC;
+ }
+ buf = &c_data->vid_vp_action.bufNR;
+
+ frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
+ if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ tot_size = frame_size * 2;
+ else
+ tot_size = frame_size / 2 * 3;
+
+ buf->vaddr = kzalloc(tot_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->paddr = virt_to_phys(buf->vaddr);
+ rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
+ rc |= 0x02D00001;
+ writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
+ writel_relaxed(buf->paddr, VCAP_VP_NR_T2_Y_BASE_ADDR);
+ writel_relaxed(buf->paddr + frame_size, VCAP_VP_NR_T2_C_BASE_ADDR);
+ buf->nr_pos = NRT2_BUF;
+ return 0;
+}
+
+void deinit_nr_buf(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ struct nr_buffer *buf;
+ uint32_t rc;
+
+ if (!c_data->vid_vp_action.bufNR.vaddr) {
+ pr_err("NR buffer has not been created");
+ return;
+ }
+
+ buf = &c_data->vid_vp_action.bufNR;
+
+ rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
+ rc &= !(0x02D00001);
+ writel_relaxed(rc, VCAP_VP_NR_CONFIG2);
+
+ kfree(buf->vaddr);
+ buf->paddr = 0;
+ buf->vaddr = NULL;
+ return;
+}
+
+int kickoff_vp(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev;
+ struct vp_action *vp_act;
+ unsigned long flags = 0;
+ unsigned int chroma_fmt = 0;
+ int size;
+#ifndef TOP_FIELD_FIX
+ bool top_field;
+#endif
+
+ if (!c_data->streaming)
+ return -ENOEXEC;
+
+ dev = c_data->dev;
+ dprintk(2, "Start Kickoff\n");
+
+ if (dev->vp_client == NULL) {
+ pr_err("No active vp client\n");
+ return -ENODEV;
+ }
+ vp_act = &dev->vp_client->vid_vp_action;
+
+ spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+ if (list_empty(&vp_act->in_active)) {
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+ pr_err("%s: VP We have no more input buffers\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ vp_act->bufT1 = list_entry(vp_act->in_active.next,
+ struct vcap_buffer, list);
+ list_del(&vp_act->bufT1->list);
+
+ if (list_empty(&vp_act->in_active)) {
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+ list_add(&vp_act->bufT1->list, &vp_act->in_active);
+ pr_err("%s: VP We have no more input buffers\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ vp_act->bufT2 = list_entry(vp_act->in_active.next,
+ struct vcap_buffer, list);
+ list_del(&vp_act->bufT2->list);
+
+ if (list_empty(&vp_act->out_active)) {
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+ list_add(&vp_act->bufT2->list, &vp_act->in_active);
+ list_add(&vp_act->bufT1->list, &vp_act->in_active);
+ pr_err("%s: VP We have no more output buffers\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ vp_act->bufOut = list_entry(vp_act->out_active.next,
+ struct vcap_buffer, list);
+ list_del(&vp_act->bufOut->list);
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
+ size = c_data->vp_in_fmt.height * c_data->vp_in_fmt.width;
+ writel_relaxed(vp_act->bufT1->paddr, VCAP_VP_T1_Y_BASE_ADDR);
+ writel_relaxed(vp_act->bufT1->paddr + size, VCAP_VP_T1_C_BASE_ADDR);
+
+ config_in_buffer(c_data, vp_act->bufT2);
+ config_out_buffer(c_data, vp_act->bufOut);
+
+ /* Config VP */
+ if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ chroma_fmt = 1;
+ writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+ chroma_fmt << 11 | 0x2 << 4, VCAP_VP_IN_CONFIG);
+
+ chroma_fmt = 0;
+ if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
+ chroma_fmt = 1;
+
+ writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+ chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
+
+ /* Enable Interrupt */
+#ifdef TOP_FIELD_FIX
+ vp_act->top_field = 1;
+#else
+ if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+ top_field = 1;
+#endif
+ vp_act->vp_state = VP_FRAME2;
+ writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
+#ifdef TOP_FIELD_FIX
+ writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+ writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+ atomic_set(&c_data->dev->vp_enabled, 1);
+ enable_irq(dev->vpirq->start);
+ return 0;
+}
+
+int continue_vp(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev;
+ struct vp_action *vp_act;
+ int rc;
+#ifndef TOP_FIELD_FIX
+ bool top_field;
+#endif
+
+ dprintk(2, "Start Continue\n");
+ dev = c_data->dev;
+
+ if (dev->vp_client == NULL) {
+ pr_err("No active vp client\n");
+ return -ENODEV;
+ }
+ vp_act = &dev->vp_client->vid_vp_action;
+
+ if (vp_act->vp_state == VP_UNKNOWN) {
+ pr_err("%s: VP is in an unknown state\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ rc = vp_setup_buffers(c_data);
+ if (rc < 0)
+ return rc;
+
+#ifndef TOP_FIELD_FIX
+ if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+ top_field = 1;
+#endif
+
+ /* Config VP & Enable Interrupt */
+ writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
+#ifdef TOP_FIELD_FIX
+ writel_iowmb(0x00000000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | vp_act->top_field << 0, VCAP_VP_CTRL);
+#else
+ writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000 | top_field, VCAP_VP_CTRL);
+#endif
+
+ atomic_set(&c_data->dev->vp_enabled, 1);
+ enable_irq(dev->vpirq->start);
+ return 0;
+}
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
new file mode 100644
index 0000000..47ad8d4
--- /dev/null
+++ b/drivers/media/video/vcap_vp.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef VCAP_VP_H
+#define VCAP_VP_H
+
+#include <linux/interrupt.h>
+
+#include <media/vcap_v4l2.h>
+
+#define VCAP_BASE (dev->vcapbase)
+#define VCAP_OFFSET(off) (VCAP_BASE + off)
+
+#define VCAP_VP_INT_STATUS (VCAP_BASE + 0x404)
+#define VCAP_VP_INT_CLEAR (VCAP_BASE + 0x40C)
+
+#define VCAP_VP_SW_RESET (VCAP_BASE + 0x410)
+#define VCAP_VP_INTERRUPT_ENABLE (VCAP_BASE + 0x408)
+
+#define VCAP_VP_FILM_PROJECTION_T0 (VCAP_BASE + 0x50C)
+#define VCAP_VP_FILM_PROJECTION_T2 (VCAP_BASE + 0x508)
+#define VCAP_VP_FILM_PAST_MAX_PROJ (VCAP_BASE + 0x510)
+#define VCAP_VP_FILM_PAST_MIN_PROJ (VCAP_BASE + 0x514)
+#define VCAP_VP_FILM_SEQUENCE_HIST (VCAP_BASE + 0x504)
+#define VCAP_VP_FILM_MODE_STATE (VCAP_BASE + 0x500)
+
+#define VCAP_VP_BAL_VMOTION_STATE (VCAP_BASE + 0x690)
+#define VCAP_VP_REDUCT_AVG_MOTION (VCAP_BASE + 0x610)
+#define VCAP_VP_REDUCT_AVG_MOTION2 (VCAP_BASE + 0x614)
+
+#define VCAP_VP_NR_AVG_LUMA (VCAP_BASE + 0x608)
+#define VCAP_VP_NR_AVG_CHROMA (VCAP_BASE + 0x60C)
+#define VCAP_VP_NR_CTRL_LUMA (VCAP_BASE + 0x600)
+#define VCAP_VP_NR_CTRL_CHROMA (VCAP_BASE + 0x604)
+
+#define VCAP_VP_BAL_AVG_BLEND (VCAP_BASE + 0x694)
+#define VCAP_VP_VMOTION_HIST (VCAP_BASE + 0x6F8)
+
+#define VCAP_VP_MOTION_EST_ADDR (VCAP_BASE + 0x4E0)
+#define VCAP_VP_FILM_ANALYSIS_CONFIG (VCAP_BASE + 0x520)
+#define VCAP_VP_FILM_STATE_CONFIG (VCAP_BASE + 0x524)
+
+#define VCAP_VP_FVM_CONFIG (VCAP_BASE + 0x550)
+#define VCAP_VP_FILM_ANALYSIS_CONFIG2 (VCAP_BASE + 0x52C)
+#define VCAP_VP_MIXED_ANALYSIS_CONFIG (VCAP_BASE + 0x530)
+
+#define VCAP_VP_SPATIAL_CONFIG (VCAP_BASE + 0x580)
+#define VCAP_VP_SPATIAL_CONFIG2 (VCAP_BASE + 0x584)
+#define VCAP_VP_SPATIAL_CONFIG3 (VCAP_BASE + 0x588)
+#define VCAP_VP_TEMPORAL_CONFIG (VCAP_BASE + 0x5C0)
+
+#define VCAP_VP_PIXEL_DIFF_CONFIG (VCAP_BASE + 0x6FC)
+#define VCAP_VP_H_FREQ_CONFIG (VCAP_BASE + 0x528)
+#define VCAP_VP_NR_CONFIG (VCAP_BASE + 0x620)
+#define VCAP_VP_NR_LUMA_CONFIG (VCAP_BASE + 0x624)
+#define VCAP_VP_NR_CHROMA_CONFIG (VCAP_BASE + 0x628)
+#define VCAP_VP_BAL_CONFIG (VCAP_BASE + 0x680)
+#define VCAP_VP_BAL_MOTION_CONFIG (VCAP_BASE + 0x684)
+#define VCAP_VP_BAL_LIGHT_COMB (VCAP_BASE + 0x688)
+#define VCAP_VP_BAL_VMOTION_CONFIG (VCAP_BASE + 0x68C)
+
+#define VCAP_VP_NR_CONFIG2 (VCAP_BASE + 0x484)
+#define VCAP_VP_FRAME_SIZE (VCAP_BASE + 0x48C)
+#define VCAP_VP_SPLIT_SCRN_CTRL (VCAP_BASE + 0x750)
+
+#define VCAP_VP_IN_CONFIG (VCAP_BASE + 0x480)
+#define VCAP_VP_OUT_CONFIG (VCAP_BASE + 0x488)
+
+#define VCAP_VP_T2_Y_BASE_ADDR (VCAP_BASE + 0x4C0)
+#define VCAP_VP_T2_C_BASE_ADDR (VCAP_BASE + 0x4C4)
+#define VCAP_VP_OUT_Y_BASE_ADDR (VCAP_BASE + 0x4CC)
+#define VCAP_VP_OUT_C_BASE_ADDR (VCAP_BASE + 0x4D0)
+#define VCAP_VP_OUT_CR_BASE_ADDR (VCAP_BASE + 0x4D4)
+
+#define VCAP_VP_CTRL (VCAP_BASE + 0x4D8)
+
+#define VCAP_VP_T1_Y_BASE_ADDR (VCAP_BASE + 0x4A8)
+#define VCAP_VP_T1_C_BASE_ADDR (VCAP_BASE + 0x4AC)
+#define VCAP_VP_NR_T2_Y_BASE_ADDR (VCAP_BASE + 0x4B4)
+#define VCAP_VP_NR_T2_C_BASE_ADDR (VCAP_BASE + 0x4B8)
+
+#define VP_PIC_DONE (0x1 << 0)
+
+irqreturn_t vp_handler(struct vcap_dev *dev);
+int config_vp_format(struct vcap_client_data *c_data);
+void vp_stop_capture(struct vcap_client_data *c_data);
+int init_motion_buf(struct vcap_client_data *c_data);
+void deinit_motion_buf(struct vcap_client_data *c_data);
+int init_nr_buf(struct vcap_client_data *c_data);
+void deinit_nr_buf(struct vcap_client_data *c_data);
+int kickoff_vp(struct vcap_client_data *c_data);
+int continue_vp(struct vcap_client_data *c_data);
+
+#endif
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 2a25089..e2dff4b 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -41,7 +41,7 @@
};
struct wcd9xxx_i2c wcd9xxx_modules[MAX_WCD9XXX_DEVICE];
-static int wcd9xxx_intf;
+static int wcd9xxx_intf = -1;
static int wcd9xxx_read(struct wcd9xxx *wcd9xxx, unsigned short reg,
int bytes, void *dest, bool interface_reg)
@@ -331,17 +331,35 @@
pr_info("idbyte_0[%08x] idbyte_1[%08x] idbyte_2[%08x] idbyte_3[%08x]\n",
idbyte_0, idbyte_1, idbyte_2, idbyte_3);
- if (!strncmp(wcd9xxx->slim->name, "tabla", 5)) {
+ if (wcd9xxx->slim != NULL) {
+ if (!strncmp(wcd9xxx->slim->name, "tabla", 5)) {
+ if (TABLA_IS_1_X(wcd9xxx->version)) {
+ wcd9xxx_dev = tabla1x_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
+ } else {
+ wcd9xxx_dev = tabla_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(tabla_devs);
+ }
+ } else {
+ wcd9xxx_dev = sitar_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
+ }
+ } else {
+ /* TODO: add an explicit Tabla check here.
+ * For now, reading the codec version
+ * covers Tabla only.
+ */
+ pr_debug("%s : Read codec version using I2C\n", __func__);
if (TABLA_IS_1_X(wcd9xxx->version)) {
wcd9xxx_dev = tabla1x_devs;
wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
- } else {
+ } else if (TABLA_IS_2_0(wcd9xxx->version)) {
wcd9xxx_dev = tabla_devs;
wcd9xxx_dev_size = ARRAY_SIZE(tabla_devs);
+ } else {
+ wcd9xxx_dev = sitar_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
}
- } else {
- wcd9xxx_dev = sitar_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
}
ret = mfd_add_devices(wcd9xxx->dev, -1,
@@ -372,7 +390,8 @@
wake_lock_destroy(&wcd9xxx->wlock);
mutex_destroy(&wcd9xxx->io_lock);
mutex_destroy(&wcd9xxx->xfer_lock);
- slim_remove_device(wcd9xxx->slim_slave);
+ if (wcd9xxx_intf == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ slim_remove_device(wcd9xxx->slim_slave);
kfree(wcd9xxx);
}
@@ -478,12 +497,11 @@
};
#endif
-static int wcd9xxx_enable_supplies(struct wcd9xxx *wcd9xxx)
+static int wcd9xxx_enable_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
{
int ret;
int i;
- struct wcd9xxx_pdata *pdata = wcd9xxx->slim->dev.platform_data;
-
wcd9xxx->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
ARRAY_SIZE(pdata->regulator),
GFP_KERNEL);
@@ -546,10 +564,10 @@
return ret;
}
-static void wcd9xxx_disable_supplies(struct wcd9xxx *wcd9xxx)
+static void wcd9xxx_disable_supplies(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_pdata *pdata)
{
int i;
- struct wcd9xxx_pdata *pdata = wcd9xxx->slim->dev.platform_data;
regulator_bulk_disable(ARRAY_SIZE(pdata->regulator),
wcd9xxx->supplies);
@@ -695,6 +713,10 @@
int ret = 0;
static int device_id;
+ if (wcd9xxx_intf == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ pr_info("tabla card is already detected in slimbus mode\n");
+ return -ENODEV;
+ }
if (device_id > 0) {
wcd9xxx_modules[device_id++].client = client;
pr_info("probe for other slaves devices of tabla\n");
@@ -721,8 +743,7 @@
dev_set_drvdata(&client->dev, wcd9xxx);
wcd9xxx->dev = &client->dev;
wcd9xxx->reset_gpio = pdata->reset_gpio;
-
- ret = wcd9xxx_enable_supplies(wcd9xxx);
+ ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
if (ret) {
pr_err("%s: Fail to enable Codec supplies\n", __func__);
goto err_codec;
@@ -759,7 +780,7 @@
err_device_init:
wcd9xxx_free_reset(wcd9xxx);
err_supplies:
- wcd9xxx_disable_supplies(wcd9xxx);
+ wcd9xxx_disable_supplies(wcd9xxx, pdata);
err_codec:
kfree(wcd9xxx);
fail:
@@ -769,10 +790,10 @@
static int __devexit wcd9xxx_i2c_remove(struct i2c_client *client)
{
struct wcd9xxx *wcd9xxx;
-
+ struct wcd9xxx_pdata *pdata = client->dev.platform_data;
pr_debug("exit\n");
wcd9xxx = dev_get_drvdata(&client->dev);
- wcd9xxx_disable_supplies(wcd9xxx);
+ wcd9xxx_disable_supplies(wcd9xxx, pdata);
wcd9xxx_device_exit(wcd9xxx);
return 0;
}
@@ -809,7 +830,7 @@
wcd9xxx->reset_gpio = pdata->reset_gpio;
wcd9xxx->dev = &slim->dev;
- ret = wcd9xxx_enable_supplies(wcd9xxx);
+ ret = wcd9xxx_enable_supplies(wcd9xxx, pdata);
if (ret)
goto err_codec;
usleep_range(5, 5);
@@ -901,7 +922,7 @@
err_reset:
wcd9xxx_free_reset(wcd9xxx);
err_supplies:
- wcd9xxx_disable_supplies(wcd9xxx);
+ wcd9xxx_disable_supplies(wcd9xxx, pdata);
err_codec:
kfree(wcd9xxx);
err:
@@ -910,6 +931,7 @@
static int wcd9xxx_slim_remove(struct slim_device *pdev)
{
struct wcd9xxx *wcd9xxx;
+ struct wcd9xxx_pdata *pdata = pdev->dev.platform_data;
#ifdef CONFIG_DEBUG_FS
debugfs_remove(debugfs_peek);
@@ -919,7 +941,7 @@
wcd9xxx = slim_get_devicedata(pdev);
wcd9xxx_deinit_slimslave(wcd9xxx);
slim_remove_device(wcd9xxx->slim_slave);
- wcd9xxx_disable_supplies(wcd9xxx);
+ wcd9xxx_disable_supplies(wcd9xxx, pdata);
wcd9xxx_device_exit(wcd9xxx);
return 0;
}
@@ -1032,6 +1054,22 @@
.suspend = wcd9xxx_slim_suspend,
};
+static const struct slim_device_id sitar1p1_slimtest_id[] = {
+ {"sitar1p1-slim", 0},
+ {}
+};
+static struct slim_driver sitar1p1_slim_driver = {
+ .driver = {
+ .name = "sitar1p1-slim",
+ .owner = THIS_MODULE,
+ },
+ .probe = wcd9xxx_slim_probe,
+ .remove = wcd9xxx_slim_remove,
+ .id_table = sitar1p1_slimtest_id,
+ .resume = wcd9xxx_slim_resume,
+ .suspend = wcd9xxx_slim_suspend,
+};
+
static const struct slim_device_id slimtest_id[] = {
{"tabla-slim", 0},
{}
@@ -1094,7 +1132,7 @@
static int __init wcd9xxx_init(void)
{
- int ret1, ret2, ret3, ret4;
+ int ret1, ret2, ret3, ret4, ret5;
ret1 = slim_driver_register(&tabla_slim_driver);
if (ret1 != 0)
@@ -1109,10 +1147,14 @@
pr_err("failed to add the I2C driver\n");
ret4 = slim_driver_register(&sitar_slim_driver);
- if (ret1 != 0)
+ if (ret4 != 0)
pr_err("Failed to register sitar SB driver: %d\n", ret4);
- return (ret1 && ret2 && ret3 && ret4) ? -1 : 0;
+ ret5 = slim_driver_register(&sitar1p1_slim_driver);
+ if (ret5 != 0)
+ pr_err("Failed to register sitar SB driver: %d\n", ret5);
+
+ return (ret1 && ret2 && ret3 && ret4 && ret5) ? -1 : 0;
}
module_init(wcd9xxx_init);
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 3aff7f17..889c416 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -461,7 +461,13 @@
pr_debug("%s: ch_cnt[%d]\n", __func__, ch_cnt);
for (i = 0; i < ch_cnt; i++) {
idx = (ch_num[i] - BASE_CH_NUM -
- SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+ SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+ if (idx < 0) {
+ pr_err("%s: Error:-Invalid index found = %d\n",
+ __func__, idx);
+ ret = -EINVAL;
+ goto err;
+ }
sph[i] = rx[idx].sph;
grph = rx[idx].grph;
}
@@ -501,6 +507,12 @@
pr_debug("%s: ch_cnt[%d]\n", __func__, ch_cnt);
for (i = 0; i < ch_cnt; i++) {
idx = (ch_num[i] - BASE_CH_NUM);
+ if (idx < 0) {
+ pr_err("%s: Error:- Invalid index found = %d\n",
+ __func__, idx);
+ ret = -EINVAL;
+ goto err;
+ }
sph[i] = tx[idx].sph;
grph = tx[idx].grph;
}
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
index 31c79a0..555dfdd 100644
--- a/drivers/misc/isa1200.c
+++ b/drivers/misc/isa1200.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +32,7 @@
#define ISA1200_HCTRL5_VIB_STRT 0xD5
#define ISA1200_HCTRL5_VIB_STOP 0x6B
+#define ISA1200_POWER_DOWN_MASK 0x7F
struct isa1200_chip {
struct i2c_client *client;
@@ -45,6 +46,8 @@
unsigned int period_ns;
bool is_len_gpio_valid;
struct regulator **regs;
+ bool clk_on;
+ u8 hctrl0_val;
};
static int isa1200_read_reg(struct i2c_client *client, int reg)
@@ -74,32 +77,111 @@
int rc = 0;
if (enable) {
+ /* if hen and len are separate then enable hen
+ * otherwise set normal mode bit */
+ if (haptic->is_len_gpio_valid == true)
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+ else {
+ rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+ haptic->hctrl0_val | ~ISA1200_POWER_DOWN_MASK);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ return;
+ }
+ }
+
if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
int period_us = haptic->period_ns / 1000;
+
rc = pwm_config(haptic->pwm,
(period_us * haptic->pdata->duty) / 100,
period_us);
- if (rc < 0)
+ if (rc < 0) {
pr_err("%s: pwm_config fail\n", __func__);
+ goto chip_dwn;
+ }
+
rc = pwm_enable(haptic->pwm);
- if (rc < 0)
+ if (rc < 0) {
pr_err("%s: pwm_enable fail\n", __func__);
+ goto chip_dwn;
+ }
} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+ /* vote for clock */
+ if (haptic->pdata->clk_enable && !haptic->clk_on) {
+ rc = haptic->pdata->clk_enable(true);
+ if (rc < 0) {
+ pr_err("%s: clk enable failed\n",
+ __func__);
+ goto chip_dwn;
+ }
+ haptic->clk_on = true;
+ }
+
rc = isa1200_write_reg(haptic->client,
ISA1200_HCTRL5,
ISA1200_HCTRL5_VIB_STRT);
- if (rc < 0)
+ if (rc < 0) {
pr_err("%s: start vibartion fail\n", __func__);
+ goto dis_clk;
+ }
}
} else {
- if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+ /* if hen and len are separate then pull down hen
+ * otherwise set power down bit */
+ if (haptic->is_len_gpio_valid == true)
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+ else {
+ rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+ haptic->hctrl0_val & ISA1200_POWER_DOWN_MASK);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ return;
+ }
+ }
+
+ if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
pwm_disable(haptic->pwm);
- else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+ } else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
rc = isa1200_write_reg(haptic->client,
ISA1200_HCTRL5,
ISA1200_HCTRL5_VIB_STOP);
if (rc < 0)
pr_err("%s: stop vibartion fail\n", __func__);
+
+ /* de-vote clock */
+ if (haptic->pdata->clk_enable && haptic->clk_on) {
+ rc = haptic->pdata->clk_enable(false);
+ if (rc < 0) {
+ pr_err("%s: clk disable failed\n",
+ __func__);
+ return;
+ }
+ haptic->clk_on = false;
+ }
+ }
+ }
+
+ return;
+
+dis_clk:
+ if (haptic->pdata->clk_enable && haptic->clk_on) {
+ rc = haptic->pdata->clk_enable(false);
+ if (rc < 0) {
+ pr_err("%s: clk disable failed\n", __func__);
+ return;
+ }
+ haptic->clk_on = false;
+ }
+chip_dwn:
+ if (haptic->is_len_gpio_valid == true)
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+ else {
+ rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+ haptic->hctrl0_val & ISA1200_POWER_DOWN_MASK);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ return;
}
}
}
@@ -168,7 +250,8 @@
static int isa1200_setup(struct i2c_client *client)
{
struct isa1200_chip *haptic = i2c_get_clientdata(client);
- int value, temp, rc;
+ int temp, rc;
+ u8 value;
gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
if (haptic->is_len_gpio_valid == true)
@@ -218,6 +301,20 @@
goto reset_hctrl1;
}
+ /* if hen and len are separate then pull down hen
+ * otherwise set power down bit */
+ if (haptic->is_len_gpio_valid == true)
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+ else {
+ rc = isa1200_write_reg(client, ISA1200_HCTRL0,
+ value & ISA1200_POWER_DOWN_MASK);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ goto reset_hctrl1;
+ }
+ }
+
+ haptic->hctrl0_val = value;
dump_isa1200_reg("new:", client);
return 0;
@@ -388,6 +485,7 @@
spin_lock_init(&haptic->lock);
INIT_WORK(&haptic->work, isa1200_chip_work);
+ haptic->clk_on = false;
hrtimer_init(&haptic->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
haptic->timer.function = isa1200_vib_timer_func;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2c42bc7..4c92ee5 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1271,9 +1271,11 @@
{
struct ion_handle *ihandle; /* Ion handle */
struct qseecom_load_img_req load_img_req;
- int32_t ret;
+ int ret;
+ int set_cpu_ret = 0;
ion_phys_addr_t pa = 0;
uint32_t len;
+ struct cpumask mask;
struct qseecom_load_app_ireq load_req;
struct qseecom_command_scm_resp resp;
@@ -1302,14 +1304,25 @@
load_req.img_len = load_img_req.img_len;
load_req.phy_addr = pa;
+ /* SCM_CALL tied to Core0 */
+ mask = CPU_MASK_CPU0;
+ set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+ if (set_cpu_ret) {
+ pr_err("set_cpus_allowed_ptr failed : ret %d\n",
+ set_cpu_ret);
+ ret = -EFAULT;
+ goto qseecom_load_external_elf_set_cpu_err;
+ }
+
/* SCM_CALL to load the external elf */
ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
sizeof(struct qseecom_load_app_ireq),
&resp, sizeof(resp));
if (ret) {
- pr_err("scm_call to unload failed : ret %d\n",
+ pr_err("scm_call to load failed : ret %d\n",
ret);
ret = -EFAULT;
+ goto qseecom_load_external_elf_scm_err;
}
if (resp.result == QSEOS_RESULT_INCOMPLETE) {
@@ -1324,6 +1337,18 @@
ret = -EFAULT;
}
}
+
+qseecom_load_external_elf_scm_err:
+ /* Restore the CPU mask */
+ mask = CPU_MASK_ALL;
+ set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+ if (set_cpu_ret) {
+ pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
+ set_cpu_ret);
+ ret = -EFAULT;
+ }
+
+qseecom_load_external_elf_set_cpu_err:
/* Deallocate the handle */
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
@@ -1334,11 +1359,23 @@
static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
{
int ret = 0;
+ int set_cpu_ret = 0;
struct qseecom_command_scm_resp resp;
struct qseecom_unload_app_ireq req;
+ struct cpumask mask;
/* Populate the structure for sending scm call to unload image */
req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+ /* SCM_CALL tied to Core0 */
+ mask = CPU_MASK_CPU0;
+ ret = set_cpus_allowed_ptr(current, &mask);
+ if (ret) {
+ pr_err("set_cpus_allowed_ptr failed : ret %d\n",
+ ret);
+ return -EFAULT;
+ }
+
/* SCM_CALL to unload the external elf */
ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
sizeof(struct qseecom_unload_app_ireq),
@@ -1346,7 +1383,8 @@
if (ret) {
pr_err("scm_call to unload failed : ret %d\n",
ret);
- return -EFAULT;
+ ret = -EFAULT;
+ goto qseecom_unload_external_elf_scm_err;
}
if (resp.result == QSEOS_RESULT_INCOMPLETE) {
ret = __qseecom_process_incomplete_cmd(data, &resp);
@@ -1360,6 +1398,17 @@
ret = -EFAULT;
}
}
+
+qseecom_unload_external_elf_scm_err:
+ /* Restore the CPU mask */
+ mask = CPU_MASK_ALL;
+ set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+ if (set_cpu_ret) {
+ pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
+ set_cpu_ret);
+ ret = -EFAULT;
+ }
+
return ret;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 44228a6..f917c98 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,8 @@
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -802,18 +804,11 @@
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
- if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+ if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
- /* The sanitize operation is supported at v4.5 only */
- if (mmc_can_sanitize(card)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_SANITIZE_START, 1, 0);
- goto out;
- }
-
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
@@ -856,6 +851,47 @@
return err ? 0 : 1;
}
+static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int err = 0;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ if (!(mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+ pr_warning("%s: %s - SANITIZE is not supported\n",
+ mmc_hostname(card->host), __func__);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+ mmc_hostname(card->host), __func__);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1,
+ MMC_SANITIZE_REQ_TIMEOUT);
+
+ if (err)
+ pr_err("%s: %s - mmc_switch() with "
+ "EXT_CSD_SANITIZE_START failed. err=%d\n",
+ mmc_hostname(card->host), __func__, err);
+
+ pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+ __func__);
+
+out:
+ spin_lock_irq(&md->lock);
+ __blk_end_request(req, err, blk_rq_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+ return err ? 0 : 1;
+}
+
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -1194,6 +1230,13 @@
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ /*
+ * Check BKOPS urgency from each R1 response
+ */
+ if (mmc_card_mmc(card) &&
+ (brq->cmd.resp[0] & R1_EXCEPTION_EVENT))
+ mmc_card_set_check_bkops(card);
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -1323,7 +1366,12 @@
goto out;
}
- if (req && req->cmd_flags & REQ_DISCARD) {
+ if (req && req->cmd_flags & REQ_SANITIZE) {
+ /* complete ongoing async transfer before issuing sanitize */
+ if (card->host && card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_sanitize_rq(mq, req);
+ } else if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 73f63c9..a8409c8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -67,6 +67,9 @@
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
+ if (mmc_card_doing_bkops(mq->card))
+ mmc_interrupt_bkops(mq->card);
+
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
} else {
@@ -74,6 +77,8 @@
set_current_state(TASK_RUNNING);
break;
}
+
+ mmc_start_bkops(mq->card);
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
@@ -146,10 +151,15 @@
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = 0;
- if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+ if (mmc_can_secure_erase_trim(card))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
+static void mmc_queue_setup_sanitize(struct request_queue *q)
+{
+ queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -187,6 +197,9 @@
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
+ if ((mmc_can_sanitize(card) && (host->caps2 & MMC_CAP2_SANITIZE)))
+ mmc_queue_setup_sanitize(mq->queue);
+
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
unsigned int bouncesz;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 925c032..6c82c74 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -223,21 +223,85 @@
host->ops->request(host, mrq);
}
+/**
+ * mmc_start_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ *
+ * Start background operations whenever requested.
+ * when the urgent BKOPS bit is set in a R1 command response
+ * then background operations should be started immediately.
+*/
+void mmc_start_bkops(struct mmc_card *card)
+{
+ int err;
+ unsigned long flags;
+
+ BUG_ON(!card);
+ if (!card->ext_csd.bkops_en || !(card->host->caps2 & MMC_CAP2_BKOPS))
+ return;
+
+ if (mmc_card_check_bkops(card)) {
+ spin_lock_irqsave(&card->host->lock, flags);
+ mmc_card_clr_check_bkops(card);
+ spin_unlock_irqrestore(&card->host->lock, flags);
+ if (mmc_is_exception_event(card, EXT_CSD_URGENT_BKOPS))
+ if (card->ext_csd.raw_bkops_status)
+ mmc_card_set_need_bkops(card);
+ }
+
+ /*
+ * If card is already doing bkops or need for
+ * bkops flag is not set, then do nothing just
+ * return
+ */
+ if (mmc_card_doing_bkops(card) || !mmc_card_need_bkops(card))
+ return;
+
+ mmc_claim_host(card->host);
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_START, 1, 0);
+ if (err) {
+ pr_warning("%s: error %d starting bkops\n",
+ mmc_hostname(card->host), err);
+ mmc_card_clr_need_bkops(card);
+ goto out;
+ }
+
+ spin_lock_irqsave(&card->host->lock, flags);
+ mmc_card_clr_need_bkops(card);
+
+ /*
+ * For urgent bkops status (LEVEL_2 and more)
+ * bkops executed synchronously, otherwise
+ * the operation is in progress
+ */
+ if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2)
+ mmc_card_set_check_bkops(card);
+ else
+ mmc_card_set_doing_bkops(card);
+
+ spin_unlock_irqrestore(&card->host->lock, flags);
+out:
+ mmc_release_host(card->host);
+}
+EXPORT_SYMBOL(mmc_start_bkops);
+
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
-static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
complete(&mrq->completion);
- return;
+ return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
+ return 0;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
@@ -315,6 +379,7 @@
{
int err = 0;
struct mmc_async_req *data = host->areq;
+ int start_err = 0;
/* Prepare a new request */
if (areq)
@@ -323,24 +388,23 @@
if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
- if (err) {
- mmc_post_req(host, host->areq->mrq, 0);
- if (areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- host->areq = NULL;
- goto out;
- }
}
- if (areq)
- __mmc_start_req(host, areq->mrq);
+ if (!err && areq)
+ start_err = __mmc_start_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- host->areq = areq;
- out:
+ /* Cancel a prepared request if it was not started. */
+ if ((err || start_err) && areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ if (err)
+ host->areq = NULL;
+ else
+ host->areq = areq;
+
if (error)
*error = err;
return data;
@@ -450,6 +514,69 @@
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
+ * mmc_interrupt_bkops - interrupt ongoing BKOPS
+ * @card: MMC card to check BKOPS
+ *
+ * Send HPI command to interrupt ongoing background operations,
+ * to allow rapid servicing of foreground operations,e.g. read/
+ * writes. Wait until the card comes out of the programming state
+ * to avoid errors in servicing read/write requests.
+ */
+int mmc_interrupt_bkops(struct mmc_card *card)
+{
+ int err = 0;
+ unsigned long flags;
+
+ BUG_ON(!card);
+
+ err = mmc_interrupt_hpi(card);
+
+ spin_lock_irqsave(&card->host->lock, flags);
+ mmc_card_clr_doing_bkops(card);
+ spin_unlock_irqrestore(&card->host->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_interrupt_bkops);
+
+int mmc_read_bkops_status(struct mmc_card *card)
+{
+ int err;
+ u8 ext_csd[512];
+
+ mmc_claim_host(card->host);
+ err = mmc_send_ext_csd(card, ext_csd);
+ mmc_release_host(card->host);
+ if (err)
+ return err;
+
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+ card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_read_bkops_status);
+
+int mmc_is_exception_event(struct mmc_card *card, unsigned int value)
+{
+ int err;
+
+ err = mmc_read_bkops_status(card);
+ if (err) {
+ pr_err("%s: Didn't read bkops status : %d\n",
+ mmc_hostname(card->host), err);
+ return 0;
+ }
+
+ /* In eMMC 4.41, R1_EXCEPTION_EVENT is URGENT_BKOPS */
+ if (card->ext_csd.rev == 5)
+ return 1;
+
+ return (card->ext_csd.raw_exception_status & value) ? 1 : 0;
+}
+EXPORT_SYMBOL(mmc_is_exception_event);
+
+/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
@@ -2417,8 +2544,12 @@
err = -EBUSY;
if (!err) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ if (mmc_card_doing_bkops(host->card))
+ mmc_interrupt_bkops(host->card);
+
err = host->bus_ops->suspend(host);
+ }
if (!(host->card && mmc_card_sdio(host->card)))
mmc_do_release_host(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index a162586..ca54265 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -363,33 +363,24 @@
show_perf(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mmc_host *host = dev_get_drvdata(dev);
- int64_t rtime_mmcq, wtime_mmcq, rtime_drv, wtime_drv;
- unsigned long rbytes_mmcq, wbytes_mmcq, rbytes_drv, wbytes_drv;
+ int64_t rtime_drv, wtime_drv;
+ unsigned long rbytes_drv, wbytes_drv;
spin_lock(&host->lock);
- rbytes_mmcq = host->perf.rbytes_mmcq;
- wbytes_mmcq = host->perf.wbytes_mmcq;
rbytes_drv = host->perf.rbytes_drv;
wbytes_drv = host->perf.wbytes_drv;
- rtime_mmcq = ktime_to_us(host->perf.rtime_mmcq);
- wtime_mmcq = ktime_to_us(host->perf.wtime_mmcq);
rtime_drv = ktime_to_us(host->perf.rtime_drv);
wtime_drv = ktime_to_us(host->perf.wtime_drv);
spin_unlock(&host->lock);
- return snprintf(buf, PAGE_SIZE, "Write performance at MMCQ Level:"
- "%lu bytes in %lld microseconds\n"
- "Read performance at MMCQ Level:"
- "%lu bytes in %lld microseconds\n"
- "Write performance at driver Level:"
+ return snprintf(buf, PAGE_SIZE, "Write performance at driver Level:"
"%lu bytes in %lld microseconds\n"
"Read performance at driver Level:"
"%lu bytes in %lld microseconds\n",
- wbytes_mmcq, wtime_mmcq, rbytes_mmcq,
- rtime_mmcq, wbytes_drv, wtime_drv,
+ wbytes_drv, wtime_drv,
rbytes_drv, rtime_drv);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 9385087..6178097 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -425,6 +425,24 @@
}
if (card->ext_csd.rev >= 5) {
+ /* check whether the eMMC card support BKOPS */
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ card->ext_csd.bkops = 1;
+ card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
+ card->ext_csd.raw_bkops_status =
+ ext_csd[EXT_CSD_BKOPS_STATUS];
+ if (!card->ext_csd.bkops_en &&
+ card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_EN, 1, 0);
+ if (err)
+ pr_warning("%s: Enabling BKOPS failed\n",
+ mmc_hostname(card->host));
+ else
+ card->ext_csd.bkops_en = 1;
+ }
+ }
+
/* check whether the eMMC card supports HPI */
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
card->ext_csd.hpi = 1;
@@ -464,6 +482,11 @@
ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
}
out:
@@ -1183,6 +1206,25 @@
card->ext_csd.cache_ctrl = err ? 0 : 1;
}
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0) &&
+ (card->ext_csd.max_packed_reads > 0)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 2438176..cf9aea5 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -391,13 +391,22 @@
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags = MMC_CMD_AC;
+ if (index == EXT_CSD_BKOPS_START &&
+ card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
+ cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ else
+ cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
cmd.cmd_timeout_ms = timeout_ms;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
+ /* No need to check card status in case of BKOPS switch*/
+ if (index == EXT_CSD_BKOPS_START)
+ return 0;
+
mmc_delay(1);
/* Must check status to be sure of no errors */
do {
@@ -556,14 +565,14 @@
{
struct mmc_command cmd = {0};
unsigned int opcode;
- unsigned int flags;
+ unsigned int flags = MMC_CMD_AC;
int err;
opcode = card->ext_csd.hpi_cmd;
if (opcode == MMC_STOP_TRANSMISSION)
- flags = MMC_RSP_R1 | MMC_CMD_AC;
+ flags |= MMC_RSP_R1B;
else if (opcode == MMC_SEND_STATUS)
- flags = MMC_RSP_R1 | MMC_CMD_AC;
+ flags |= MMC_RSP_R1;
cmd.opcode = opcode;
cmd.arg = card->rca << 16 | 1;
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 92990b9..f9999c5 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -29,6 +29,18 @@
#define SDIO_DEVICE_ID_MSM_WCN1314 0x2881
#endif
+#ifndef SDIO_VENDOR_ID_MSM_QCA
+#define SDIO_VENDOR_ID_MSM_QCA 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_1 0x300
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_2 0x301
+#endif
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -51,6 +63,12 @@
SDIO_FIXUP(SDIO_VENDOR_ID_MSM, SDIO_DEVICE_ID_MSM_WCN1314,
remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_1,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 39e6ce3..13fe3e6 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -707,7 +707,7 @@
}
if (!err && host->sdio_irqs)
- mmc_signal_sdio_irq(host);
+ wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index d489233..c442907 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -27,18 +27,20 @@
#include "sdio_ops.h"
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int i, ret, count;
unsigned char pending;
struct sdio_func *func;
/*
* Optimization, if there is only 1 function interrupt registered
- * call irq handler directly
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
*/
func = card->sdio_single_irq;
- if (func) {
+ if (func && host->sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
@@ -115,7 +117,8 @@
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
- ret = process_sdio_pending_irqs(host->card);
+ ret = process_sdio_pending_irqs(host);
+ host->sdio_irq_pending = false;
mmc_release_host(host);
/*
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 73a31716..29c09c4 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -55,6 +55,7 @@
#include <mach/dma.h>
#include <mach/sdio_al.h>
#include <mach/mpm.h>
+#include <mach/msm_bus.h>
#include "msm_sdcc.h"
#include "msm_sdcc_dml.h"
@@ -72,6 +73,8 @@
/* Use SPS only if transfer size is more than this macro */
#define SPS_MIN_XFER_SIZE MCI_FIFOSIZE
+#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
+
#if defined(CONFIG_DEBUG_FS)
static void msmsdcc_dbg_createhost(struct msmsdcc_host *);
static struct dentry *debugfs_dir;
@@ -1164,6 +1167,8 @@
if (data->flags & MMC_DATA_READ)
datactrl |= (MCI_DPSM_DIRECTION | MCI_RX_DATA_PEND);
+ else if (host->curr.use_wr_data_pend)
+ datactrl |= MCI_DATA_PEND;
clks = (unsigned long long)data->timeout_ns * host->clk_rate;
do_div(clks, 1000000000UL);
@@ -1609,11 +1614,18 @@
cmd->error = -EILSEQ;
}
+ if (!cmd->error) {
+ if (cmd->cmd_timeout_ms > host->curr.req_tout_ms) {
+ host->curr.req_tout_ms = cmd->cmd_timeout_ms;
+ mod_timer(&host->req_tout_timer, (jiffies +
+ msecs_to_jiffies(host->curr.req_tout_ms)));
+ }
+ }
+
if (!cmd->data || cmd->error) {
if (host->curr.data && host->dma.sg &&
host->is_dma_mode)
- msm_dmov_stop_cmd(host->dma.channel,
- &host->dma.hdr, 0);
+ msm_dmov_flush(host->dma.channel, 0);
else if (host->curr.data && host->sps.sg &&
host->is_sps_mode){
/* Stop current SPS transfer */
@@ -1640,13 +1652,11 @@
msmsdcc_request_end(host, cmd->mrq);
}
}
- } else if ((cmd == host->curr.mrq->sbc) && cmd->data) {
- if (cmd->data->flags & MMC_DATA_READ)
- msmsdcc_start_command(host, host->curr.mrq->cmd, 0);
- else
- msmsdcc_request_start(host, host->curr.mrq);
} else if (cmd->data) {
- if (!(cmd->data->flags & MMC_DATA_READ))
+ if (cmd == host->curr.mrq->sbc)
+ msmsdcc_start_command(host, host->curr.mrq->cmd, 0);
+ else if ((cmd->data->flags & MMC_DATA_WRITE) &&
+ !host->curr.use_wr_data_pend)
msmsdcc_start_data(host, cmd->data, NULL, 0);
}
}
@@ -1689,14 +1699,17 @@
* will take care of signaling sdio irq during
* mmc_sdio_resume().
*/
- if (host->sdcc_suspended)
+ if (host->sdcc_suspended) {
/*
* This is a wakeup interrupt so hold wakelock
* until SDCC resume is handled.
*/
wake_lock(&host->sdio_wlock);
- else
+ } else {
+ spin_unlock(&host->lock);
mmc_signal_sdio_irq(host->mmc);
+ spin_lock(&host->lock);
+ }
ret = 1;
break;
}
@@ -1723,7 +1736,9 @@
if (status & MCI_SDIOINTROPE) {
if (host->sdcc_suspending)
wake_lock(&host->sdio_suspend_wlock);
+ spin_unlock(&host->lock);
mmc_signal_sdio_irq(host->mmc);
+ spin_lock(&host->lock);
}
data = host->curr.data;
@@ -1766,8 +1781,7 @@
msmsdcc_data_err(host, data, status);
host->curr.data_xfered = 0;
if (host->dma.sg && host->is_dma_mode)
- msm_dmov_stop_cmd(host->dma.channel,
- &host->dma.hdr, 0);
+ msm_dmov_flush(host->dma.channel, 0);
else if (host->sps.sg && host->is_sps_mode) {
/* Stop current SPS transfer */
msmsdcc_sps_exit_curr_xfer(host);
@@ -1923,12 +1937,17 @@
static void
msmsdcc_request_start(struct msmsdcc_host *host, struct mmc_request *mrq)
{
- if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
+ if (mrq->data) {
/* Queue/read data, daisy-chain command when data starts */
- if (mrq->sbc)
- msmsdcc_start_data(host, mrq->data, mrq->sbc, 0);
+ if ((mrq->data->flags & MMC_DATA_READ) ||
+ host->curr.use_wr_data_pend)
+ msmsdcc_start_data(host, mrq->data,
+ mrq->sbc ? mrq->sbc : mrq->cmd,
+ 0);
else
- msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+ msmsdcc_start_command(host,
+ mrq->sbc ? mrq->sbc : mrq->cmd,
+ 0);
} else {
msmsdcc_start_command(host, mrq->cmd, 0);
}
@@ -1938,7 +1957,7 @@
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msmsdcc_host *host = mmc_priv(mmc);
- unsigned long flags, timeout;
+ unsigned long flags;
/*
* Get the SDIO AL client out of LPM.
@@ -1995,15 +2014,16 @@
* Set timeout value to 10 secs (or more in case of buggy cards)
*/
if ((mmc->card) && (mmc->card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT))
- timeout = 20000;
+ host->curr.req_tout_ms = 20000;
else
- timeout = MSM_MMC_REQ_TIMEOUT;
+ host->curr.req_tout_ms = MSM_MMC_REQ_TIMEOUT;
/*
* Kick the software request timeout timer here with the timeout
* value identified above
*/
mod_timer(&host->req_tout_timer,
- (jiffies + msecs_to_jiffies(timeout)));
+ (jiffies +
+ msecs_to_jiffies(host->curr.req_tout_ms)));
host->curr.mrq = mrq;
if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
@@ -2024,20 +2044,18 @@
host->sdcc_version) {
host->curr.wait_for_auto_prog_done = 1;
}
+ if ((mrq->cmd->opcode == MMC_WRITE_BLOCK) ||
+ (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK))
+ host->curr.use_wr_data_pend = true;
}
if (mrq->data && mrq->sbc) {
mrq->sbc->mrq = mrq;
mrq->sbc->data = mrq->data;
- if (mrq->data->flags & MMC_DATA_WRITE) {
+ if (mrq->data->flags & MMC_DATA_WRITE)
host->curr.wait_for_auto_prog_done = 1;
- msmsdcc_start_command(host, mrq->sbc, 0);
- } else {
- msmsdcc_request_start(host, mrq);
- }
- } else {
- msmsdcc_request_start(host, mrq);
}
+ msmsdcc_request_start(host, mrq);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2616,6 +2634,179 @@
}
}
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int msmsdcc_get_bw_required(struct msmsdcc_host *host,
+ struct mmc_ios *ios)
+{
+ unsigned int bw;
+
+ bw = host->clk_rate;
+ /*
+ * For DDR mode, SDCC controller clock will be at
+ * the double rate than the actual clock that goes to card.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ bw /= 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ bw /= 8;
+
+ return bw;
+}
+
+static int msmsdcc_msm_bus_get_vote_for_bw(struct msmsdcc_host *host,
+ unsigned int bw)
+{
+ unsigned int *table = host->plat->msm_bus_voting_data->bw_vecs;
+ unsigned int size = host->plat->msm_bus_voting_data->bw_vecs_size;
+ int i;
+
+ if (host->msm_bus_vote.is_max_bw_needed && bw)
+ return host->msm_bus_vote.max_bw_vote;
+
+ for (i = 0; i < size; i++) {
+ if (bw <= table[i])
+ break;
+ }
+
+ if (i && (i == size))
+ i--;
+
+ return i;
+}
+
+static int msmsdcc_msm_bus_register(struct msmsdcc_host *host)
+{
+ int rc = 0;
+ struct msm_bus_scale_pdata *use_cases;
+
+ if (host->plat->msm_bus_voting_data &&
+ host->plat->msm_bus_voting_data->use_cases &&
+ host->plat->msm_bus_voting_data->bw_vecs &&
+ host->plat->msm_bus_voting_data->bw_vecs_size) {
+ use_cases = host->plat->msm_bus_voting_data->use_cases;
+ host->msm_bus_vote.client_handle =
+ msm_bus_scale_register_client(use_cases);
+ } else {
+ return 0;
+ }
+
+ if (!host->msm_bus_vote.client_handle) {
+ pr_err("%s: msm_bus_scale_register_client() failed\n",
+ mmc_hostname(host->mmc));
+ rc = -EFAULT;
+ } else {
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->msm_bus_vote.min_bw_vote =
+ msmsdcc_msm_bus_get_vote_for_bw(host, 0);
+ host->msm_bus_vote.max_bw_vote =
+ msmsdcc_msm_bus_get_vote_for_bw(host, UINT_MAX);
+ }
+
+ return rc;
+}
+
+static void msmsdcc_msm_bus_unregister(struct msmsdcc_host *host)
+{
+ if (host->msm_bus_vote.client_handle)
+ msm_bus_scale_unregister_client(
+ host->msm_bus_vote.client_handle);
+}
+
+/*
+ * This function must be called with host lock acquired.
+ * Caller of this function should also ensure that msm bus client
+ * handle is not null.
+ */
+static inline int msmsdcc_msm_bus_set_vote(struct msmsdcc_host *host,
+ int vote,
+ unsigned long flags)
+{
+ int rc = 0;
+
+ if (vote != host->msm_bus_vote.curr_vote) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ rc = msm_bus_scale_client_update_request(
+ host->msm_bus_vote.client_handle, vote);
+ if (rc)
+ pr_err("%s: msm_bus_scale_client_update_request() failed."
+ " bus_client_handle=0x%x, vote=%d, err=%d\n",
+ mmc_hostname(host->mmc),
+ host->msm_bus_vote.client_handle, vote, rc);
+ spin_lock_irqsave(&host->lock, flags);
+ if (!rc)
+ host->msm_bus_vote.curr_vote = vote;
+ }
+
+ return rc;
+}
+
+/*
+ * Internal work. Work to set 0 bandwidth for msm bus.
+ */
+static void msmsdcc_msm_bus_work(struct work_struct *work)
+{
+ struct msmsdcc_host *host = container_of(work,
+ struct msmsdcc_host,
+ msm_bus_vote.vote_work.work);
+ unsigned long flags;
+
+ if (!host->msm_bus_vote.client_handle)
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* don't vote for 0 bandwidth if any request is in progress */
+ if (!host->curr.mrq)
+ msmsdcc_msm_bus_set_vote(host,
+ host->msm_bus_vote.min_bw_vote, flags);
+ else
+ pr_warning("%s: %s: SDCC transfer in progress. skipping"
+ " bus voting to 0 bandwidth\n",
+ mmc_hostname(host->mmc), __func__);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * This function cancels any scheduled delayed work
+ * and sets the bus vote based on ios argument.
+ * If "ios" argument is NULL, bandwidth required is 0 else
+ * calculate the bandwidth based on ios parameters.
+ */
+static void msmsdcc_msm_bus_cancel_work_and_set_vote(
+ struct msmsdcc_host *host,
+ struct mmc_ios *ios)
+{
+ unsigned long flags;
+ unsigned int bw;
+ int vote;
+
+ if (!host->msm_bus_vote.client_handle)
+ return;
+
+ bw = ios ? msmsdcc_get_bw_required(host, ios) : 0;
+
+ cancel_delayed_work_sync(&host->msm_bus_vote.vote_work);
+ spin_lock_irqsave(&host->lock, flags);
+ vote = msmsdcc_msm_bus_get_vote_for_bw(host, bw);
+ msmsdcc_msm_bus_set_vote(host, vote, flags);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/* This function queues work which will set the bandwidth requirement to 0 */
+static void msmsdcc_msm_bus_queue_work(struct msmsdcc_host *host)
+{
+ unsigned long flags;
+
+ if (!host->msm_bus_vote.client_handle)
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->msm_bus_vote.min_bw_vote != host->msm_bus_vote.curr_vote)
+ queue_delayed_work(system_nrt_wq,
+ &host->msm_bus_vote.vote_work,
+ msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static void
msmsdcc_cfg_sdio_wakeup(struct msmsdcc_host *host, bool enable_wakeup_irq)
{
@@ -2928,15 +3119,14 @@
* clocks mci_irqenable will be written to MASK0 register.
*/
+ spin_lock_irqsave(&host->lock, flags);
if (enable) {
- spin_lock_irqsave(&host->lock, flags);
host->mci_irqenable |= MCI_SDIOINTOPERMASK;
if (host->clks_on) {
writel_relaxed(readl_relaxed(host->base + MMCIMASK0) |
MCI_SDIOINTOPERMASK, host->base + MMCIMASK0);
mb();
}
- spin_unlock_irqrestore(&host->lock, flags);
} else {
host->mci_irqenable &= ~MCI_SDIOINTOPERMASK;
if (host->clks_on) {
@@ -2945,6 +3135,7 @@
mb();
}
}
+ spin_unlock_irqrestore(&host->lock, flags);
}
#ifdef CONFIG_PM_RUNTIME
@@ -2971,14 +3162,14 @@
msmsdcc_pm_qos_update_latency(host, 1);
if (mmc->card && mmc_card_sdio(mmc->card))
- return 0;
+ goto out;
if (host->sdcc_suspended && host->pending_resume &&
!pm_runtime_suspended(dev)) {
host->pending_resume = false;
pm_runtime_get_noresume(dev);
rc = msmsdcc_runtime_resume(dev);
- goto out;
+ goto skip_get_sync;
}
if (dev->power.runtime_status == RPM_SUSPENDING) {
@@ -2990,14 +3181,15 @@
rc = pm_runtime_get_sync(dev);
-out:
+skip_get_sync:
if (rc < 0) {
pr_info("%s: %s: failed with error %d", mmc_hostname(mmc),
__func__, rc);
msmsdcc_print_rpm_info(host);
return rc;
}
-
+out:
+ msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios);
return 0;
}
@@ -3008,8 +3200,10 @@
msmsdcc_pm_qos_update_latency(host, 0);
- if (mmc->card && mmc_card_sdio(mmc->card))
- return 0;
+ if (mmc->card && mmc_card_sdio(mmc->card)) {
+ rc = 0;
+ goto out;
+ }
if (host->plat->disable_runtime_pm)
return -ENOTSUPP;
@@ -3029,7 +3223,9 @@
return rc;
}
- return 0;
+out:
+ msmsdcc_msm_bus_queue_work(host);
+ return rc;
}
#else
static void msmsdcc_print_rpm_info(struct msmsdcc_host *host) {}
@@ -3043,8 +3239,10 @@
msmsdcc_pm_qos_update_latency(host, 1);
- if (mmc->card && mmc_card_sdio(mmc->card))
- return 0;
+ if (mmc->card && mmc_card_sdio(mmc->card)) {
+ rc = 0;
+ goto out;
+ }
if (host->sdcc_suspended && host->pending_resume) {
host->pending_resume = false;
@@ -3069,7 +3267,7 @@
__func__, rc);
return rc;
}
-
+ msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios);
return 0;
}
@@ -3081,7 +3279,7 @@
msmsdcc_pm_qos_update_latency(host, 0);
if (mmc->card && mmc_card_sdio(mmc->card))
- return 0;
+ goto out;
mutex_lock(&host->clk_mutex);
spin_lock_irqsave(&host->lock, flags);
@@ -3094,6 +3292,8 @@
spin_unlock_irqrestore(&host->lock, flags);
mutex_unlock(&host->clk_mutex);
+out:
+ msmsdcc_msm_bus_queue_work(host);
return 0;
}
#endif
@@ -3748,10 +3948,13 @@
}
if (host->plat->is_sdio_al_client) {
wake_lock(&host->sdio_wlock);
+ spin_unlock(&host->lock);
mmc_signal_sdio_irq(host->mmc);
+ goto out_unlocked;
}
spin_unlock(&host->lock);
+out_unlocked:
return IRQ_HANDLED;
}
@@ -4213,10 +4416,46 @@
static DEVICE_ATTR(polling, S_IRUGO | S_IWUSR,
show_polling, set_polling);
+
+static ssize_t
+show_sdcc_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+set_sdcc_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ uint32_t value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->msm_bus_vote.is_max_bw_needed = !!value;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(max_bus_bw, S_IRUGO | S_IWUSR,
+ show_sdcc_to_mem_max_bus_bw, set_sdcc_to_mem_max_bus_bw);
+
static struct attribute *dev_attrs[] = {
- &dev_attr_polling.attr,
+ &dev_attr_max_bus_bw.attr,
+ /* if polling is enabled, this will be filled with dev_attr_polling */
+ NULL,
NULL,
};
+
static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
@@ -4305,10 +4544,11 @@
}
pr_info("%s: got_dataend=%d, prog_enable=%d,"
- " wait_for_auto_prog_done=%d, got_auto_prog_done=%d\n",
- mmc_hostname(host->mmc), host->curr.got_dataend,
- host->prog_enable, host->curr.wait_for_auto_prog_done,
- host->curr.got_auto_prog_done);
+ " wait_for_auto_prog_done=%d, got_auto_prog_done=%d,"
+ " req_tout_ms=%d\n", mmc_hostname(host->mmc),
+ host->curr.got_dataend, host->prog_enable,
+ host->curr.wait_for_auto_prog_done,
+ host->curr.got_auto_prog_done, host->curr.req_tout_ms);
msmsdcc_print_rpm_info(host);
}
@@ -4340,8 +4580,7 @@
mrq->data->error = -ETIMEDOUT;
host->curr.data_xfered = 0;
if (host->dma.sg && host->is_dma_mode) {
- msm_dmov_stop_cmd(host->dma.channel,
- &host->dma.hdr, 0);
+ msm_dmov_flush(host->dma.channel, 0);
} else if (host->sps.sg && host->is_sps_mode) {
/* Stop current SPS transfer */
msmsdcc_sps_exit_curr_xfer(host);
@@ -4719,6 +4958,14 @@
pm_qos_add_request(&host->pm_qos_req_dma,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ ret = msmsdcc_msm_bus_register(host);
+ if (ret)
+ goto pm_qos_remove;
+
+ if (host->msm_bus_vote.client_handle)
+ INIT_DELAYED_WORK(&host->msm_bus_vote.vote_work,
+ msmsdcc_msm_bus_work);
+
ret = msmsdcc_vreg_init(host, true);
if (ret) {
pr_err("%s: msmsdcc_vreg_init() failed (%d)\n", __func__, ret);
@@ -4968,11 +5215,12 @@
#if defined(CONFIG_DEBUG_FS)
msmsdcc_dbg_createhost(host);
#endif
- if (!plat->status_irq) {
- ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
- if (ret)
- goto platform_irq_free;
- }
+ if (!plat->status_irq)
+ dev_attrs[1] = &dev_attr_polling.attr;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
+ if (ret)
+ goto platform_irq_free;
return 0;
platform_irq_free:
@@ -5002,6 +5250,8 @@
msmsdcc_vreg_init(host, false);
clk_disable:
clk_disable(host->clk);
+ msmsdcc_msm_bus_unregister(host);
+ pm_qos_remove:
if (host->cpu_dma_latency)
pm_qos_remove_request(&host->pm_qos_req_dma);
clk_put:
@@ -5079,6 +5329,11 @@
if (host->cpu_dma_latency)
pm_qos_remove_request(&host->pm_qos_req_dma);
+ if (host->msm_bus_vote.client_handle) {
+ msmsdcc_msm_bus_cancel_work_and_set_vote(host, NULL);
+ msmsdcc_msm_bus_unregister(host);
+ }
+
msmsdcc_vreg_init(host, false);
if (host->is_dma_mode) {
@@ -5193,9 +5448,11 @@
int rc = 0;
unsigned long flags;
+ if (host->plat->is_sdio_al_client) {
+ rc = 0;
+ goto out;
+ }
- if (host->plat->is_sdio_al_client)
- return 0;
pr_debug("%s: %s: start\n", mmc_hostname(mmc), __func__);
if (mmc) {
host->sdcc_suspending = 1;
@@ -5250,6 +5507,9 @@
wake_unlock(&host->sdio_suspend_wlock);
}
pr_debug("%s: %s: ends with err=%d\n", mmc_hostname(mmc), __func__, rc);
+out:
+ /* set bus bandwidth to 0 immediately */
+ msmsdcc_msm_bus_cancel_work_and_set_vote(host, NULL);
return rc;
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index e6bd16c..14677c6 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -78,6 +78,7 @@
#define MCI_DPSM_DIRECTION (1 << 1)
#define MCI_DPSM_MODE (1 << 2)
#define MCI_DPSM_DMAENABLE (1 << 3)
+#define MCI_DATA_PEND (1 << 17)
#define MCI_AUTO_PROG_DONE (1 << 19)
#define MCI_RX_DATA_PEND (1 << 20)
@@ -294,7 +295,9 @@
int got_dataend;
int wait_for_auto_prog_done;
int got_auto_prog_done;
+ bool use_wr_data_pend;
int user_pages;
+ u32 req_tout_ms;
};
struct msmsdcc_sps_ep_conn_data {
@@ -319,6 +322,15 @@
struct tasklet_struct tlet;
};
+struct msmsdcc_msm_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ bool is_max_bw_needed;
+ struct delayed_work vote_work;
+};
+
struct msmsdcc_host {
struct resource *core_irqres;
struct resource *bam_irqres;
@@ -398,6 +410,7 @@
bool sdio_wakeupirq_disabled;
struct mutex clk_mutex;
bool pending_resume;
+ struct msmsdcc_msm_bus_vote msm_bus_vote;
};
int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dedf3da..e13b5c3 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1437,6 +1437,8 @@
dto -= 13;
else
dto = 0;
+ /* Use the maximum timeout value allowed by the standard, i.e. 14
+ (0xE) */
if (dto > 14)
dto = 14;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8aab269..44041b8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3466,6 +3466,15 @@
for routing IP packets within the MSM using
BAM as a physical transport.
+config MSM_RMNET_SMUX
+ bool "RMNET SMUX Driver"
+ depends on N_SMUX
+ help
+ Implements RMNET over SMUX interface.
+ RMNET provides a virtual ethernet interface
+ for routing IP packets within the MSM using
+ HSUART as a physical transport.
+
config MSM_RMNET_DEBUG
bool "MSM RMNET debug interface"
depends on MSM_RMNET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7b3cd59..7373a61 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -304,6 +304,7 @@
obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
+obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
obj-$(CONFIG_NIU) += niu.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
diff --git a/drivers/net/msm_rmnet_smux.c b/drivers/net/msm_rmnet_smux.c
new file mode 100644
index 0000000..70e7182
--- /dev/null
+++ b/drivers/net/msm_rmnet_smux.c
@@ -0,0 +1,944 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SMUX Module.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/ip.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+
+/* Debug message support */
+static int msm_rmnet_smux_debug_mask;
+module_param_named(debug_enable, msm_rmnet_smux_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do { \
+ if (msm_rmnet_smux_debug_mask & m) \
+ pr_info(x); \
+} while (0)
+
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_SMUX_DEVICE_COUNT (1)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID -1
+
+#define DEVICE_INACTIVE 0x00
+#define DEVICE_ACTIVE 0x01
+
+#define HEADROOM_FOR_SMUX 8 /* for mux header */
+#define HEADROOM_FOR_QOS 8
+#define TAILROOM 8 /* for padding by mux layer */
+
+struct rmnet_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ ktime_t last_packet;
+ unsigned long wakeups_xmit;
+ unsigned long wakeups_rcv;
+ unsigned long timeout_us;
+#endif
+ spinlock_t lock;
+ struct tasklet_struct tsklt;
+ /* IOCTL specified mode (protocol, QoS header) */
+ u32 operation_mode;
+ uint8_t device_state;
+ uint8_t in_reset;
+};
+
+static struct net_device *netdevs[RMNET_SMUX_DEVICE_COUNT];
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ timeout_suspend_us = strict_strtoul(buf, NULL, 10);
+ return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%lu\n",
+ (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+ timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_suspend_us;
+ }
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_us;
+ }
+}
+
+static struct early_suspend rmnet_power_suspend = {
+ .suspend = rmnet_early_suspend,
+ .resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+ register_early_suspend(&rmnet_power_suspend);
+ return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+ int ret = 0;
+ ktime_t now;
+ if (p->timeout_us == 0) /* Check if disabled */
+ return 0;
+
+ /* Use real (wall) time. */
+ now = ktime_get_real();
+
+ if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+ ret = 1;
+
+ p->last_packet = now;
+ return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+ timeout_us = strict_strtoul(buf, NULL, 10);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ return n;
+}
+
+static ssize_t timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ p = netdev_priv(to_net_dev(d));
+ return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
+
+
+static int count_this_packet(void *_hdr, int len)
+{
+ struct ethhdr *hdr = _hdr;
+
+ if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+ return 0;
+
+ return 1;
+}
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = 0;
+
+ skb->dev = dev;
+
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+ dev->name, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ }
+ return protocol;
+}
+
+static void smux_read_done(void *rcv_dev, const void *meta_data)
+{
+ struct rmnet_private *p;
+ struct net_device *dev = rcv_dev;
+ u32 opmode;
+ unsigned long flags;
+ struct sk_buff *skb = NULL;
+ const struct smux_meta_read *read_meta_info = meta_data;
+
+ if (!dev || !read_meta_info) {
+ DBG1("%s:invalid read_done callback recieved", __func__);
+ return;
+ }
+
+ p = netdev_priv(dev);
+
+ skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+ if (!skb || skb->dev != dev) {
+ DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
+ __func__);
+ return;
+ }
+
+ /* Handle Rx frame format */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_IP(opmode)) {
+ /* Driver in IP mode */
+ skb->protocol =
+ rmnet_ip_type_trans(skb, dev);
+ } else {
+ /* Driver in Ethernet mode */
+ skb->protocol =
+ eth_type_trans(skb, dev);
+ }
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_rcv +=
+ rmnet_cause_wakeup(p);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += skb->len;
+ }
+ DBG2("[%s] Rx packet #%lu len=%d\n",
+ dev->name, p->stats.rx_packets,
+ skb->len);
+ /* Deliver to network stack */
+ netif_rx(skb);
+
+ return;
+}
+
+static void smux_write_done(void *dev, const void *meta_data)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 opmode;
+ struct sk_buff *skb = NULL;
+ const struct smux_meta_write *write_meta_info = meta_data;
+ unsigned long flags;
+
+ if (!dev || !write_meta_info) {
+ DBG1("%s: ERR:invalid WRITE_DONE callback recieved", __func__);
+ return;
+ }
+
+ skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+ if (!skb) {
+ DBG1("%s: ERR:skb pointer NULL in WRITE_DONE"
+ " CALLBACK", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ DBG1("%s: write complete\n", __func__);
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_packets++;
+ p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+ }
+ DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ ((struct net_device *)(dev))->name, p->stats.tx_packets,
+ skb->len, skb->mark);
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(dev) &&
+ msm_smux_is_ch_low(p->ch_id)) {
+ DBG0("%s: Low WM hit, waking queue=%p\n",
+ __func__, skb);
+ netif_wake_queue(dev);
+ }
+}
+
+/*
+ * rmnet_smux_notify() - event callback invoked by the SMUX layer.
+ * @priv:       channel cookie registered at msm_smux_open(); the net_device.
+ * @event_type: SMUX_* event identifier.
+ * @metadata:   event-specific payload; actual type depends on event_type.
+ *
+ * Translates SMUX channel events (connect/disconnect, read/write
+ * completion and failure, watermark crossings) into netdev state
+ * changes and statistics updates.
+ */
+void rmnet_smux_notify(void *priv, int event_type, const void *metadata)
+{
+	struct rmnet_private *p;
+	struct net_device *dev;
+	unsigned long flags;
+	struct sk_buff *skb = NULL;
+	u32 opmode;
+	const struct smux_meta_disconnected *ssr_info;
+	const struct smux_meta_read *read_meta_info;
+	const struct smux_meta_write *write_meta_info;
+
+	if (!priv) {
+		DBG0("%s: priv(cookie) NULL, ignoring notification:"
+		     " %d\n", __func__, event_type);
+		/* Every case below dereferences priv, so the event must
+		 * be dropped here instead of merely logged (previously
+		 * this fell through and dereferenced NULL). */
+		return;
+	}
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		p = netdev_priv(priv);
+		dev = priv;
+
+		DBG0("[%s] SMUX_CONNECTED event dev:%s\n", __func__, dev->name);
+
+		netif_carrier_on(dev);
+		netif_start_queue(dev);
+
+		spin_lock_irqsave(&p->lock, flags);
+		p->device_state = DEVICE_ACTIVE;
+		spin_unlock_irqrestore(&p->lock, flags);
+		break;
+
+	case SMUX_DISCONNECTED:
+		p = netdev_priv(priv);
+		dev = priv;
+		ssr_info = metadata;
+
+		DBG0("[%s] SMUX_DISCONNECTED event dev:%s\n",
+		     __func__, dev->name);
+
+		if (ssr_info && ssr_info->is_ssr == 1)
+			DBG0("SSR detected on :%s\n", dev->name);
+
+		netif_carrier_off(dev);
+		netif_stop_queue(dev);
+
+		spin_lock_irqsave(&p->lock, flags);
+		p->device_state = DEVICE_INACTIVE;
+		spin_unlock_irqrestore(&p->lock, flags);
+		break;
+
+	case SMUX_READ_DONE:
+		smux_read_done(priv, metadata);
+		break;
+
+	case SMUX_READ_FAIL:
+		p = netdev_priv(priv);
+		dev = priv;
+		read_meta_info = metadata;
+
+		if (!dev || !read_meta_info) {
+			DBG1("%s: ERR:invalid read failed callback"
+			     " received", __func__);
+			return;
+		}
+
+		skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+		if (!skb) {
+			DBG1("%s: ERR:skb pointer NULL in read fail"
+			     " CALLBACK", __func__);
+			return;
+		}
+
+		DBG0("%s: read failed\n", __func__);
+
+		opmode = p->operation_mode;
+
+		/* Only count data packets as drops; control traffic is
+		 * filtered out by count_this_packet(). */
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len))
+			p->stats.rx_dropped++;
+
+		dev_kfree_skb_any(skb);
+		break;
+
+	case SMUX_WRITE_DONE:
+		smux_write_done(priv, metadata);
+		break;
+
+	case SMUX_WRITE_FAIL:
+		p = netdev_priv(priv);
+		dev = priv;
+		write_meta_info = metadata;
+
+		if (!dev || !write_meta_info) {
+			DBG1("%s: ERR:invalid WRITE_FAIL"
+			     " callback received", __func__);
+			return;
+		}
+
+		skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+		if (!skb) {
+			DBG1("%s: ERR:skb pointer NULL in"
+			     " WRITE_FAIL CALLBACK", __func__);
+			return;
+		}
+
+		DBG0("%s: write failed\n", __func__);
+
+		opmode = p->operation_mode;
+
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+			p->stats.tx_dropped++;
+		}
+
+		dev_kfree_skb_any(skb);
+		break;
+
+	case SMUX_LOW_WM_HIT:
+		dev = priv;
+		DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name);
+		netif_start_queue(dev);
+		break;
+
+	case SMUX_HIGH_WM_HIT:
+		dev = priv;
+		/* was logging "Low WM hit" here - copy/paste error */
+		DBG0("[%s] High WM hit dev:%s\n", __func__, dev->name);
+		netif_stop_queue(dev);
+		break;
+
+	default:
+		dev = priv;
+		DBG0("[%s] Invalid event:%d received on"
+		     " dev: %s\n", __func__, event_type, dev->name);
+		break;
+	}
+
+	return;
+}
+
+/*
+ * get_rx_buffers() - RX buffer allocation callback for the SMUX layer.
+ * @priv:     the net_device registered as the channel cookie.
+ * @pkt_priv: out: per-packet handle (the skb) handed back in read events.
+ * @buffer:   out: pointer to @size writable bytes for received data.
+ * @size:     required buffer length in bytes.
+ *
+ * Returns 0 on success or -ENOMEM if skb allocation fails.  Allocates
+ * with GFP_ATOMIC, so it is safe to call from atomic context.
+ */
+int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size)
+{
+	struct net_device *dev = (struct net_device *) priv;
+	struct sk_buff *skb = NULL;
+	void *ptr = NULL;
+
+	DBG0("[%s] dev:%s\n", __func__, dev->name);
+	skb = __dev_alloc_skb(size, GFP_ATOMIC);
+	if (skb == NULL) {
+		DBG0("%s: unable to alloc skb\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */
+	/* Populate some params now. */
+	skb->dev = dev;
+	/* Claim the full length up front; skb->data now spans the buffer
+	 * the SMUX layer will fill in. */
+	ptr = skb_put(skb, size);
+
+	skb_set_network_header(skb, 0);
+
+	/* done with skb setup, return the buffer pointer. */
+	*pkt_priv = skb;
+	*buffer = ptr;
+
+	return 0;
+}
+
+/*
+ * __rmnet_open() - common open helper.
+ *
+ * Succeeds only once the underlying SMUX channel is connected
+ * (device_state == DEVICE_ACTIVE); returns -ENODEV otherwise.
+ */
+static int __rmnet_open(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (p->device_state == DEVICE_ACTIVE) {
+		return 0;
+	} else {
+		DBG0("[%s] Platform inactive\n", dev->name);
+		return -ENODEV;
+	}
+}
+
+/* ndo_open: start the TX queue once the SMUX transport is up. */
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+/* ndo_stop: stop the TX queue; the SMUX channel itself stays open. */
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/* ndo_change_mtu: accept any MTU in [0, RMNET_DATA_LEN]. */
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+	     dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+/*
+ * _rmnet_xmit() - hand one skb to the SMUX channel.
+ *
+ * In QoS mode a QMI_QOS_HDR_S header is prepended, carrying the flow id
+ * from skb->mark (headroom for it is reserved via dev->needed_headroom).
+ * On a successful msm_smux_write() the skb is owned by the SMUX layer
+ * and is freed in the write-done/write-fail callbacks; it must not be
+ * touched here afterwards.
+ *
+ * Returns 0 on success, -EAGAIN if the channel is full (caller should
+ * requeue), or -EPERM on any other write error.
+ */
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int smux_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+
+	/* if write() succeeds, skb access is unsafe in this process */
+	smux_ret = msm_smux_write(p->ch_id, skb, skb->data, skb->len);
+
+	if (smux_ret != 0 && smux_ret != -EAGAIN) {
+		pr_err("[%s] %s: write returned error %d",
+		       dev->name, __func__, smux_ret);
+		return -EPERM;
+	}
+
+	return smux_ret;
+}
+
+/*
+ * rmnet_xmit() - ndo_start_xmit handler.
+ *
+ * Returns NETDEV_TX_OK (0) when the packet has been consumed (queued to
+ * SMUX or dropped), or NETDEV_TX_BUSY to ask the stack to requeue it.
+ */
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_queue_stopped(dev) || (p->device_state == DEVICE_INACTIVE)) {
+		pr_err("[%s]fatal: rmnet_xmit called when "
+		       "netif_queue is stopped", dev->name);
+		/* Returning NETDEV_TX_OK tells the stack the skb was
+		 * consumed, so it must be freed here to avoid leaking it
+		 * (previously the skb was dropped without a free). */
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
+
+	ret = _rmnet_xmit(skb, dev);
+
+	if (ret == -EPERM) {
+		/* Do not stop the queue here.
+		 * It would lead to an irrecoverable state.
+		 */
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+
+	if (msm_smux_is_ch_full(p->ch_id) || (ret == -EAGAIN)) {
+		/*
+		 * EAGAIN means we attempted to overflow the high watermark.
+		 * Clearly the queue is not stopped like it should be, so
+		 * stop it and return BUSY to the TCP/IP framework.  It will
+		 * retry this packet once the queue is restarted, which
+		 * happens when the low watermark is hit.
+		 */
+		netif_stop_queue(dev);
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
+/* ndo_get_stats: return the driver-maintained counters. */
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+/* Multicast filtering is not supported; callback is intentionally empty. */
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+/* ndo_tx_timeout: log only; no recovery action is taken here. */
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+/* netdev callbacks used while in Ethernet (RMNET_MODE_LLP_ETH) mode */
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+/* netdev callbacks used while in raw-IP (RMNET_MODE_LLP_IP) mode;
+ * raw IP has no L2 address, so the address ops are deliberately 0. */
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/*
+ * rmnet_ioctl() - private ioctl handler for RMNET_IOCTL_* commands.
+ *
+ * Switches the link protocol between Ethernet and raw IP, toggles the
+ * QMI QoS header, and reports the current operating mode.  Writes to
+ * p->operation_mode are performed under p->lock; the mode tests here
+ * read it unlocked, matching the rest of the driver.
+ *
+ * Returns 0 on success or -EINVAL for an unsupported command.
+ */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			/* ether_setup() resets the MTU; restore it */
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:	/* Set RAWIP protocol */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops = 0;  /* No header */
+			dev->type = ARPHRD_RAWIP;
+			dev->hard_header_len = 0;
+			dev->mtu = prev_mtu;
+			dev->addr_len = 0;
+			dev->flags &= ~(IFF_BROADCAST |
+					IFF_MULTICAST);
+
+			dev->needed_headroom = HEADROOM_FOR_SMUX +
+				HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
+		/* NOTE(review): long-standing driver ABI returns the mode
+		 * bits cast into the ifru_data pointer. */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+		     dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+		     dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:	/* Get QoS header state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:	/* Get operation mode */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:		/* Open transport port */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+		     dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:		/* Close transport port */
+		/* No teardown is performed here; the SMUX channel is
+		 * managed by probe/remove. */
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+		     dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+		       dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+/*
+ * rmnet_setup() - alloc_netdev() setup callback.
+ *
+ * Initial configuration is Ethernet mode; rmnet_ioctl() can switch the
+ * device to raw IP later.  Headroom covers the SMUX framing plus the
+ * optional QMI QoS header.
+ */
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_SMUX + HEADROOM_FOR_QOS ;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+
+/*
+ * smux_rmnet_probe() - platform probe: open one SMUX logical channel
+ * per currently inactive rmnet device.
+ *
+ * Open failures are logged and skipped so the remaining channels still
+ * get a chance to open; the function always returns 0.
+ */
+static int smux_rmnet_probe(struct platform_device *pdev)
+{
+	int i;
+	int r;
+	struct rmnet_private *p;
+
+	for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+		p = netdev_priv(netdevs[i]);
+
+		if ((p != NULL) && (p->device_state == DEVICE_INACTIVE)) {
+			r = msm_smux_open(p->ch_id,
+					  netdevs[i],
+					  rmnet_smux_notify,
+					  get_rx_buffers);
+
+			if (r < 0) {
+				DBG0("%s: ch=%d open failed with rc %d\n",
+				     __func__, p->ch_id, r);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * smux_rmnet_remove() - platform remove: close every active SMUX
+ * channel and take the corresponding netdev offline.
+ *
+ * The carrier is dropped and the queue stopped only when the close
+ * succeeds; a failed close is logged and that device is left as-is.
+ */
+static int smux_rmnet_remove(struct platform_device *pdev)
+{
+	int i;
+	int r;
+	struct rmnet_private *p;
+
+	for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+		p = netdev_priv(netdevs[i]);
+
+		if ((p != NULL) && (p->device_state == DEVICE_ACTIVE)) {
+			r = msm_smux_close(p->ch_id);
+
+			if (r < 0) {
+				DBG0("%s: ch=%d close failed with rc %d\n",
+				     __func__, p->ch_id, r);
+				continue;
+			}
+			netif_carrier_off(netdevs[i]);
+			netif_stop_queue(netdevs[i]);
+		}
+	}
+	return 0;
+}
+
+
+/* Matches the "SMUX_RMNET" platform device registered by the SMUX core
+ * when the transport comes up, triggering channel open/close. */
+static struct platform_driver smux_rmnet_driver = {
+	.probe = smux_rmnet_probe,
+	.remove = smux_rmnet_remove,
+	.driver = {
+		.name = "SMUX_RMNET",
+		.owner = THIS_MODULE,
+	},
+};
+
+
+/*
+ * rmnet_init() - module init.
+ *
+ * Allocates and registers RMNET_SMUX_DEVICE_COUNT netdevs (one per
+ * SMUX logical channel), then registers the platform driver that opens
+ * the channels when the SMUX transport appears.
+ *
+ * NOTE(review): if register_netdev() fails part-way through, the
+ * netdevs registered on earlier iterations are not unregistered and
+ * netdevs[n] is left pointing at the freed device — TODO confirm
+ * whether cleanup should be added on this error path.
+ */
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+	for (n = 0; n < RMNET_SMUX_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet_smux%d", rmnet_setup);
+
+		if (!dev) {
+			pr_err("%s: no memory for netdev %d\n", __func__, n);
+			return -ENOMEM;
+		}
+
+		netdevs[n] = dev;
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		p->in_reset = 0;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			pr_err("%s: unable to register netdev"
+			       " %d rc=%d\n", __func__, n, ret);
+			free_netdev(dev);
+			return ret;
+		}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		/* Debug sysfs attrs are best-effort: the first failure
+		 * skips the remaining attrs for this device and init
+		 * continues with the next device. */
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+	}
+
+	ret = platform_driver_register(&smux_rmnet_driver);
+	if (ret) {
+		pr_err("%s: registration failed n=%d rc=%d\n",
+		       __func__, n, ret);
+		return ret;
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SMUX TRANSPORT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index cc95fcd..ad9dc7d 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -45,7 +45,7 @@
int smd_channel_ready;
unsigned int serial_number;
int thermal_mitigation;
- void (*tm_notify)(int);
+ void (*tm_notify)(struct device *, int);
struct wcnss_wlan_config wlan_config;
struct delayed_work wcnss_work;
} *penv = NULL;
@@ -99,7 +99,7 @@
return -EINVAL;
penv->thermal_mitigation = value;
if (penv->tm_notify)
- (penv->tm_notify)(value);
+ (penv->tm_notify)(dev, value);
return count;
}
@@ -275,14 +275,16 @@
}
EXPORT_SYMBOL(wcnss_wlan_unregister_pm_ops);
-void wcnss_register_thermal_mitigation(void (*tm_notify)(int))
+void wcnss_register_thermal_mitigation(struct device *dev,
+ void (*tm_notify)(struct device *, int))
{
- if (penv && tm_notify)
+ if (penv && dev && tm_notify)
penv->tm_notify = tm_notify;
}
EXPORT_SYMBOL(wcnss_register_thermal_mitigation);
-void wcnss_unregister_thermal_mitigation(void (*tm_notify)(int))
+void wcnss_unregister_thermal_mitigation(
+ void (*tm_notify)(struct device *, int))
{
if (penv && tm_notify) {
if (tm_notify != penv->tm_notify)
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 0fe8f2a..6d61bb6 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -662,6 +662,11 @@
{
struct sps_bam *bam;
+ if (handle == NULL) {
+ SPS_ERR("sps:%s:handle is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
list_for_each_entry(bam, &sps->bams_q, list) {
if (bam->props.phys_addr == phys_addr) {
*handle = (u32) bam;
@@ -816,6 +821,14 @@
struct sps_bam *bam;
int result;
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (connect == NULL) {
+ SPS_ERR("sps:%s:connection is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (sps == NULL)
return -ENODEV;
@@ -959,6 +972,14 @@
SPS_DBG2("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (reg == NULL) {
+ SPS_ERR("sps:%s:registered event is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (sps == NULL)
return -ENODEV;
@@ -993,6 +1014,11 @@
SPS_DBG2("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1017,6 +1043,11 @@
SPS_DBG2("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1041,6 +1072,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (transfer == NULL) {
+ SPS_ERR("sps:%s:transfer is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1066,6 +1105,11 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if ((flags & SPS_IOVEC_FLAG_NWD) &&
!(flags & (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD))) {
SPS_ERR("sps:NWD is only valid with EOT or CMD.\n");
@@ -1119,6 +1163,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (notify == NULL) {
+ SPS_ERR("sps:%s:event_notify is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1142,6 +1194,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (empty == NULL) {
+ SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1165,6 +1225,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (count == NULL) {
+ SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1187,6 +1255,11 @@
SPS_DBG2("sps:%s: dev = 0x%x", __func__, dev);
+ if (dev == 0) {
+ SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
+ return SPS_ERROR;
+ }
+
mutex_lock(&sps->lock);
/* Search for the target BAM device */
bam = sps_h2bam(dev);
@@ -1219,8 +1292,11 @@
{
struct sps_pipe *pipe = h;
- if (config == NULL) {
- SPS_ERR("sps:Config pointer is NULL");
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (config == NULL) {
+ SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
return SPS_ERROR;
}
@@ -1243,6 +1319,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (config == NULL) {
+ SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1268,6 +1352,14 @@
struct sps_bam *bam;
int result;
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (connect == NULL) {
+ SPS_ERR("sps:%s:connection is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (owner != SPS_OWNER_REMOTE) {
SPS_ERR("sps:Unsupported ownership state: %d", owner);
return SPS_ERROR;
@@ -1309,6 +1401,11 @@
int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
struct sps_mem_buffer *mem_buffer)
{
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (sps == NULL)
return -ENODEV;
@@ -1340,6 +1437,11 @@
*/
int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
{
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID) {
SPS_ERR("sps:invalid memory to free");
return SPS_ERROR;
@@ -1364,6 +1466,14 @@
SPS_DBG("sps:%s.", __func__);
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (desc_num == NULL) {
+ SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1390,6 +1500,14 @@
int ok;
int result;
+ if (bam_props == NULL) {
+ SPS_ERR("sps:%s:bam_props is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (dev_handle == NULL) {
+ SPS_ERR("sps:%s:device handle is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
if (sps == NULL)
return SPS_ERROR;
@@ -1399,9 +1517,6 @@
return -EAGAIN;
}
- if (bam_props == NULL || dev_handle == NULL)
- return SPS_ERROR;
-
/* Check BAM parameters */
manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
if (manage != SPS_BAM_MGR_NONE) {
@@ -1532,6 +1647,11 @@
{
struct sps_bam *bam;
+ if (dev_handle == 0) {
+ SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
+ return SPS_ERROR;
+ }
+
bam = sps_h2bam(dev_handle);
if (bam == NULL) {
SPS_ERR("sps:did not find a BAM for this handle");
@@ -1578,13 +1698,16 @@
struct sps_bam *bam;
int result;
- if (h == NULL || iovec == NULL) {
- SPS_ERR("sps:invalid pipe or iovec");
+ SPS_DBG("sps:%s.", __func__);
+
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (iovec == NULL) {
+ SPS_ERR("sps:%s:iovec pointer is NULL.\n", __func__);
return SPS_ERROR;
}
- SPS_DBG("sps:%s.", __func__);
-
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
@@ -1611,8 +1734,14 @@
SPS_DBG("sps:%s.", __func__);
- if (h == NULL || timer_ctrl == NULL) {
- SPS_ERR("sps:invalid pipe or timer ctrl");
+ if (h == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (timer_ctrl == NULL) {
+ SPS_ERR("sps:%s:timer_ctrl pointer is NULL.\n", __func__);
+ return SPS_ERROR;
+ } else if (timer_result == NULL) {
+ SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
return SPS_ERROR;
}
@@ -1657,6 +1786,11 @@
{
int res;
+ if (ctx == NULL) {
+ SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
+ return SPS_ERROR;
+ }
+
res = sps_client_de_init(ctx);
if (res == 0)
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index 0abd739..e0289ad 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -17,6 +17,7 @@
#include <linux/slab.h> /* kzalloc() */
#include <linux/interrupt.h> /* request_irq() */
#include <linux/memory.h> /* memset */
+#include <linux/vmalloc.h>
#include "sps_bam.h"
#include "bam.h"
@@ -914,7 +915,11 @@
dev->pipe_remote_mask &= ~(1UL << pipe_index);
bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
if (pipe->sys.desc_cache != NULL) {
- kfree(pipe->sys.desc_cache);
+ u32 size = pipe->num_descs * sizeof(void *);
+ if (pipe->desc_size + size <= PAGE_SIZE)
+ kfree(pipe->sys.desc_cache);
+ else
+ vfree(pipe->sys.desc_cache);
pipe->sys.desc_cache = NULL;
}
dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
@@ -1034,8 +1039,16 @@
&& (pipe->state & BAM_STATE_BAM2BAM) == 0) {
/* Allocate both descriptor cache and user pointer array */
size = pipe->num_descs * sizeof(void *);
- pipe->sys.desc_cache =
- kzalloc(pipe->desc_size + size, GFP_KERNEL);
+
+ if (pipe->desc_size + size <= PAGE_SIZE)
+ pipe->sys.desc_cache =
+ kzalloc(pipe->desc_size + size, GFP_KERNEL);
+ else {
+ pipe->sys.desc_cache =
+ vmalloc(pipe->desc_size + size);
+ memset(pipe->sys.desc_cache, 0, pipe->desc_size + size);
+ }
+
if (pipe->sys.desc_cache == NULL) {
/*** MUST BE LAST POINT OF FAILURE (see below) *****/
SPS_ERR("sps:Desc cache error: BAM 0x%x pipe %d: %d",
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index de5e741..b1a16bb 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -1555,6 +1555,26 @@
pr_debug("Enter charge=%d\n", mA);
+ if (!the_chip) {
+ pr_err("chip not yet initalized\n");
+ return;
+ }
+
+ /*
+ * Reject VBUS requests if USB connection is the only available
+ * power source. This makes sure that if booting without
+ * battery the iusb_max value is not decreased avoiding potential
+ * brown_outs.
+ *
+ * This would also apply when the battery has been
+ * removed from the running system.
+ */
+ if (!get_prop_batt_present(the_chip)
+ && !is_dc_chg_plugged_in(the_chip)) {
+ pr_err("rejected: no other power source connected\n");
+ return;
+ }
+
if (usb_max_current && mA > usb_max_current) {
pr_warn("restricting usb current to %d instead of %d\n",
usb_max_current, mA);
@@ -1863,6 +1883,8 @@
chip->ext_charging = false;
chip->ext_charge_done = false;
bms_notify_check(chip);
+ /* Update battery charging LEDs and user space battery info */
+ power_supply_changed(&chip->batt_psy);
}
static void handle_start_ext_chg(struct pm8921_chg_chip *chip)
@@ -1917,6 +1939,8 @@
/* Start BMS */
schedule_delayed_work(&chip->eoc_work, delay);
wake_lock(&chip->eoc_wake_lock);
+ /* Update battery charging LEDs and user space battery info */
+ power_supply_changed(&chip->batt_psy);
}
static void turn_off_usb_ovp_fet(struct pm8921_chg_chip *chip)
@@ -3359,13 +3383,6 @@
return rc;
}
- /* init with the lowest USB current */
- rc = pm_chg_iusbmax_set(chip, 0);
- if (rc) {
- pr_err("Failed to set usb max to %d rc=%d\n", 0, rc);
- return rc;
- }
-
if (chip->safety_time != 0) {
rc = pm_chg_tchg_max_set(chip, chip->safety_time);
if (rc) {
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2fc95af..152bbb4 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -392,6 +392,7 @@
u32 num_transfers;
atomic_set(&dd->rx_irq_called, 0);
+ atomic_set(&dd->tx_irq_called, 0);
if (dd->write_len && !dd->read_len) {
/* WR-WR transfer */
bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
@@ -712,6 +713,8 @@
readl_relaxed(dd->base + SPI_OPERATIONAL) &
SPI_OP_MAX_OUTPUT_DONE_FLAG) {
msm_spi_ack_transfer(dd);
+ if (atomic_inc_return(&dd->tx_irq_called) == 1)
+ return IRQ_HANDLED;
msm_spi_complete(dd);
return IRQ_HANDLED;
}
@@ -1586,9 +1589,12 @@
}
/* restore original context */
dd = container_of(cmd, struct msm_spi, tx_hdr);
- if (result & DMOV_RSLT_DONE)
+ if (result & DMOV_RSLT_DONE) {
dd->stat_dmov_tx++;
- else {
+ if ((atomic_inc_return(&dd->tx_irq_called) == 1))
+ return;
+ complete(&dd->transfer_complete);
+ } else {
/* Error or flush */
if (result & DMOV_RSLT_ERROR) {
dev_err(dd->dev, "DMA error (0x%08x)\n", result);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index a434bbb..223fce6 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -285,6 +285,7 @@
int output_block_size;
int burst_size;
atomic_t rx_irq_called;
+ atomic_t tx_irq_called;
/* Used to pad messages unaligned to block size */
u8 *tx_padding;
dma_addr_t tx_padding_dma;
diff --git a/drivers/thermal/msm8960_tsens.c b/drivers/thermal/msm8960_tsens.c
index fbb377e..78a1292 100644
--- a/drivers/thermal/msm8960_tsens.c
+++ b/drivers/thermal/msm8960_tsens.c
@@ -723,7 +723,8 @@
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
- TSENS_8064_SENSORS_EN;
+ (((1 << tmdev->tsens_num_sensor) - 1)
+ << TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_MIN_STATUS_MASK |
@@ -823,7 +824,8 @@
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
- TSENS_8064_SENSORS_EN;
+ (((1 << tmdev->tsens_num_sensor) - 1)
+ << TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_LOWER_STATUS_CLR |
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index bd7cc05..ae5a62c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -319,6 +319,25 @@
This line discipline provides support for the GSM MUX protocol and
presents the mux as a set of 61 individual tty devices.
+config N_SMUX
+ tristate "SMUX line discipline support"
+ depends on NET && SERIAL_MSM_HS
+ help
+ This line discipline provides support for the Serial MUX protocol
+ and provides a TTY and kernel API for multiple logical channels.
+
+config N_SMUX_LOOPBACK
+ tristate "SMUX line discipline loopback support"
+ depends on N_SMUX
+ help
+ Provides loopback and unit testing support for the Serial MUX Protocol.
+
+config SMUX_CTL
+ tristate "SMUX control driver"
+ depends on N_SMUX
+ help
+ Support for SMUX control driver on top of serial MUX.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index ea89b0b..3078e8d 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -6,6 +6,9 @@
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
+obj-$(CONFIG_N_SMUX) += n_smux.o
+obj-$(CONFIG_N_SMUX_LOOPBACK) += smux_test.o smux_loopback.o
+obj-$(CONFIG_SMUX_CTL) += smux_ctl.o
obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
obj-$(CONFIG_R3964) += n_r3964.o
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
new file mode 100644
index 0000000..7ba54fe
--- /dev/null
+++ b/drivers/tty/n_smux.c
@@ -0,0 +1,2938 @@
+/* drivers/tty/n_smux.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/smux.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <mach/msm_serial_hs.h>
+#include "smux_private.h"
+#include "smux_loopback.h"
+
+#define SMUX_NOTIFY_FIFO_SIZE 128
+#define SMUX_TX_QUEUE_SIZE 256
+#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
+#define SMUX_WM_LOW 2
+#define SMUX_WM_HIGH 4
+#define SMUX_PKT_LOG_SIZE 80
+
+/* Maximum size we can accept in a single RX buffer */
+#define TTY_RECEIVE_ROOM 65536
+#define TTY_BUFFER_FULL_WAIT_MS 50
+
+/* maximum sleep time between wakeup attempts */
+#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
+
+/* minimum delay for scheduling delayed work */
+#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
+
+/* inactivity timeout for no rx/tx activity */
+#define SMUX_INACTIVITY_TIMEOUT_MS 1000
+
+enum {
+ MSM_SMUX_DEBUG = 1U << 0,
+ MSM_SMUX_INFO = 1U << 1,
+ MSM_SMUX_POWER_INFO = 1U << 2,
+ MSM_SMUX_PKT = 1U << 3,
+};
+
+static int smux_debug_mask;
+module_param_named(debug_mask, smux_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Simulated wakeup used for testing */
+int smux_byte_loopback;
+module_param_named(byte_loopback, smux_byte_loopback,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+int smux_simulate_wakeup_delay = 1;
+module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define SMUX_DBG(x...) do { \
+ if (smux_debug_mask & MSM_SMUX_DEBUG) \
+ pr_info(x); \
+} while (0)
+
+#define SMUX_LOG_PKT_RX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_LOG_PKT_TX(pkt) do { \
+ if (smux_debug_mask & MSM_SMUX_PKT) \
+ smux_log_pkt(pkt, 0); \
+} while (0)
+
+/**
+ * Return true if channel is fully opened (both
+ * local and remote sides are in the OPENED state).
+ */
+#define IS_FULLY_OPENED(ch) \
+ (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
+ && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
+
+static struct platform_device smux_devs[] = {
+ {.name = "SMUX_CTL", .id = -1},
+ {.name = "SMUX_RMNET", .id = -1},
+ {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
+ {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
+ {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
+ {.name = "SMUX_DIAG", .id = -1},
+};
+
+enum {
+ SMUX_CMD_STATUS_RTC = 1 << 0,
+ SMUX_CMD_STATUS_RTR = 1 << 1,
+ SMUX_CMD_STATUS_RI = 1 << 2,
+ SMUX_CMD_STATUS_DCD = 1 << 3,
+ SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
+};
+
+/* Channel mode */
+enum {
+ SMUX_LCH_MODE_NORMAL,
+ SMUX_LCH_MODE_LOCAL_LOOPBACK,
+ SMUX_LCH_MODE_REMOTE_LOOPBACK,
+};
+
+enum {
+ SMUX_RX_IDLE,
+ SMUX_RX_MAGIC,
+ SMUX_RX_HDR,
+ SMUX_RX_PAYLOAD,
+ SMUX_RX_FAILURE,
+};
+
+/**
+ * Power states.
+ *
+ * The _FLUSH states are internal transitional states and are not part of the
+ * official state machine.
+ */
+enum {
+ SMUX_PWR_OFF,
+ SMUX_PWR_TURNING_ON,
+ SMUX_PWR_ON,
+ SMUX_PWR_TURNING_OFF_FLUSH,
+ SMUX_PWR_TURNING_OFF,
+ SMUX_PWR_OFF_FLUSH,
+};
+
+/**
+ * Logical Channel Structure. One instance per channel.
+ *
+ * Locking Hierarchy
+ * Each lock has a postfix that describes the locking level. If multiple locks
+ * are required, only increasing lock hierarchy numbers may be locked which
+ * ensures avoiding a deadlock.
+ *
+ * Locking Example
+ * If state_lock_lhb1 is currently held and the TX list needs to be
+ * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
+ * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
+ * not be acquired since it would result in a deadlock.
+ *
+ * Note that the Line Discipline locks (*_lha) should always be acquired
+ * before the logical channel locks.
+ */
+struct smux_lch_t {
+ /* channel state */
+ spinlock_t state_lock_lhb1;
+ uint8_t lcid;
+ unsigned local_state;
+ unsigned local_mode;
+ uint8_t local_tiocm;
+
+ unsigned remote_state;
+ unsigned remote_mode;
+ uint8_t remote_tiocm;
+
+ int tx_flow_control;
+
+ /* client callbacks and private data */
+ void *priv;
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+ int size);
+
+ /* TX Info */
+ spinlock_t tx_lock_lhb2;
+ struct list_head tx_queue;
+ struct list_head tx_ready_list;
+ unsigned tx_pending_data_cnt;
+ unsigned notify_lwm;
+};
+
+union notifier_metadata {
+ struct smux_meta_disconnected disconnected;
+ struct smux_meta_read read;
+ struct smux_meta_write write;
+ struct smux_meta_tiocm tiocm;
+};
+
+struct smux_notify_handle {
+ void (*notify)(void *priv, int event_type, const void *metadata);
+ void *priv;
+ int event_type;
+ union notifier_metadata *metadata;
+};
+
+/**
+ * Line discipline and module structure.
+ *
+ * Only one instance since multiple instances of line discipline are not
+ * allowed.
+ */
+struct smux_ldisc_t {
+ spinlock_t lock_lha0;
+
+ int is_initialized;
+ int in_reset;
+ int ld_open_count;
+ struct tty_struct *tty;
+
+ /* RX State Machine */
+ spinlock_t rx_lock_lha1;
+ unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
+ unsigned int recv_len;
+ unsigned int pkt_remain;
+ unsigned rx_state;
+ unsigned rx_activity_flag;
+
+ /* TX / Power */
+ spinlock_t tx_lock_lha2;
+ struct list_head lch_tx_ready_list;
+ unsigned power_state;
+ unsigned pwr_wakeup_delay_us;
+ unsigned tx_activity_flag;
+ unsigned powerdown_enabled;
+};
+
+
+/* data structures */
+static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
+static struct smux_ldisc_t smux;
+static const char *tty_error_type[] = {
+ [TTY_NORMAL] = "normal",
+ [TTY_OVERRUN] = "overrun",
+ [TTY_BREAK] = "break",
+ [TTY_PARITY] = "parity",
+ [TTY_FRAME] = "framing",
+};
+
+static const char *smux_cmds[] = {
+ [SMUX_CMD_DATA] = "DATA",
+ [SMUX_CMD_OPEN_LCH] = "OPEN",
+ [SMUX_CMD_CLOSE_LCH] = "CLOSE",
+ [SMUX_CMD_STATUS] = "STATUS",
+ [SMUX_CMD_PWR_CTL] = "PWR",
+ [SMUX_CMD_BYTE] = "Raw Byte",
+};
+
+static void smux_notify_local_fn(struct work_struct *work);
+static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
+
+static struct workqueue_struct *smux_notify_wq;
+static size_t handle_size;
+static struct kfifo smux_notify_fifo;
+static int queued_fifo_notifications;
+static DEFINE_SPINLOCK(notify_lock_lhc1);
+
+static struct workqueue_struct *smux_tx_wq;
+static void smux_tx_worker(struct work_struct *work);
+static DECLARE_WORK(smux_tx_work, smux_tx_worker);
+
+static void smux_wakeup_worker(struct work_struct *work);
+static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
+static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
+
+static void smux_inactivity_worker(struct work_struct *work);
+static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
+static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
+ smux_inactivity_worker);
+
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
+static void list_channel(struct smux_lch_t *ch);
+static int smux_send_status_cmd(struct smux_lch_t *ch);
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
+
+/**
+ * Convert TTY Error Flags to string for logging purposes.
+ *
+ * @flag TTY_* flag
+ * @returns String description or NULL if unknown
+ */
+static const char *tty_flag_to_str(unsigned flag)
+{
+ if (flag < ARRAY_SIZE(tty_error_type))
+ return tty_error_type[flag];
+ return NULL;
+}
+
+/**
+ * Convert SMUX Command to string for logging purposes.
+ *
+ * @cmd SMUX command
+ * @returns String description or NULL if unknown
+ */
+static const char *cmd_to_str(unsigned cmd)
+{
+ if (cmd < ARRAY_SIZE(smux_cmds))
+ return smux_cmds[cmd];
+ return NULL;
+}
+
+/**
+ * Set the reset state due to an unrecoverable failure.
+ */
+static void smux_enter_reset(void)
+{
+ pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
+ smux.in_reset = 1;
+}
+
+static int lch_init(void)
+{
+ unsigned int id;
+ struct smux_lch_t *ch;
+ int i = 0;
+
+ handle_size = sizeof(struct smux_notify_handle *);
+
+ smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
+ smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
+
+ if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
+ SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ i |= kfifo_alloc(&smux_notify_fifo,
+ SMUX_NOTIFY_FIFO_SIZE * handle_size,
+ GFP_KERNEL);
+ i |= smux_loopback_init();
+
+ if (i) {
+ pr_err("%s: out of memory error\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
+ ch = &smux_lch[id];
+
+ spin_lock_init(&ch->state_lock_lhb1);
+ ch->lcid = id;
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ ch->local_mode = SMUX_LCH_MODE_NORMAL;
+ ch->local_tiocm = 0x0;
+ ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+ ch->remote_mode = SMUX_LCH_MODE_NORMAL;
+ ch->remote_tiocm = 0x0;
+ ch->tx_flow_control = 0;
+ ch->priv = 0;
+ ch->notify = 0;
+ ch->get_rx_buffer = 0;
+
+ spin_lock_init(&ch->tx_lock_lhb2);
+ INIT_LIST_HEAD(&ch->tx_queue);
+ INIT_LIST_HEAD(&ch->tx_ready_list);
+ ch->tx_pending_data_cnt = 0;
+ ch->notify_lwm = 0;
+ }
+
+ return 0;
+}
+
+int smux_assert_lch_id(uint32_t lcid)
+{
+ if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
+ return -ENXIO;
+ else
+ return 0;
+}
+
+/**
+ * Log packet information for debug purposes.
+ *
+ * @pkt Packet to log
+ * @is_recv 1 = RX packet; 0 = TX Packet
+ *
+ * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
+ *
+ * PKT Info:
+ * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
+ *
+ * Direction: R = Receive, S = Send
+ * Local State: C = Closed; c = closing; o = opening; O = Opened
+ * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
+ * Remote State: C = Closed; O = Opened
+ * Remote Mode: R = Remote loopback; N = Normal
+ */
+static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
+{
+ char logbuf[SMUX_PKT_LOG_SIZE];
+ char cmd_extra[16];
+ int i = 0;
+ int count;
+ int len;
+ char local_state;
+ char local_mode;
+ char remote_state;
+ char remote_mode;
+ struct smux_lch_t *ch;
+ unsigned char *data;
+
+ ch = &smux_lch[pkt->hdr.lcid];
+
+ switch (ch->local_state) {
+ case SMUX_LCH_LOCAL_CLOSED:
+ local_state = 'C';
+ break;
+ case SMUX_LCH_LOCAL_OPENING:
+ local_state = 'o';
+ break;
+ case SMUX_LCH_LOCAL_OPENED:
+ local_state = 'O';
+ break;
+ case SMUX_LCH_LOCAL_CLOSING:
+ local_state = 'c';
+ break;
+ default:
+ local_state = 'U';
+ break;
+ }
+
+ switch (ch->local_mode) {
+ case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+ local_mode = 'L';
+ break;
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ local_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ local_mode = 'N';
+ break;
+ default:
+ local_mode = 'U';
+ break;
+ }
+
+ switch (ch->remote_state) {
+ case SMUX_LCH_REMOTE_CLOSED:
+ remote_state = 'C';
+ break;
+ case SMUX_LCH_REMOTE_OPENED:
+ remote_state = 'O';
+ break;
+
+ default:
+ remote_state = 'U';
+ break;
+ }
+
+ switch (ch->remote_mode) {
+ case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+ remote_mode = 'R';
+ break;
+ case SMUX_LCH_MODE_NORMAL:
+ remote_mode = 'N';
+ break;
+ default:
+ remote_mode = 'U';
+ break;
+ }
+
+ /* determine command type (ACK, etc) */
+ cmd_extra[0] = '\0';
+ switch (pkt->hdr.cmd) {
+ case SMUX_CMD_OPEN_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
+ case SMUX_CMD_CLOSE_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+ break;
+ };
+
+ i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+ "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
+ is_recv ? 'R' : 'S', pkt->hdr.lcid,
+ local_state, local_mode,
+ remote_state, remote_mode,
+ cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
+ pkt->hdr.payload_len, pkt->hdr.pad_len);
+
+ len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
+ data = (unsigned char *)pkt->payload;
+ for (count = 0; count < len; count++)
+ i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
+ "%02x ", (unsigned)data[count]);
+
+ pr_info("%s\n", logbuf);
+}
+
+static void smux_notify_local_fn(struct work_struct *work)
+{
+ struct smux_notify_handle *notify_handle = NULL;
+ union notifier_metadata *metadata = NULL;
+ unsigned long flags;
+ int i;
+
+ for (;;) {
+ /* retrieve notification */
+ spin_lock_irqsave(¬ify_lock_lhc1, flags);
+ if (kfifo_len(&smux_notify_fifo) >= handle_size) {
+ i = kfifo_out(&smux_notify_fifo,
+ ¬ify_handle,
+ handle_size);
+ if (i != handle_size) {
+ pr_err("%s: unable to retrieve handle %d expected %d\n",
+ __func__, i, handle_size);
+ spin_unlock_irqrestore(¬ify_lock_lhc1, flags);
+ break;
+ }
+ } else {
+ spin_unlock_irqrestore(¬ify_lock_lhc1, flags);
+ break;
+ }
+ --queued_fifo_notifications;
+ spin_unlock_irqrestore(¬ify_lock_lhc1, flags);
+
+ /* notify client */
+ metadata = notify_handle->metadata;
+ notify_handle->notify(notify_handle->priv,
+ notify_handle->event_type,
+ metadata);
+
+ kfree(metadata);
+ kfree(notify_handle);
+ }
+}
+
+/**
+ * Initialize existing packet.
+ */
+void smux_init_pkt(struct smux_pkt_t *pkt)
+{
+ memset(pkt, 0x0, sizeof(*pkt));
+ pkt->hdr.magic = SMUX_MAGIC;
+ INIT_LIST_HEAD(&pkt->list);
+}
+
+/**
+ * Allocate and initialize packet.
+ *
+ * If a payload is needed, either set it directly and ensure that it's freed or
+ * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
+ * automatically when smux_free_pkt() is called.
+ */
+struct smux_pkt_t *smux_alloc_pkt(void)
+{
+ struct smux_pkt_t *pkt;
+
+ /* Consider a free list implementation instead of kmalloc */
+ pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
+ if (!pkt) {
+ pr_err("%s: out of memory\n", __func__);
+ return NULL;
+ }
+ smux_init_pkt(pkt);
+ pkt->allocated = 1;
+
+ return pkt;
+}
+
+/**
+ * Free packet.
+ *
+ * @pkt Packet to free (may be NULL)
+ *
+ * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
+ * well. Otherwise, the caller is responsible for freeing the payload.
+ */
+void smux_free_pkt(struct smux_pkt_t *pkt)
+{
+ if (pkt) {
+ if (pkt->free_payload)
+ kfree(pkt->payload);
+ if (pkt->allocated)
+ kfree(pkt);
+ }
+}
+
+/**
+ * Allocate packet payload.
+ *
+ * @pkt Packet to add payload to
+ *
+ * @returns 0 on success, <0 upon error
+ *
+ * A flag is set to signal smux_free_pkt() to free the payload.
+ */
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
+{
+ if (!pkt)
+ return -EINVAL;
+
+ pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
+ pkt->free_payload = 1;
+ if (!pkt->payload) {
+ pr_err("%s: unable to malloc %d bytes for payload\n",
+ __func__, pkt->hdr.payload_len);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int schedule_notify(uint8_t lcid, int event,
+ const union notifier_metadata *metadata)
+{
+ struct smux_notify_handle *notify_handle = 0;
+ union notifier_metadata *meta_copy = 0;
+ struct smux_lch_t *ch;
+ int i;
+ unsigned long flags;
+ int ret = 0;
+
+ ch = &smux_lch[lcid];
+ notify_handle = kzalloc(sizeof(struct smux_notify_handle),
+ GFP_ATOMIC);
+ if (!notify_handle) {
+ pr_err("%s: out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto free_out;
+ }
+
+ notify_handle->notify = ch->notify;
+ notify_handle->priv = ch->priv;
+ notify_handle->event_type = event;
+ if (metadata) {
+ meta_copy = kzalloc(sizeof(union notifier_metadata),
+ GFP_ATOMIC);
+ if (!meta_copy) {
+ pr_err("%s: out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto free_out;
+ }
+ *meta_copy = *metadata;
+ notify_handle->metadata = meta_copy;
+ } else {
+ notify_handle->metadata = NULL;
+ }
+
+ spin_lock_irqsave(¬ify_lock_lhc1, flags);
+ i = kfifo_avail(&smux_notify_fifo);
+ if (i < handle_size) {
+ pr_err("%s: fifo full error %d expected %d\n",
+ __func__, i, handle_size);
+ ret = -ENOMEM;
+ goto unlock_out;
+ }
+
+ i = kfifo_in(&smux_notify_fifo, ¬ify_handle, handle_size);
+ if (i < 0 || i != handle_size) {
+ pr_err("%s: fifo not available error %d (expected %d)\n",
+ __func__, i, handle_size);
+ ret = -ENOSPC;
+ goto unlock_out;
+ }
+ ++queued_fifo_notifications;
+
+unlock_out:
+ spin_unlock_irqrestore(¬ify_lock_lhc1, flags);
+
+free_out:
+ queue_work(smux_notify_wq, &smux_notify_local);
+ if (ret < 0 && notify_handle) {
+ kfree(notify_handle->metadata);
+ kfree(notify_handle);
+ }
+ return ret;
+}
+
+/**
+ * Returns the serialized size of a packet.
+ *
+ * @pkt Packet to serialize
+ *
+ * @returns Serialized length of packet
+ */
+static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
+{
+ unsigned int size;
+
+ size = sizeof(struct smux_hdr_t);
+ size += pkt->hdr.payload_len;
+ size += pkt->hdr.pad_len;
+
+ return size;
+}
+
+/**
+ * Serialize packet @pkt into output buffer @data.
+ *
+ * @pkt Packet to serialize
+ * @out Destination buffer pointer
+ * @out_len Size of serialized packet
+ *
+ * @returns 0 for success
+ */
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+ unsigned int *out_len)
+{
+ char *data_start = out;
+
+ if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
+ pr_err("%s: packet size %d too big\n",
+ __func__, smux_serialize_size(pkt));
+ return -E2BIG;
+ }
+
+ memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
+ out += sizeof(struct smux_hdr_t);
+ if (pkt->payload) {
+ memcpy(out, pkt->payload, pkt->hdr.payload_len);
+ out += pkt->hdr.payload_len;
+ }
+ if (pkt->hdr.pad_len) {
+ memset(out, 0x0, pkt->hdr.pad_len);
+ out += pkt->hdr.pad_len;
+ }
+ *out_len = out - data_start;
+ return 0;
+}
+
+/**
+ * Serialize header and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized header data
+ * @out_len[out] Pointer to the serialized header length
+ */
+static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = (char *)&pkt->hdr;
+ *out_len = sizeof(struct smux_hdr_t);
+}
+
+/**
+ * Serialize payload and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized payload data
+ * @out_len[out] Pointer to the serialized payload length
+ */
+static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = pkt->payload;
+ *out_len = pkt->hdr.payload_len;
+}
+
+/**
+ * Serialize padding and provide pointer to the data.
+ *
+ * @pkt Packet
+ * @out[out] Pointer to the serialized padding (always NULL)
+ * @out_len[out] Pointer to the serialized padding length
+ *
+ * Since the padding field value is undefined, only the size of the padding
+ * (@out_len) is set and the buffer pointer (@out) will always be NULL.
+ */
+static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
+ unsigned int *out_len)
+{
+ *out = NULL;
+ *out_len = pkt->hdr.pad_len;
+}
+
+/**
+ * Write data to TTY framework and handle breaking the writes up if needed.
+ *
+ * @data Data to write
+ * @len Length of data
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+static int write_to_tty(char *data, unsigned len)
+{
+ int data_written;
+
+ if (!data)
+ return 0;
+
+ while (len > 0) {
+ data_written = smux.tty->ops->write(smux.tty, data, len);
+ if (data_written >= 0) {
+ len -= data_written;
+ data += data_written;
+ } else {
+ pr_err("%s: TTY write returned error %d\n",
+ __func__, data_written);
+ return data_written;
+ }
+
+ if (len)
+ tty_wait_until_sent(smux.tty,
+ msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
+
+ /* FUTURE - add SSR logic */
+ }
+ return 0;
+}
+
+/**
+ * Write packet to TTY.
+ *
+ * @pkt packet to write
+ *
+ * @returns 0 on success
+ */
+static int smux_tx_tty(struct smux_pkt_t *pkt)
+{
+ char *data;
+ unsigned int len;
+ int ret;
+
+ if (!smux.tty) {
+ pr_err("%s: TTY not initialized", __func__);
+ return -ENOTTY;
+ }
+
+ if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
+ SMUX_DBG("%s: tty send single byte\n", __func__);
+ ret = write_to_tty(&pkt->hdr.flags, 1);
+ return ret;
+ }
+
+ smux_serialize_hdr(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write header %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ smux_serialize_payload(pkt, &data, &len);
+ ret = write_to_tty(data, len);
+ if (ret) {
+ pr_err("%s: failed %d to write payload %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+
+ smux_serialize_padding(pkt, &data, &len);
+ while (len > 0) {
+ char zero = 0x0;
+ ret = write_to_tty(&zero, 1);
+ if (ret) {
+ pr_err("%s: failed %d to write padding %d\n",
+ __func__, ret, len);
+ return ret;
+ }
+ --len;
+ }
+ return 0;
+}
+
+/**
+ * Send a single character.
+ *
+ * @ch Character to send
+ */
+static void smux_send_byte(char ch)
+{
+ struct smux_pkt_t pkt;
+
+ smux_init_pkt(&pkt);
+
+ pkt.hdr.cmd = SMUX_CMD_BYTE;
+ pkt.hdr.flags = ch;
+ pkt.hdr.lcid = 0;
+ pkt.hdr.flags = ch;
+ SMUX_LOG_PKT_TX(&pkt);
+ if (!smux_byte_loopback)
+ smux_tx_tty(&pkt);
+ else
+ smux_tx_loopback(&pkt);
+}
+
+/**
+ * Receive a single-character packet (used for internal testing).
+ *
+ * @ch Character to receive
+ * @lcid Logical channel ID for packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static int smux_receive_byte(char ch, int lcid)
+{
+ struct smux_pkt_t pkt;
+
+ smux_init_pkt(&pkt);
+ pkt.hdr.lcid = lcid;
+ pkt.hdr.cmd = SMUX_CMD_BYTE;
+ pkt.hdr.flags = ch;
+
+ return smux_dispatch_rx_pkt(&pkt);
+}
+
+/**
+ * Queue packet for transmit.
+ *
+ * @pkt_ptr Packet to queue
+ * @ch Channel to queue packet on
+ * @queue Queue channel on ready list
+ */
+static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
+ int queue)
+{
+ unsigned long flags;
+
+ SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
+
+ spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+ list_add_tail(&pkt_ptr->list, &ch->tx_queue);
+ spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+ if (queue)
+ list_channel(ch);
+}
+
+/**
+ * Handle receive OPEN ACK command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ int enable_powerdown = 0;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ ch->local_state,
+ SMUX_LCH_LOCAL_OPENED);
+
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ ch->local_state = SMUX_LCH_LOCAL_OPENED;
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback OPEN ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x open ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock(&ch->state_lock_lhb1);
+
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ if (!smux.powerdown_enabled) {
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n",
+ __func__);
+ }
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ return ret;
+}
+
+static int smux_handle_close_ack(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ union notifier_metadata meta_disconnected;
+ unsigned long flags;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+ SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_LOCAL_CLOSING,
+ SMUX_LCH_LOCAL_CLOSED);
+ ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ SMUX_DBG("Remote loopback CLOSE ACK received\n");
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d state 0x%x close ack invalid\n",
+ __func__, lcid, ch->local_state);
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+ return ret;
+}
+
+/**
+ * Handle receive OPEN command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ int tx_ready = 0;
+ int enable_powerdown = 0;
+
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ return smux_handle_rx_open_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+
+ spin_lock(&ch->state_lock_lhb1);
+
+ if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED);
+
+ ch->remote_state = SMUX_LCH_REMOTE_OPENED;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
+ enable_powerdown = 1;
+
+ /* Send Open ACK */
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
+ | SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
+ ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+ ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+ }
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send an Open command to the remote side to
+ * simulate our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+ ack_pkt->hdr.flags =
+ SMUX_CMD_OPEN_POWER_COLLAPSE;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
+ schedule_notify(lcid, SMUX_CONNECTED, NULL);
+ }
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x open invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+
+ if (enable_powerdown) {
+ spin_lock(&smux.tx_lock_lha2);
+ smux.powerdown_enabled = 1;
+ SMUX_DBG("%s: enabling power-collapse support\n", __func__);
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle receive CLOSE command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ struct smux_lch_t *ch;
+ struct smux_pkt_t *ack_pkt;
+ union notifier_metadata meta_disconnected;
+ int tx_ready = 0;
+
+ if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ return smux_handle_close_ack(pkt);
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ meta_disconnected.disconnected.is_ssr = 0;
+
+ spin_lock(&ch->state_lock_lhb1);
+ if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
+ SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
+ SMUX_LCH_REMOTE_OPENED,
+ SMUX_LCH_REMOTE_CLOSED);
+
+ ack_pkt = smux_alloc_pkt();
+ if (!ack_pkt) {
+ /* exit out to allow retrying this later */
+ ret = -ENOMEM;
+ goto out;
+ }
+ ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+
+ if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
+ /*
+ * Send a Close command to the remote side to simulate
+ * our local client doing it.
+ */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ ack_pkt->hdr.flags = 0;
+ ack_pkt->hdr.payload_len = 0;
+ ack_pkt->hdr.pad_len = 0;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ }
+
+ if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
+ schedule_notify(lcid, SMUX_DISCONNECTED,
+ &meta_disconnected);
+ ret = 0;
+ } else {
+ pr_err("%s: lcid %d remote state 0x%x close invalid\n",
+ __func__, lcid, ch->remote_state);
+ ret = -EINVAL;
+ }
+out:
+ spin_unlock(&ch->state_lock_lhb1);
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/*
+ * Handle receive DATA command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
+{
+ uint8_t lcid;
+ int ret;
+ int i;
+ int tmp;
+ int rx_len;
+ struct smux_lch_t *ch;
+ union notifier_metadata metadata;
+ int remote_loopback;
+ int tx_ready = 0;
+ struct smux_pkt_t *ack_pkt;
+ unsigned long flags;
+
+ if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+ return -ENXIO;
+
+ lcid = pkt->hdr.lcid;
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+ remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
+
+ if (ch->local_state != SMUX_LCH_LOCAL_OPENED
+ && !remote_loopback) {
+ pr_err("smux: ch %d error data on local state 0x%x",
+ lcid, ch->local_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+ pr_err("smux: ch %d error data on remote state 0x%x",
+ lcid, ch->remote_state);
+ ret = -EIO;
+ goto out;
+ }
+
+ rx_len = pkt->hdr.payload_len;
+ if (rx_len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
+ metadata.read.pkt_priv = 0;
+ metadata.read.buffer = 0;
+
+ if (!remote_loopback) {
+ tmp = ch->get_rx_buffer(ch->priv,
+ (void **)&metadata.read.pkt_priv,
+ (void **)&metadata.read.buffer,
+ rx_len);
+ if (tmp == 0 && metadata.read.buffer) {
+ /* place data into RX buffer */
+ memcpy(metadata.read.buffer, pkt->payload,
+ rx_len);
+ metadata.read.len = rx_len;
+ schedule_notify(lcid, SMUX_READ_DONE,
+ &metadata);
+ ret = 0;
+ break;
+ } else if (tmp == -EAGAIN) {
+ ret = -ENOMEM;
+ } else if (tmp < 0) {
+ schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+ ret = -ENOMEM;
+ break;
+ } else if (!metadata.read.buffer) {
+ pr_err("%s: get_rx_buffer() buffer is NULL\n",
+ __func__);
+ ret = -ENOMEM;
+ }
+ } else {
+ /* Echo the data back to the remote client. */
+ ack_pkt = smux_alloc_pkt();
+ if (ack_pkt) {
+ ack_pkt->hdr.lcid = lcid;
+ ack_pkt->hdr.cmd = SMUX_CMD_DATA;
+ ack_pkt->hdr.flags = 0;
+ ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
+ ack_pkt->payload = pkt->payload;
+ ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
+ smux_tx_queue(ack_pkt, ch, 0);
+ tx_ready = 1;
+ } else {
+ pr_err("%s: Remote loopack allocation failure\n",
+ __func__);
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**
+ * Handle a received byte command (used for loopback testing).
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 on success, -ENXIO for an invalid channel id, -EIO if the
+ *          channel is not open on both the local and remote side
+ */
+static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
+{
+	struct smux_lch_t *ch;
+	union notifier_metadata meta;
+	unsigned long flags;
+	uint8_t lcid;
+	int ret = -EIO;
+
+	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
+		return -ENXIO;
+
+	lcid = pkt->hdr.lcid;
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+		pr_err("smux: ch %d error data on local state 0x%x",
+				lcid, ch->local_state);
+		goto out;
+	}
+
+	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
+		pr_err("smux: ch %d error data on remote state 0x%x",
+				lcid, ch->remote_state);
+		goto out;
+	}
+
+	/* the test byte is carried in the header flags field */
+	meta.read.pkt_priv = (void *)(int)pkt->hdr.flags;
+	meta.read.buffer = 0;
+	schedule_notify(lcid, SMUX_READ_DONE, &meta);
+	ret = 0;
+
+out:
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	return ret;
+}
+
+/**
+ * Handle receive status command (remote TIOCM bits and flow control).
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
+{
+	uint8_t lcid;
+	/*
+	 * Fix: ret was previously only assigned inside the
+	 * IS_FULLY_OPENED() branch, so an uninitialized stack value was
+	 * returned whenever the channel was not fully opened.
+	 */
+	int ret = 0;
+	struct smux_lch_t *ch;
+	union notifier_metadata meta;
+	unsigned long flags;
+	int tx_ready = 0;
+
+	lcid = pkt->hdr.lcid;
+	ch = &smux_lch[lcid];
+
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	meta.tiocm.tiocm_old = ch->remote_tiocm;
+	meta.tiocm.tiocm_new = pkt->hdr.flags;
+
+	/* update logical channel flow control */
+	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
+		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
+		/* logical channel flow control changed */
+		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
+			/* remote asked us to stop transmitting data */
+			SMUX_DBG("TX Flow control enabled\n");
+			ch->tx_flow_control = 1;
+		} else {
+			/* re-enable channel */
+			SMUX_DBG("TX Flow control disabled\n");
+			ch->tx_flow_control = 0;
+			tx_ready = 1;
+		}
+	}
+	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
+	ch->remote_tiocm = pkt->hdr.flags;
+	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
+
+	/* client notification for status change */
+	if (IS_FULLY_OPENED(ch)) {
+		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
+			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
+	}
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Handle receive power command.
+ *
+ * @pkt Received packet
+ *
+ * @returns 0 for success
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
+{
+	int tx_ready = 0;
+	struct smux_pkt_t *ack_pkt = NULL;
+	uint8_t lcid = 0;
+
+	spin_lock(&smux.tx_lock_lha2);
+	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
+		/* remote acknowledged our local sleep request */
+		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
+			/* Power-down complete, turn off UART */
+			SMUX_DBG("%s: Power %d->%d\n", __func__,
+					smux.power_state, SMUX_PWR_OFF_FLUSH);
+			smux.power_state = SMUX_PWR_OFF_FLUSH;
+			queue_work(smux_tx_wq, &smux_inactivity_work);
+		} else {
+			pr_err("%s: sleep request ack invalid in state %d\n",
+					__func__, smux.power_state);
+		}
+	} else {
+		/* remote sleep request */
+		if (smux.power_state == SMUX_PWR_ON
+			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
+			ack_pkt = smux_alloc_pkt();
+			if (ack_pkt) {
+				SMUX_DBG("%s: Power %d->%d\n", __func__,
+						smux.power_state,
+						SMUX_PWR_TURNING_OFF_FLUSH);
+
+				/* send power-down acknowledgement */
+				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
+				ack_pkt->hdr.lcid = pkt->hdr.lcid;
+				/*
+				 * Fix: capture the channel id while the lock
+				 * is held.  Once the packet is queued, the TX
+				 * worker may transmit and free it, so
+				 * ack_pkt must not be dereferenced after the
+				 * unlock below (previous code read
+				 * ack_pkt->hdr.lcid after the unlock).
+				 */
+				lcid = ack_pkt->hdr.lcid;
+				smux_tx_queue(ack_pkt, &smux_lch[lcid], 0);
+				tx_ready = 1;
+				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
+				queue_delayed_work(smux_tx_wq,
+					&smux_delayed_inactivity_work,
+					msecs_to_jiffies(
+						SMUX_INACTIVITY_TIMEOUT_MS));
+			}
+		} else {
+			pr_err("%s: sleep request invalid in state %d\n",
+					__func__, smux.power_state);
+		}
+	}
+	spin_unlock(&smux.tx_lock_lha2);
+
+	if (tx_ready)
+		list_channel(&smux_lch[lcid]);
+
+	return 0;
+}
+
+/**
+ * Dispatch a fully-received packet to the handler for its command type.
+ *
+ * @pkt Packet to process
+ *
+ * @returns 0 for success, <0 on failure or unknown command
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
+{
+	int ret = -EINVAL;
+
+	SMUX_LOG_PKT_RX(pkt);
+
+	switch (pkt->hdr.cmd) {
+	case SMUX_CMD_OPEN_LCH:
+		ret = smux_handle_rx_open_cmd(pkt);
+		break;
+	case SMUX_CMD_CLOSE_LCH:
+		ret = smux_handle_rx_close_cmd(pkt);
+		break;
+	case SMUX_CMD_DATA:
+		ret = smux_handle_rx_data_cmd(pkt);
+		break;
+	case SMUX_CMD_STATUS:
+		ret = smux_handle_rx_status_cmd(pkt);
+		break;
+	case SMUX_CMD_PWR_CTL:
+		ret = smux_handle_rx_power_cmd(pkt);
+		break;
+	case SMUX_CMD_BYTE:
+		ret = smux_handle_rx_byte_cmd(pkt);
+		break;
+	default:
+		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
+		break;
+	}
+	return ret;
+}
+
+/**
+ * Deserialize one complete packet and hand it to the dispatch logic.
+ *
+ * @data Raw data for one packet
+ * @len Length of the data
+ *
+ * @returns 0 for success, <0 if the header is malformed
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static int smux_deserialize(unsigned char *data, int len)
+{
+	struct smux_pkt_t pkt;
+
+	smux_init_pkt(&pkt);
+
+	/*
+	 * It may be possible to optimize this to not use the
+	 * temporary buffer.
+	 */
+	memcpy(&pkt.hdr, data, sizeof(struct smux_hdr_t));
+
+	if (pkt.hdr.magic != SMUX_MAGIC) {
+		pr_err("%s: invalid header magic\n", __func__);
+		return -EINVAL;
+	}
+
+	if (smux_assert_lch_id(pkt.hdr.lcid)) {
+		pr_err("%s: invalid channel id %d\n", __func__, pkt.hdr.lcid);
+		return -ENXIO;
+	}
+
+	/* payload (if any) follows the header in the caller's buffer */
+	if (pkt.hdr.payload_len)
+		pkt.payload = data + sizeof(struct smux_hdr_t);
+
+	return smux_dispatch_rx_pkt(&pkt);
+}
+
+/**
+ * Handle wakeup request byte from the remote side.
+ *
+ * Always acknowledges the request; additionally transitions the link to
+ * the ON state if it was off or in the process of turning on.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_req(void)
+{
+	spin_lock(&smux.tx_lock_lha2);
+	if (smux.power_state == SMUX_PWR_OFF ||
+	    smux.power_state == SMUX_PWR_TURNING_ON) {
+		/* wakeup system */
+		SMUX_DBG("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_ON);
+		smux.power_state = SMUX_PWR_ON;
+		queue_work(smux_tx_wq, &smux_wakeup_work);
+		queue_work(smux_tx_wq, &smux_tx_work);
+		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+	}
+	/* the ack is sent regardless of the current power state */
+	smux_send_byte(SMUX_WAKEUP_ACK);
+	spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * Handle wakeup request ack from the remote side.
+ *
+ * Completes a pending wakeup handshake; an ack received in any state
+ * other than TURNING_ON or ON is a protocol error and is logged.
+ *
+ * Called with rx_lock_lha1 already locked.
+ */
+static void smux_handle_wakeup_ack(void)
+{
+	spin_lock(&smux.tx_lock_lha2);
+	if (smux.power_state == SMUX_PWR_TURNING_ON) {
+		/* the remote responded to our wakeup request */
+		SMUX_DBG("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_ON);
+		smux.power_state = SMUX_PWR_ON;
+		queue_work(smux_tx_wq, &smux_tx_work);
+		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+	} else if (smux.power_state != SMUX_PWR_ON) {
+		/* ack is only meaningful while turning on (or already on) */
+		pr_err("%s: wakeup request ack invalid in state %d\n",
+				__func__, smux.power_state);
+	}
+	spin_unlock(&smux.tx_lock_lha2);
+}
+
+/**
+ * RX State machine - IDLE state processing.
+ *
+ * Scans incoming bytes for the start-of-packet magic or for wakeup
+ * protocol bytes; everything else is logged and discarded.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_idle(const unsigned char *data,
+		int len, int *used, int flag)
+{
+	int idx = *used;
+
+	if (flag) {
+		if (smux_byte_loopback)
+			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
+					smux_byte_loopback);
+		pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
+		++*used;
+		return;
+	}
+
+	while (idx < len && smux.rx_state == SMUX_RX_IDLE) {
+		unsigned char c = data[idx++];
+
+		switch (c) {
+		case SMUX_MAGIC_WORD1:
+			smux.rx_state = SMUX_RX_MAGIC;
+			break;
+		case SMUX_WAKEUP_REQ:
+			smux_handle_wakeup_req();
+			break;
+		case SMUX_WAKEUP_ACK:
+			smux_handle_wakeup_ack();
+			break;
+		default:
+			/* unexpected character */
+			if (smux_byte_loopback && c == SMUX_UT_ECHO_REQ)
+				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
+						smux_byte_loopback);
+			pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
+					(unsigned)c);
+			break;
+		}
+	}
+
+	*used = idx;
+}
+
+/**
+ * RX State machine - Header Magic state processing.
+ *
+ * The first magic byte has already been seen; waits for the second one
+ * and then starts collecting the packet header.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_magic(const unsigned char *data,
+		int len, int *used, int flag)
+{
+	int idx = *used;
+
+	if (flag) {
+		pr_err("%s: TTY RX error %d\n", __func__, flag);
+		smux_enter_reset();
+		smux.rx_state = SMUX_RX_FAILURE;
+		++*used;
+		return;
+	}
+
+	while (idx < len && smux.rx_state == SMUX_RX_MAGIC) {
+		unsigned char c = data[idx++];
+
+		if (c == SMUX_MAGIC_WORD2) {
+			/* magic complete - start collecting the header */
+			smux.recv_len = 0;
+			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
+			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
+			smux.rx_state = SMUX_RX_HDR;
+		} else {
+			/* unexpected / trash character */
+			pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
+					__func__, c, *used, len);
+			smux.rx_state = SMUX_RX_IDLE;
+		}
+	}
+
+	*used = idx;
+}
+
+/**
+ * RX State machine - Packet Header state processing.
+ *
+ * Accumulates bytes until a full struct smux_hdr_t has been received,
+ * then computes the remaining payload length and moves to PAYLOAD.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+static void smux_rx_handle_hdr(const unsigned char *data,
+		int len, int *used, int flag)
+{
+	int idx = *used;
+	struct smux_hdr_t *hdr;
+
+	if (flag) {
+		pr_err("%s: TTY RX error %d\n", __func__, flag);
+		smux_enter_reset();
+		smux.rx_state = SMUX_RX_FAILURE;
+		++*used;
+		return;
+	}
+
+	while (idx < len && smux.rx_state == SMUX_RX_HDR) {
+		smux.recv_buf[smux.recv_len++] = data[idx++];
+
+		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
+			/* complete header received */
+			hdr = (struct smux_hdr_t *)smux.recv_buf;
+			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
+			smux.rx_state = SMUX_RX_PAYLOAD;
+		}
+	}
+	*used = idx;
+}
+
+/**
+ * RX State machine - Packet Payload state processing.
+ *
+ * Copies payload (and pad) bytes into smux.recv_buf until pkt_remain is
+ * exhausted, then dispatches the complete packet.
+ *
+ * @data New RX data to process
+ * @len Length of the data
+ * @used Return value of length processed
+ * @flag Error flag - TTY_NORMAL 0 for no failure
+ *
+ * Called with rx_lock_lha1 locked.
+ *
+ * NOTE(review): pkt_remain is derived from the received header
+ * (payload_len + pad_len in smux_rx_handle_hdr) and is not validated
+ * against the capacity of smux.recv_buf here -- confirm the maximum
+ * header-encodable length cannot exceed the receive buffer size.
+ */
+static void smux_rx_handle_pkt_payload(const unsigned char *data,
+		int len, int *used, int flag)
+{
+	int remaining;
+
+	if (flag) {
+		pr_err("%s: TTY RX error %d\n", __func__, flag);
+		smux_enter_reset();
+		smux.rx_state = SMUX_RX_FAILURE;
+		++*used;
+		return;
+	}
+
+	/* copy data into rx buffer */
+	if (smux.pkt_remain < (len - *used))
+		remaining = smux.pkt_remain;
+	else
+		remaining = len - *used;
+
+	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
+	smux.recv_len += remaining;
+	smux.pkt_remain -= remaining;
+	*used += remaining;
+
+	if (smux.pkt_remain == 0) {
+		/* complete packet received */
+		smux_deserialize(smux.recv_buf, smux.recv_len);
+		smux.rx_state = SMUX_RX_IDLE;
+	}
+}
+
+/**
+ * Feed data to the receive state machine.
+ *
+ * Iterates until the entire input has been consumed and the state has
+ * stabilized (a state transition may leave bytes for the next handler).
+ *
+ * @data Pointer to data block
+ * @len Length of data
+ * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
+ *
+ * Called with rx_lock_lha1 locked.
+ */
+void smux_rx_state_machine(const unsigned char *data,
+		int len, int flag)
+{
+	unsigned long flags;
+	int used = 0;
+	int prev_state;
+
+	SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
+	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+	smux.rx_activity_flag = 1;
+	do {
+		SMUX_DBG("%s: state %d; %d of %d\n",
+				__func__, smux.rx_state, used, len);
+		prev_state = smux.rx_state;
+
+		switch (smux.rx_state) {
+		case SMUX_RX_IDLE:
+			smux_rx_handle_idle(data, len, &used, flag);
+			break;
+		case SMUX_RX_MAGIC:
+			smux_rx_handle_magic(data, len, &used, flag);
+			break;
+		case SMUX_RX_HDR:
+			smux_rx_handle_hdr(data, len, &used, flag);
+			break;
+		case SMUX_RX_PAYLOAD:
+			smux_rx_handle_pkt_payload(data, len, &used, flag);
+			break;
+		default:
+			SMUX_DBG("%s: invalid state %d\n",
+					__func__, smux.rx_state);
+			smux.rx_state = SMUX_RX_IDLE;
+			break;
+		}
+	} while (used < len || smux.rx_state != prev_state);
+	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+}
+
+/**
+ * Add channel to transmit-ready list and trigger transmit worker.
+ *
+ * @ch Channel to add
+ *
+ * Takes tx_lock_lha2 then the channel's tx_lock_lhb2 (the outer-to-inner
+ * lock order used elsewhere in this file).
+ */
+static void list_channel(struct smux_lch_t *ch)
+{
+	unsigned long flags;
+
+	SMUX_DBG("%s: listing channel %d\n",
+			__func__, ch->lcid);
+
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	spin_lock(&ch->tx_lock_lhb2);
+	smux.tx_activity_flag = 1;
+	/* only add if not already on the ready list */
+	if (list_empty(&ch->tx_ready_list))
+		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
+	spin_unlock(&ch->tx_lock_lhb2);
+	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+	queue_work(smux_tx_wq, &smux_tx_work);
+}
+
+/**
+ * Transmit packet on the correct transport and then perform client
+ * notification.
+ *
+ * @ch Channel to transmit on
+ * @pkt Packet to transmit
+ */
+static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
+{
+	union notifier_metadata meta;
+	int ret;
+
+	if (!ch || !pkt)
+		return;
+
+	SMUX_LOG_PKT_TX(pkt);
+	if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
+		ret = smux_tx_loopback(pkt);
+	else
+		ret = smux_tx_tty(pkt);
+
+	/* only data packets generate write-completion notifications */
+	if (pkt->hdr.cmd != SMUX_CMD_DATA)
+		return;
+
+	meta.write.pkt_priv = pkt->priv;
+	meta.write.buffer = pkt->payload;
+	meta.write.len = pkt->hdr.payload_len;
+	if (ret >= 0) {
+		SMUX_DBG("%s: PKT write done", __func__);
+		schedule_notify(ch->lcid, SMUX_WRITE_DONE, &meta);
+	} else {
+		pr_err("%s: failed to write pkt %d\n",
+				__func__, ret);
+		schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta);
+	}
+}
+
+/**
+ * Power-up the UART by requesting its clock on.
+ */
+static void smux_uart_power_on(void)
+{
+	struct uart_state *port_state;
+
+	if (!smux.tty || !smux.tty->driver_data) {
+		pr_err("%s: unable to find UART port for tty %p\n",
+				__func__, smux.tty);
+		return;
+	}
+	port_state = smux.tty->driver_data;
+	msm_hs_request_clock_on(port_state->uart_port);
+}
+
+/**
+ * Power down the UART by requesting its clock off.
+ */
+static void smux_uart_power_off(void)
+{
+	struct uart_state *port_state;
+
+	if (!smux.tty || !smux.tty->driver_data) {
+		pr_err("%s: unable to find UART port for tty %p\n",
+				__func__, smux.tty);
+		return;
+	}
+	port_state = smux.tty->driver_data;
+	msm_hs_request_clock_off(port_state->uart_port);
+}
+
+/**
+ * TX Wakeup Worker
+ *
+ * @work Not used
+ *
+ * Do an exponential back-off wakeup sequence with a maximum period
+ * of approximately 1 second (1 << 20 microseconds).
+ *
+ * Sends SMUX_WAKEUP_REQ bytes until the power state reaches SMUX_PWR_ON
+ * (set by smux_handle_wakeup_ack/req on the RX path).  Short delays are
+ * slept inline; long delays are handed to a delayed work item.
+ */
+static void smux_wakeup_worker(struct work_struct *work)
+{
+	unsigned long flags;
+	unsigned wakeup_delay;
+	int complete = 0;
+
+	for (;;) {
+		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+		if (smux.power_state == SMUX_PWR_ON) {
+			/* wakeup complete */
+			complete = 1;
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+			break;
+		} else {
+			/* retry with doubled delay, capped at the maximum */
+			wakeup_delay = smux.pwr_wakeup_delay_us;
+			smux.pwr_wakeup_delay_us <<= 1;
+			if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+				smux.pwr_wakeup_delay_us =
+					SMUX_WAKEUP_DELAY_MAX;
+		}
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+		SMUX_DBG("%s: triggering wakeup\n", __func__);
+		smux_send_byte(SMUX_WAKEUP_REQ);
+
+		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+			SMUX_DBG("%s: sleeping for %u us\n", __func__,
+					wakeup_delay);
+			usleep_range(wakeup_delay, 2*wakeup_delay);
+		} else {
+			/* schedule delayed work */
+			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+					__func__, wakeup_delay / 1000);
+			queue_delayed_work(smux_tx_wq,
+					&smux_wakeup_delayed_work,
+					msecs_to_jiffies(wakeup_delay / 1000));
+			break;
+		}
+	}
+
+	if (complete) {
+		SMUX_DBG("%s: wakeup complete\n", __func__);
+		/*
+		 * Cancel any pending retry. This avoids a race condition with
+		 * a new power-up request because:
+		 * 1) this worker doesn't modify the state
+		 * 2) this worker is processed on the same single-threaded
+		 *    workqueue as new TX wakeup requests
+		 */
+		cancel_delayed_work(&smux_wakeup_delayed_work);
+	}
+}
+
+
+/**
+ * Inactivity timeout worker. Periodically scheduled when link is active.
+ * When it detects inactivity, it will power-down the UART link.
+ *
+ * @work Work structure (not used)
+ *
+ * Lock order: rx_lock_lha1 outer, tx_lock_lha2 inner.
+ */
+static void smux_inactivity_worker(struct work_struct *work)
+{
+	int tx_ready = 0;
+	struct smux_pkt_t *pkt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
+	spin_lock(&smux.tx_lock_lha2);
+
+	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
+		/* no activity since the previous run */
+		if (smux.powerdown_enabled) {
+			if (smux.power_state == SMUX_PWR_ON) {
+				/* start power-down sequence */
+				pkt = smux_alloc_pkt();
+				if (pkt) {
+					SMUX_DBG("%s: Power %d->%d\n", __func__,
+						smux.power_state,
+						SMUX_PWR_TURNING_OFF);
+					smux.power_state = SMUX_PWR_TURNING_OFF;
+
+					/* send power-down request */
+					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
+					pkt->hdr.flags = 0;
+					pkt->hdr.lcid = 0;
+					smux_tx_queue(pkt,
+						&smux_lch[SMUX_TEST_LCID],
+						0);
+					tx_ready = 1;
+				}
+			}
+		} else {
+			SMUX_DBG("%s: link inactive, but powerdown disabled\n",
+					__func__);
+		}
+	}
+	/* reset activity tracking for the next interval */
+	smux.tx_activity_flag = 0;
+	smux.rx_activity_flag = 0;
+
+	spin_unlock(&smux.tx_lock_lha2);
+	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
+
+	if (tx_ready)
+		list_channel(&smux_lch[SMUX_TEST_LCID]);
+
+	/* NOTE(review): power_state is read here without tx_lock_lha2 held;
+	 * confirm this unlocked read is intentional */
+	if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
+	    (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
+		/* ready to power-down the UART */
+		SMUX_DBG("%s: Power %d->%d\n", __func__,
+				smux.power_state, SMUX_PWR_OFF);
+		smux_uart_power_off();
+		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+		smux.power_state = SMUX_PWR_OFF;
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+	}
+
+	/* reschedule inactivity worker */
+	if (smux.power_state != SMUX_PWR_OFF)
+		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
+			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
+}
+
+/**
+ * Transmit worker handles serializing and transmitting packets onto the
+ * underlying transport.
+ *
+ * @work Work structure (not used)
+ */
+static void smux_tx_worker(struct work_struct *work)
+{
+	struct smux_pkt_t *pkt;
+	struct smux_lch_t *ch;
+	unsigned low_wm_notif;
+	unsigned lcid;
+	unsigned long flags;
+
+
+	/*
+	 * Transmit packets in round-robin fashion based upon ready
+	 * channels.
+	 *
+	 * To eliminate the need to hold a lock for the entire
+	 * iteration through the channel ready list, the head of the
+	 * ready-channel list is always the next channel to be
+	 * processed. To send a packet, the first valid packet in
+	 * the head channel is removed and the head channel is then
+	 * rescheduled at the end of the queue by removing it and
+	 * inserting after the tail. The locks can then be released
+	 * while the packet is processed.
+	 */
+	for (;;) {
+		pkt = NULL;
+		low_wm_notif = 0;
+
+		/* get the next ready channel */
+		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+		if (list_empty(&smux.lch_tx_ready_list)) {
+			/* no ready channels */
+			SMUX_DBG("%s: no more ready channels, exiting\n",
+					__func__);
+			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+			break;
+		}
+		smux.tx_activity_flag = 1;
+
+		if (smux.power_state != SMUX_PWR_ON
+			&& smux.power_state != SMUX_PWR_TURNING_OFF
+			&& smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
+			/* Link isn't ready to transmit */
+			if (smux.power_state == SMUX_PWR_OFF) {
+				/* link is off, trigger wakeup */
+				smux.pwr_wakeup_delay_us = 1;
+				SMUX_DBG("%s: Power %d->%d\n", __func__,
+						smux.power_state,
+						SMUX_PWR_TURNING_ON);
+				smux.power_state = SMUX_PWR_TURNING_ON;
+				spin_unlock_irqrestore(&smux.tx_lock_lha2,
+						flags);
+				smux_uart_power_on();
+				queue_work(smux_tx_wq, &smux_wakeup_work);
+			} else {
+				SMUX_DBG("%s: can not tx with power state %d\n",
+						__func__,
+						smux.power_state);
+				spin_unlock_irqrestore(&smux.tx_lock_lha2,
+						flags);
+			}
+			break;
+		}
+
+		/* get the next packet to send and rotate channel list */
+		ch = list_first_entry(&smux.lch_tx_ready_list,
+						struct smux_lch_t,
+						tx_ready_list);
+
+		/* lock order: state lock before per-channel tx lock */
+		spin_lock(&ch->state_lock_lhb1);
+		spin_lock(&ch->tx_lock_lhb2);
+		if (!list_empty(&ch->tx_queue)) {
+			/*
+			 * If remote TX flow control is enabled or
+			 * the channel is not fully opened, then only
+			 * send command packets.
+			 */
+			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
+				struct smux_pkt_t *curr;
+				list_for_each_entry(curr, &ch->tx_queue, list) {
+					if (curr->hdr.cmd != SMUX_CMD_DATA) {
+						pkt = curr;
+						break;
+					}
+				}
+			} else {
+				/* get next cmd/data packet to send */
+				pkt = list_first_entry(&ch->tx_queue,
+						struct smux_pkt_t, list);
+			}
+		}
+
+		if (pkt) {
+			list_del(&pkt->list);
+
+			/* update packet stats */
+			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+				--ch->tx_pending_data_cnt;
+				if (ch->notify_lwm &&
+					ch->tx_pending_data_cnt
+						<= SMUX_WM_LOW) {
+					ch->notify_lwm = 0;
+					low_wm_notif = 1;
+				}
+			}
+
+			/* advance to the next ready channel */
+			list_rotate_left(&smux.lch_tx_ready_list);
+		} else {
+			/* no data in channel to send, remove from ready list */
+			list_del(&ch->tx_ready_list);
+			INIT_LIST_HEAD(&ch->tx_ready_list);
+		}
+		lcid = ch->lcid;
+		spin_unlock(&ch->tx_lock_lhb2);
+		spin_unlock(&ch->state_lock_lhb1);
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+
+		if (low_wm_notif)
+			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
+
+		/* send the packet (smux_tx_pkt tolerates pkt == NULL) */
+		smux_tx_pkt(ch, pkt);
+		smux_free_pkt(pkt);
+	}
+}
+
+
+/**********************************************************************/
+/* Kernel API */
+/**********************************************************************/
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ *
+ * If a flag appears in both @set and @clear, the clear action wins
+ * because it is applied second.
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+	struct smux_lch_t *ch;
+	unsigned long flags;
+	int tx_ready = 0;
+	int ret = 0;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+	/* local loopback mode */
+	if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+		ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
+	if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
+		ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+	/* remote loopback mode */
+	if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+		ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
+	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
+		ch->local_mode = SMUX_LCH_MODE_NORMAL;
+
+	/* remote flow control (stop/resume remote transmitter) */
+	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+		ret = smux_send_status_cmd(ch);
+		tx_ready = 1;
+	}
+	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
+		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+		ret = smux_send_status_cmd(ch);
+		tx_ready = 1;
+	}
+
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either not previously opened or
+ * msm_smux_close() has been called and the SMUX_DISCONNECTED event has
+ * been received).
+ *
+ * Once the remote side is opened, the client will receive a
+ * SMUX_CONNECTED event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+	void (*notify)(void *priv, int event_type, const void *metadata),
+	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
+								int size))
+{
+	int ret;
+	struct smux_lch_t *ch;
+	struct smux_pkt_t *pkt;
+	int tx_ready = 0;
+	unsigned long flags;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
+		pr_err("%s: open lcid %d local state %x invalid\n",
+				__func__, lcid, ch->local_state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Fix: allocate the OPEN command packet before mutating any
+	 * channel state.  Previously an allocation failure left the
+	 * channel stuck in LOCAL_OPENING with no OPEN command queued,
+	 * which made any retry of msm_smux_open() fail with -EINVAL.
+	 */
+	pkt = smux_alloc_pkt();
+	if (!pkt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+			ch->local_state,
+			SMUX_LCH_LOCAL_OPENING);
+
+	ch->local_state = SMUX_LCH_LOCAL_OPENING;
+
+	ch->priv = priv;
+	ch->notify = notify;
+	ch->get_rx_buffer = get_rx_buffer;
+	ret = 0;
+
+	/* Send Open Command */
+	pkt->hdr.magic = SMUX_MAGIC;
+	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
+	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
+		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
+	pkt->hdr.lcid = lcid;
+	pkt->hdr.payload_len = 0;
+	pkt->hdr.pad_len = 0;
+	smux_tx_queue(pkt, ch, 0);
+	tx_ready = 1;
+
+out:
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	if (tx_ready)
+		list_channel(ch);
+	return ret;
+}
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the
+ * client will receive a SMUX_DISCONNECTED notification.  Queued but
+ * unsent packets are purged: pending OPEN commands force the channel
+ * back to CLOSED, pending DATA packets generate SMUX_WRITE_FAIL.
+ */
+int msm_smux_close(uint8_t lcid)
+{
+	int ret = 0;
+	struct smux_lch_t *ch;
+	struct smux_pkt_t *pkt;
+	int tx_ready = 0;
+	unsigned long flags;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	/* reset per-channel status and watermark tracking */
+	ch->local_tiocm = 0x0;
+	ch->remote_tiocm = 0x0;
+	ch->tx_pending_data_cnt = 0;
+	ch->notify_lwm = 0;
+
+	/* Purge TX queue */
+	spin_lock(&ch->tx_lock_lhb2);
+	while (!list_empty(&ch->tx_queue)) {
+		pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
+							list);
+		list_del(&pkt->list);
+
+		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
+			/* Open was never sent, just force to closed state */
+			union notifier_metadata meta_disconnected;
+
+			ch->local_state = SMUX_LCH_LOCAL_CLOSED;
+			meta_disconnected.disconnected.is_ssr = 0;
+			schedule_notify(lcid, SMUX_DISCONNECTED,
+				&meta_disconnected);
+		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
+			/* Notify client of failed write */
+			union notifier_metadata meta_write;
+
+			meta_write.write.pkt_priv = pkt->priv;
+			meta_write.write.buffer = pkt->payload;
+			meta_write.write.len = pkt->hdr.payload_len;
+			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
+		}
+		smux_free_pkt(pkt);
+	}
+	spin_unlock(&ch->tx_lock_lhb2);
+
+	/* Send Close Command */
+	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
+		ch->local_state == SMUX_LCH_LOCAL_OPENING) {
+		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
+				ch->local_state,
+				SMUX_LCH_LOCAL_CLOSING);
+
+		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
+		pkt = smux_alloc_pkt();
+		if (pkt) {
+			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
+			pkt->hdr.flags = 0;
+			pkt->hdr.lcid = lcid;
+			pkt->hdr.payload_len = 0;
+			pkt->hdr.pad_len = 0;
+			smux_tx_queue(pkt, ch, 0);
+			tx_ready = 1;
+		} else {
+			pr_err("%s: pkt allocation failed\n", __func__);
+			ret = -ENOMEM;
+		}
+	}
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ *           SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called,
+ * but the data will wait in the transmit queue until the channel has
+ * been fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
+{
+	struct smux_lch_t *ch;
+	/*
+	 * Fix: pkt must start as NULL.  The early validation paths jump
+	 * to the out label before pkt is ever assigned, and the previous
+	 * code then called smux_free_pkt() on an uninitialized pointer.
+	 */
+	struct smux_pkt_t *pkt = NULL;
+	int tx_ready = 0;
+	unsigned long flags;
+	int ret;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
+		ch->local_state != SMUX_LCH_LOCAL_OPENING) {
+		pr_err("%s: hdr.invalid local state %d channel %d\n",
+					__func__, ch->local_state, lcid);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
+		pr_err("%s: payload %d too large\n",
+				__func__, len);
+		ret = -E2BIG;
+		goto out;
+	}
+
+	pkt = smux_alloc_pkt();
+	if (!pkt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	pkt->hdr.cmd = SMUX_CMD_DATA;
+	pkt->hdr.lcid = lcid;
+	pkt->hdr.flags = 0;
+	pkt->hdr.payload_len = len;
+	pkt->payload = (void *)data;
+	pkt->priv = pkt_priv;
+	pkt->hdr.pad_len = 0;
+
+	spin_lock(&ch->tx_lock_lhb2);
+	/* verify high watermark */
+	SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
+
+	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+		pr_err("%s: ch %d high watermark %d exceeded %d\n",
+				__func__, lcid, SMUX_WM_HIGH,
+				ch->tx_pending_data_cnt);
+		ret = -EAGAIN;
+		goto out_inner;
+	}
+
+	/* queue packet for transmit */
+	if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+		ch->notify_lwm = 1;
+		pr_err("%s: high watermark hit\n", __func__);
+		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
+	}
+	list_add_tail(&pkt->list, &ch->tx_queue);
+
+	/* add to ready list */
+	if (IS_FULLY_OPENED(ch))
+		tx_ready = 1;
+
+	ret = 0;
+
+out_inner:
+	spin_unlock(&ch->tx_lock_lhb2);
+
+out:
+	/* only free a packet that was actually allocated on this path */
+	if (ret && pkt)
+		smux_free_pkt(pkt);
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	if (tx_ready)
+		list_channel(ch);
+
+	return ret;
+}
+
+/**
+ * Returns true if the TX queue is currently full (high water mark).
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is not full
+ *          1 if it is full
+ *          < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid)
+{
+	struct smux_lch_t *ch;
+	unsigned long flags;
+	int full;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+
+	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+	full = (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) ? 1 : 0;
+	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+	return full;
+}
+
+/**
+ * Returns true if the TX queue has space for more packets (it is at or
+ * below the low water mark).
+ *
+ * @lcid Logical channel ID
+ * @returns 0 if channel is above low watermark
+ *          1 if it's at or below the low watermark
+ *          < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid)
+{
+	struct smux_lch_t *ch;
+	unsigned long flags;
+	int low;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+
+	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
+	low = (ch->tx_pending_data_cnt <= SMUX_WM_LOW) ? 1 : 0;
+	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
+
+	return low;
+}
+
+/**
+ * Queue a TIOCM status update for transmission.
+ *
+ * @ch Channel for update
+ *
+ * @returns 0 for success, <0 for failure
+ *
+ * Channel lock must be held before calling.
+ */
+static int smux_send_status_cmd(struct smux_lch_t *ch)
+{
+	struct smux_pkt_t *pkt;
+
+	if (!ch)
+		return -EINVAL;
+
+	pkt = smux_alloc_pkt();
+	if (!pkt)
+		return -ENOMEM;
+
+	/* the current local TIOCM bits travel in the flags field */
+	pkt->hdr.cmd = SMUX_CMD_STATUS;
+	pkt->hdr.lcid = ch->lcid;
+	pkt->hdr.flags = ch->local_tiocm;
+	pkt->hdr.payload_len = 0;
+	pkt->hdr.pad_len = 0;
+	smux_tx_queue(pkt, ch, 0);
+
+	return 0;
+}
+
+/**
+ * Internal helper function for getting the TIOCM status with
+ * state_lock_lhb1 already locked.
+ *
+ * @ch Channel pointer
+ *
+ * @returns TIOCM status
+ */
+static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
+{
+	long status = 0x0;
+
+	/* bits driven by the remote side */
+	if (ch->remote_tiocm & SMUX_CMD_STATUS_RTC)
+		status |= TIOCM_DSR;
+	if (ch->remote_tiocm & SMUX_CMD_STATUS_RTR)
+		status |= TIOCM_CTS;
+	if (ch->remote_tiocm & SMUX_CMD_STATUS_RI)
+		status |= TIOCM_RI;
+	if (ch->remote_tiocm & SMUX_CMD_STATUS_DCD)
+		status |= TIOCM_CD;
+
+	/* bits driven locally */
+	if (ch->local_tiocm & SMUX_CMD_STATUS_RTC)
+		status |= TIOCM_DTR;
+	if (ch->local_tiocm & SMUX_CMD_STATUS_RTR)
+		status |= TIOCM_RTS;
+
+	return status;
+}
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ *          < 0  Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid)
+{
+	struct smux_lch_t *ch;
+	unsigned long flags;
+	long bits;
+
+	if (smux_assert_lch_id(lcid))
+		return -ENXIO;
+
+	ch = &smux_lch[lcid];
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	bits = msm_smux_tiocm_get_atomic(ch);
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	return bits;
+}
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ struct smux_lch_t *ch;
+ unsigned long flags;
+ uint8_t old_status;
+ uint8_t status_set = 0x0;
+ uint8_t status_clear = 0x0;
+ int tx_ready = 0;
+ int ret = 0;
+
+ if (smux_assert_lch_id(lcid))
+ return -ENXIO;
+
+ ch = &smux_lch[lcid];
+ spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+
+ status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
+ status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
+ status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
+ status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
+
+ old_status = ch->local_tiocm;
+ ch->local_tiocm |= status_set;
+ ch->local_tiocm &= ~status_clear;
+
+ if (ch->local_tiocm != old_status) {
+ ret = smux_send_status_cmd(ch);
+ tx_ready = 1;
+ }
+ spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+ if (tx_ready)
+ list_channel(ch);
+
+ return ret;
+}
+
+/**********************************************************************/
+/* Line Discipline Interface */
+/**********************************************************************/
+static int smuxld_open(struct tty_struct *tty)
+{
+ int i;
+ int tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!smux.is_initialized)
+ return -ENODEV;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count) {
+ pr_err("%s: %p multiple instances not supported\n",
+ __func__, tty);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ++smux.ld_open_count;
+ if (tty->ops->write == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* connect to TTY */
+ smux.tty = tty;
+ tty->disc_data = &smux;
+ tty->receive_room = TTY_RECEIVE_ROOM;
+ tty_driver_flush_buffer(tty);
+
+ /* power-down the UART if we are idle */
+ spin_lock(&smux.tx_lock_lha2);
+ if (smux.power_state == SMUX_PWR_OFF) {
+ SMUX_DBG("%s: powering off uart\n", __func__);
+ smux.power_state = SMUX_PWR_OFF_FLUSH;
+ spin_unlock(&smux.tx_lock_lha2);
+ queue_work(smux_tx_wq, &smux_inactivity_work);
+ } else {
+ spin_unlock(&smux.tx_lock_lha2);
+ }
+
+ /* register platform devices */
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
+ tmp = platform_device_register(&smux_devs[i]);
+ if (tmp)
+ pr_err("%s: error %d registering device %s\n",
+ __func__, tmp, smux_devs[i].name);
+ }
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+ return ret;
+}
+
+static void smuxld_close(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&smux.lock_lha0, flags);
+ if (smux.ld_open_count <= 0) {
+ pr_err("%s: invalid ld count %d\n", __func__,
+ smux.ld_open_count);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
+ platform_device_unregister(&smux_devs[i]);
+
+ --smux.ld_open_count;
+
+out:
+ spin_unlock_irqrestore(&smux.lock_lha0, flags);
+}
+
+/**
+ * Receive data from TTY Line Discipline.
+ *
+ * @tty TTY structure
+ * @cp Character data
+ * @fp Flag data
+ * @count Size of character and flag data
+ */
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ int i;
+ int last_idx = 0;
+ const char *tty_name = NULL;
+ char *f;
+
+ if (smux_debug_mask & MSM_SMUX_DEBUG)
+ print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
+ 16, 1, cp, count, true);
+
+ /* verify error flags */
+ for (i = 0, f = fp; i < count; ++i, ++f) {
+ if (*f != TTY_NORMAL) {
+ if (tty)
+ tty_name = tty->name;
+ pr_err("%s: TTY %s Error %d (%s)\n", __func__,
+ tty_name, *f, tty_flag_to_str(*f));
+
+ /* feed all previous valid data to the parser */
+ smux_rx_state_machine(cp + last_idx, i - last_idx,
+ TTY_NORMAL);
+
+ /* feed bad data to parser */
+ smux_rx_state_machine(cp + i, 1, *f);
+ last_idx = i + 1;
+ }
+ }
+
+ /* feed data to RX state machine */
+ smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
+}
+
+static void smuxld_flush_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *tbl)
+{
+ pr_err("%s: not supported\n", __func__);
+ return -ENODEV;
+}
+
+static void smuxld_write_wakeup(struct tty_struct *tty)
+{
+ pr_err("%s: not supported\n", __func__);
+}
+
+static struct tty_ldisc_ops smux_ldisc_ops = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_smux",
+ .open = smuxld_open,
+ .close = smuxld_close,
+ .flush_buffer = smuxld_flush_buffer,
+ .chars_in_buffer = smuxld_chars_in_buffer,
+ .read = smuxld_read,
+ .write = smuxld_write,
+ .ioctl = smuxld_ioctl,
+ .poll = smuxld_poll,
+ .receive_buf = smuxld_receive_buf,
+ .write_wakeup = smuxld_write_wakeup
+};
+
+static int __init smux_init(void)
+{
+ int ret;
+
+ spin_lock_init(&smux.lock_lha0);
+
+ spin_lock_init(&smux.rx_lock_lha1);
+ smux.rx_state = SMUX_RX_IDLE;
+ smux.power_state = SMUX_PWR_OFF;
+ smux.pwr_wakeup_delay_us = 1;
+ smux.powerdown_enabled = 0;
+ smux.rx_activity_flag = 0;
+ smux.tx_activity_flag = 0;
+ smux.recv_len = 0;
+ smux.tty = NULL;
+ smux.ld_open_count = 0;
+ smux.in_reset = 0;
+ smux.is_initialized = 1;
+ smux_byte_loopback = 0;
+
+ spin_lock_init(&smux.tx_lock_lha2);
+ INIT_LIST_HEAD(&smux.lch_tx_ready_list);
+
+ ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
+ if (ret != 0) {
+ pr_err("%s: error %d registering line discipline\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = lch_init();
+ if (ret != 0) {
+ pr_err("%s: lch_init failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit smux_exit(void)
+{
+ int ret;
+
+ ret = tty_unregister_ldisc(N_SMUX);
+ if (ret != 0) {
+ pr_err("%s error %d unregistering line discipline\n",
+ __func__, ret);
+ return;
+ }
+}
+
+module_init(smux_init);
+module_exit(smux_exit);
+
+MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_LDISC(N_SMUX);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index f559160..d0b8323 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -769,8 +769,7 @@
*/
mb();
/* do discard flush */
- msm_dmov_stop_cmd(msm_uport->dma_rx_channel,
- &msm_uport->rx.xfer, 0);
+ msm_dmov_flush(msm_uport->dma_rx_channel, 0);
}
msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
@@ -831,8 +830,7 @@
if (msm_uport->rx.flush == FLUSH_NONE) {
wake_lock(&msm_uport->rx.wake_lock);
/* do discard flush */
- msm_dmov_stop_cmd(msm_uport->dma_rx_channel,
- &msm_uport->rx.xfer, 0);
+ msm_dmov_flush(msm_uport->dma_rx_channel, 0);
}
if (msm_uport->rx.flush != FLUSH_SHUTDOWN)
msm_uport->rx.flush = FLUSH_STOP;
@@ -1966,12 +1964,25 @@
INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
mutex_init(&msm_uport->clk_mutex);
+ clk_prepare_enable(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_prepare_enable(msm_uport->pclk);
+
ret = uartdm_init_port(uport);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ clk_disable_unprepare(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_disable_unprepare(msm_uport->pclk);
return ret;
+ }
/* configure the CR Protection to Enable */
msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
+
+ clk_disable_unprepare(msm_uport->clk);
+ if (msm_uport->pclk)
+ clk_disable_unprepare(msm_uport->pclk);
+
/*
* Enable Command register protection before going ahead as this hw
* configuration makes sure that issued cmd to CR register gets complete
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
new file mode 100644
index 0000000..69adbf3
--- /dev/null
+++ b/drivers/tty/smux_ctl.c
@@ -0,0 +1,937 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Serial Mux Control Driver -- Provides a binary serial muxed control
+ * port interface.
+ */
+
+#define DEBUG
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include <asm/ioctls.h>
+
+#define MAX_WRITE_RETRY 5
+#define MAGIC_NO_V1 0x33FC
+#define DEVICE_NAME "smuxctl"
+#define SMUX_CTL_MAX_BUF_SIZE 2048
+#define SMUX_CTL_MODULE_NAME "smux_ctl"
+#define DEBUG
+
+static int msm_smux_ctl_debug_mask;
+module_param_named(debug_mask, msm_smux_ctl_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static uint32_t smux_ctl_ch_id[] = {
+ SMUX_DATA_CTL_0,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_CSVT_CTL_0
+};
+
+#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
+
+struct smux_ctl_dev {
+ int id;
+ char name[10];
+ struct cdev cdev;
+ struct device *devicep;
+ struct mutex dev_lock;
+ atomic_t ref_count;
+ int state;
+ int is_channel_reset;
+ int is_high_wm;
+ int write_pending;
+
+ struct mutex rx_lock;
+ uint32_t read_avail;
+ struct list_head rx_list;
+
+ wait_queue_head_t read_wait_queue;
+ wait_queue_head_t write_wait_queue;
+
+ struct {
+ uint32_t bytes_tx;
+ uint32_t bytes_rx;
+ uint32_t pkts_tx;
+ uint32_t pkts_rx;
+ uint32_t cnt_ssr;
+ uint32_t cnt_read_fail;
+ uint32_t cnt_write_fail;
+ uint32_t cnt_high_wm_hit;
+ } stats;
+
+} *smux_ctl_devp[SMUX_CTL_NUM_CHANNELS];
+
+struct smux_ctl_pkt {
+ int data_size;
+ void *data;
+};
+
+struct smux_ctl_list_elem {
+ struct list_head list;
+ struct smux_ctl_pkt ctl_pkt;
+};
+
+struct class *smux_ctl_classp;
+static dev_t smux_ctl_number;
+static uint32_t smux_ctl_inited;
+
+enum {
+ MSM_SMUX_CTL_DEBUG = 1U << 0,
+ MSM_SMUX_CTL_DUMP_BUFFER = 1U << 1,
+};
+
+#if defined(DEBUG)
+
+static const char *smux_ctl_event_str[] = {
+ "SMUX_CONNECTED",
+ "SMUX_DISCONNECTED",
+ "SMUX_READ_DONE",
+ "SMUX_READ_FAIL",
+ "SMUX_WRITE_DONE",
+ "SMUX_WRITE_FAIL",
+ "SMUX_TIOCM_UPDATE",
+ "SMUX_LOW_WM_HIT",
+ "SMUX_HIGH_WM_HIT",
+};
+
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DUMP_BUFFER) { \
+ int i; \
+ pr_err("%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ pr_err("%.2x", buf[i]); \
+ pr_err("\n"); \
+ } \
+} while (0)
+
+#define SMUXCTL_DBG(x...) \
+do { \
+ if (msm_smux_ctl_debug_mask & MSM_SMUX_CTL_DEBUG) \
+ pr_err(x); \
+} while (0)
+
+
+#else
+#define SMUXCTL_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#define SMUXCTL_DBG(x...) do {} while (0)
+#endif
+
+#if defined(DEBUG_LOOPBACK)
+#define SMUXCTL_SET_LOOPBACK(lcid) \
+ msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0)
+#else
+#define SMUXCTL_SET_LOOPBACK(lcid) do {} while (0)
+#endif
+
+static int get_ctl_dev_index(int id)
+{
+ int dev_index;
+ for (dev_index = 0; dev_index < SMUX_CTL_NUM_CHANNELS; dev_index++) {
+ if (smux_ctl_ch_id[dev_index] == id)
+ return dev_index;
+ }
+ return -ENODEV;
+}
+
+static int smux_ctl_get_rx_buf_cb(void *priv, void **pkt_priv,
+ void **buffer, int size)
+{
+ void *buf = NULL;
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ if (!buffer || 0 >= size)
+ return -EINVAL;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not "
+ "exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Allocating Rx buf size %d "
+ "for ch%d\n",
+ __func__, size, smux_ctl_devp[dev_index]->id);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: buffer allocation failed: "
+ "Ch%d, size %d ", __func__, id, size);
+ return -ENOMEM;
+ }
+
+ *buffer = buf;
+ *pkt_priv = NULL;
+ return 0;
+
+}
+
+void smux_ctl_notify_cb(void *priv, int event_type, const void *metadata)
+{
+ int id = ((struct smux_ctl_dev *)(priv))->id;
+ struct smux_ctl_list_elem *list_elem = NULL;
+ int dev_index;
+ void *data;
+ int len;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d is not exported "
+ "to user-space\n", __func__, id);
+ return;
+ }
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: Ch%d, Event %d (%s)\n",
+ __func__, smux_ctl_devp[dev_index]->id,
+ event_type, smux_ctl_event_str[event_type]);
+
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_CONNECTED;
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ smux_ctl_devp[dev_index]->is_channel_reset = 0;
+ smux_ctl_devp[dev_index]->read_avail = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_DISCONNECTED:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->state = SMUX_DISCONNECTED;
+ smux_ctl_devp[dev_index]->is_channel_reset =
+ ((struct smux_meta_disconnected *)metadata)->is_ssr;
+ if (smux_ctl_devp[dev_index]->is_channel_reset)
+ smux_ctl_devp[dev_index]->stats.cnt_ssr++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_FAIL:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_read_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_READ_DONE:
+ data = ((struct smux_meta_read *)metadata)->buffer;
+ len = ((struct smux_meta_read *)metadata)->len;
+
+ if (data && len > 0) {
+ list_elem = kmalloc(sizeof(struct smux_ctl_list_elem),
+ GFP_KERNEL);
+ if (list_elem) {
+ list_elem->ctl_pkt.data = data;
+ list_elem->ctl_pkt.data_size = len;
+
+ mutex_lock(&smux_ctl_devp[dev_index]->rx_lock);
+ list_add_tail(&list_elem->list,
+ &smux_ctl_devp[dev_index]->rx_list);
+ smux_ctl_devp[dev_index]->read_avail += len;
+ mutex_unlock(
+ &smux_ctl_devp[dev_index]->rx_lock);
+ } else {
+ kfree(data);
+ }
+ }
+
+ wake_up(&smux_ctl_devp[dev_index]->read_wait_queue);
+ break;
+
+ case SMUX_WRITE_DONE:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->write_pending = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_WRITE_FAIL:
+ data = ((struct smux_meta_write *)metadata)->buffer;
+ kfree(data);
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->stats.cnt_write_fail++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 0;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ wake_up(&smux_ctl_devp[dev_index]->write_wait_queue);
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+ smux_ctl_devp[dev_index]->is_high_wm = 1;
+ smux_ctl_devp[dev_index]->stats.cnt_high_wm_hit++;
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+ break;
+
+ case SMUX_TIOCM_UPDATE:
+ default:
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Event %d not supported\n",
+ __func__, event_type);
+ break;
+
+ }
+
+}
+
+int smux_ctl_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smux_ctl_dev *devp;
+
+ if (!smux_ctl_inited)
+ return -EIO;
+
+ devp = container_of(inode->i_cdev, struct smux_ctl_dev, cdev);
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ if (1 == atomic_add_return(1, &devp->ref_count)) {
+
+ SMUXCTL_SET_LOOPBACK(devp->id);
+ r = msm_smux_open(devp->id,
+ devp,
+ smux_ctl_notify_cb,
+ smux_ctl_get_rx_buf_cb);
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: smux_open failed "
+ "for smuxctl%d with rc %d\n",
+ __func__, devp->id, r);
+ atomic_dec(&devp->ref_count);
+ return r;
+ }
+
+ r = wait_event_interruptible_timeout(
+ devp->write_wait_queue,
+ (devp->state == SMUX_CONNECTED),
+ (5 * HZ));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "SMUX open timed out: %d, LCID %d\n",
+ __func__, r, devp->id);
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+
+ } else if (devp->state != SMUX_CONNECTED) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Invalid open notification\n", __func__);
+ r = -ENODEV;
+ atomic_dec(&devp->ref_count);
+ msm_smux_close(devp->id);
+ return r;
+ }
+ }
+
+ file->private_data = devp;
+ return 0;
+}
+
+int smux_ctl_release(struct inode *inode, struct file *file)
+{
+ struct smux_ctl_dev *devp;
+ struct smux_ctl_list_elem *list_elem = NULL;
+
+ devp = file->private_data;
+ if (!devp)
+ return -EINVAL;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ mutex_lock(&devp->dev_lock);
+ if (atomic_dec_and_test(&devp->ref_count)) {
+ mutex_lock(&devp->rx_lock);
+ while (!list_empty(&devp->rx_list)) {
+ list_elem = list_first_entry(
+ &devp->rx_list,
+ struct smux_ctl_list_elem,
+ list);
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ }
+ devp->read_avail = 0;
+ mutex_unlock(&devp->rx_lock);
+ msm_smux_close(devp->id);
+ }
+ mutex_unlock(&devp->dev_lock);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static int smux_ctl_readable(int id)
+{
+ int r;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: Ch%d "
+ "is not exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ if (signal_pending(current))
+ r = -ERESTARTSYS;
+
+ if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+ smux_ctl_devp[dev_index]->is_channel_reset != 0)
+ r = -ENETRESET;
+
+ else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+ r = -ENODEV;
+
+ else
+ r = smux_ctl_devp[dev_index]->read_avail;
+
+
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ return r;
+
+}
+
+ssize_t smux_ctl_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, id, bytes_to_read, read_err;
+ struct smux_ctl_dev *devp;
+ struct smux_ctl_list_elem *list_elem = NULL;
+
+ devp = file->private_data;
+
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: read from ch%d\n",
+ __func__, devp->id);
+
+ id = devp->id;
+ mutex_lock(&devp->rx_lock);
+ while (devp->read_avail <= 0) {
+ mutex_unlock(&devp->rx_lock);
+ r = wait_event_interruptible(devp->read_wait_queue,
+ 0 != (read_err = smux_ctl_readable(id)));
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ "wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ return r;
+ }
+
+ if (read_err < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ " Read block failed for Ch%d, err %d\n",
+ __func__, devp->id, read_err);
+ return read_err;
+ }
+
+ mutex_lock(&devp->rx_lock);
+ }
+
+ if (list_empty(&devp->rx_list)) {
+ mutex_unlock(&devp->rx_lock);
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "Nothing in ch%d's rx_list\n", __func__,
+ devp->id);
+ return -EAGAIN;
+ }
+
+ list_elem = list_first_entry(&devp->rx_list,
+ struct smux_ctl_list_elem, list);
+ bytes_to_read = (uint32_t)(list_elem->ctl_pkt.data_size);
+ if (bytes_to_read > count) {
+ mutex_unlock(&devp->rx_lock);
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Packet size %d > buf size %d\n", __func__,
+ bytes_to_read, count);
+ return -ENOMEM;
+ }
+
+ if (copy_to_user(buf, list_elem->ctl_pkt.data, bytes_to_read)) {
+ mutex_unlock(&devp->rx_lock);
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "copy_to_user failed for ch%d\n", __func__,
+ devp->id);
+ return -EFAULT;
+ }
+
+ devp->read_avail -= bytes_to_read;
+ list_del(&list_elem->list);
+ kfree(list_elem->ctl_pkt.data);
+ kfree(list_elem);
+ devp->stats.pkts_rx++;
+ devp->stats.bytes_rx += bytes_to_read;
+ mutex_unlock(&devp->rx_lock);
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "Returning %d bytes to ch%d\n", __func__,
+ bytes_to_read, devp->id);
+ return bytes_to_read;
+}
+
+static int smux_ctl_writeable(int id)
+{
+ int r;
+ int dev_index;
+
+ if (id < 0 || id > smux_ctl_ch_id[SMUX_CTL_NUM_CHANNELS - 1])
+ return -ENODEV;
+
+ dev_index = get_ctl_dev_index(id);
+ if (dev_index < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "Ch%d is not exported to user-space\n",
+ __func__, id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ if (signal_pending(current))
+ r = -ERESTARTSYS;
+ else if (smux_ctl_devp[dev_index]->state == SMUX_DISCONNECTED &&
+ smux_ctl_devp[dev_index]->is_channel_reset != 0)
+ r = -ENETRESET;
+ else if (smux_ctl_devp[dev_index]->state != SMUX_CONNECTED)
+ r = -ENODEV;
+ else if (smux_ctl_devp[dev_index]->is_high_wm ||
+ smux_ctl_devp[dev_index]->write_pending)
+ r = 0;
+ else
+ r = SMUX_CTL_MAX_BUF_SIZE;
+
+ mutex_unlock(&smux_ctl_devp[dev_index]->dev_lock);
+
+ return r;
+
+}
+
+ssize_t smux_ctl_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, id, write_err;
+ char *temp_buf;
+ struct smux_ctl_dev *devp;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ devp = file->private_data;
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: writing %i bytes on ch%d\n",
+ __func__, count, devp->id);
+
+ id = devp->id;
+ r = wait_event_interruptible(devp->write_wait_queue,
+ 0 != (write_err = smux_ctl_writeable(id)));
+
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ return r;
+ }
+
+ if (write_err < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s:"
+ "Write block failed for Ch%d, err %d\n",
+ __func__, devp->id, write_err);
+ return write_err;
+ }
+
+ temp_buf = kmalloc(count, GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: temp_buf alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(temp_buf, buf, count)) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: copy_from_user failed\n", __func__);
+ kfree(temp_buf);
+ return -EFAULT;
+ }
+
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 1;
+ mutex_unlock(&devp->dev_lock);
+
+ r = msm_smux_write(id, NULL, (void *)temp_buf, count);
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s: smux_write on Ch%d failed, err %d\n",
+ __func__, id, r);
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ mutex_unlock(&devp->dev_lock);
+ return r;
+ }
+
+ r = wait_event_interruptible(devp->write_wait_queue,
+ 0 != (write_err = smux_ctl_writeable(id)));
+ if (r < 0) {
+ pr_err(SMUX_CTL_MODULE_NAME " :%s: wait_event_interruptible "
+ "ret %i\n", __func__, r);
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ mutex_unlock(&devp->dev_lock);
+ return r;
+ }
+
+ mutex_lock(&devp->dev_lock);
+ devp->write_pending = 0;
+ devp->stats.pkts_tx++;
+ devp->stats.bytes_tx += count;
+ mutex_unlock(&devp->dev_lock);
+ return count;
+}
+
+static long smux_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct smux_ctl_dev *devp;
+
+ devp = file->private_data;
+ if (!devp)
+ return -ENODEV;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d device\n",
+ __func__, devp->id);
+
+ switch (cmd) {
+ case TIOCMGET:
+ ret = msm_smux_tiocm_get(devp->id);
+ break;
+ case TIOCMSET:
+ ret = msm_smux_tiocm_set(devp->id, arg, ~arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations smux_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = smux_ctl_open,
+ .release = smux_ctl_release,
+ .read = smux_ctl_read,
+ .write = smux_ctl_write,
+ .unlocked_ioctl = smux_ctl_ioctl,
+};
+
+static int smux_ctl_probe(struct platform_device *pdev)
+{
+ int i;
+ int r;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ smux_ctl_devp[i] = kzalloc(sizeof(struct smux_ctl_dev),
+ GFP_KERNEL);
+ if (!smux_ctl_devp[i]) {
+ pr_err(SMUX_CTL_MODULE_NAME
+ ": %s kmalloc() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ goto error0;
+ }
+
+ smux_ctl_devp[i]->id = smux_ctl_ch_id[i];
+ atomic_set(&smux_ctl_devp[i]->ref_count, 0);
+ smux_ctl_devp[i]->is_high_wm = 0;
+ smux_ctl_devp[i]->write_pending = 0;
+ smux_ctl_devp[i]->is_channel_reset = 0;
+ smux_ctl_devp[i]->state = SMUX_DISCONNECTED;
+ smux_ctl_devp[i]->read_avail = 0;
+
+ smux_ctl_devp[i]->stats.bytes_tx = 0;
+ smux_ctl_devp[i]->stats.bytes_rx = 0;
+ smux_ctl_devp[i]->stats.pkts_tx = 0;
+ smux_ctl_devp[i]->stats.pkts_rx = 0;
+ smux_ctl_devp[i]->stats.cnt_ssr = 0;
+ smux_ctl_devp[i]->stats.cnt_read_fail = 0;
+ smux_ctl_devp[i]->stats.cnt_write_fail = 0;
+ smux_ctl_devp[i]->stats.cnt_high_wm_hit = 0;
+
+ mutex_init(&smux_ctl_devp[i]->dev_lock);
+ init_waitqueue_head(&smux_ctl_devp[i]->read_wait_queue);
+ init_waitqueue_head(&smux_ctl_devp[i]->write_wait_queue);
+ mutex_init(&smux_ctl_devp[i]->rx_lock);
+ INIT_LIST_HEAD(&smux_ctl_devp[i]->rx_list);
+ }
+
+ r = alloc_chrdev_region(&smux_ctl_number, 0, SMUX_CTL_NUM_CHANNELS,
+ DEVICE_NAME);
+ if (IS_ERR_VALUE(r)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "alloc_chrdev_region() ret %i.\n",
+ __func__, r);
+ goto error0;
+ }
+
+ smux_ctl_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smux_ctl_classp)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "class_create() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ goto error1;
+ }
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ cdev_init(&smux_ctl_devp[i]->cdev, &smux_ctl_fops);
+ smux_ctl_devp[i]->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smux_ctl_devp[i]->cdev, (smux_ctl_number + i), 1);
+
+ if (IS_ERR_VALUE(r)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "cdev_add() ret %i\n", __func__, r);
+ kfree(smux_ctl_devp[i]);
+ goto error2;
+ }
+
+ smux_ctl_devp[i]->devicep =
+ device_create(smux_ctl_classp, NULL,
+ (smux_ctl_number + i), NULL,
+ DEVICE_NAME "%d", smux_ctl_ch_id[i]);
+
+ if (IS_ERR(smux_ctl_devp[i]->devicep)) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: "
+ "device_create() ENOMEM\n", __func__);
+ r = -ENOMEM;
+ cdev_del(&smux_ctl_devp[i]->cdev);
+ kfree(smux_ctl_devp[i]);
+ goto error2;
+ }
+ }
+
+ smux_ctl_inited = 1;
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s: "
+ "SMUX Control Port Driver Initialized.\n", __func__);
+ return 0;
+
+error2:
+ while (--i >= 0) {
+ cdev_del(&smux_ctl_devp[i]->cdev);
+ device_destroy(smux_ctl_classp,
+ MKDEV(MAJOR(smux_ctl_number), i));
+ }
+
+ class_destroy(smux_ctl_classp);
+ i = SMUX_CTL_NUM_CHANNELS;
+
+error1:
+ unregister_chrdev_region(MAJOR(smux_ctl_number),
+ SMUX_CTL_NUM_CHANNELS);
+
+error0:
+ while (--i >= 0)
+ kfree(smux_ctl_devp[i]);
+
+ return r;
+}
+
+static int smux_ctl_remove(struct platform_device *pdev)
+{
+ int i;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ cdev_del(&smux_ctl_devp[i]->cdev);
+ kfree(smux_ctl_devp[i]);
+ device_destroy(smux_ctl_classp,
+ MKDEV(MAJOR(smux_ctl_number), i));
+ }
+ class_destroy(smux_ctl_classp);
+ unregister_chrdev_region(MAJOR(smux_ctl_number),
+ SMUX_CTL_NUM_CHANNELS);
+
+ return 0;
+}
+
+static struct platform_driver smux_ctl_driver = {
+ .probe = smux_ctl_probe,
+ .remove = smux_ctl_remove,
+ .driver = {
+ .name = "SMUX_CTL",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init smux_ctl_init(void)
+{
+ msm_smux_ctl_debug_mask = MSM_SMUX_CTL_DEBUG | MSM_SMUX_CTL_DUMP_BUFFER;
+
+ SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s Begins\n", __func__);
+ return platform_driver_register(&smux_ctl_driver);
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bsize = 0;
+ int i;
+ if (!smux_ctl_inited) {
+ pr_err(SMUX_CTL_MODULE_NAME ": %s: SMUX_CTL not yet inited\n",
+ __func__);
+ return -EIO;
+ }
+
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "SMUX_CTL Channel States:\n");
+
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "Ch%02d %s RefCnt=%01d State=%02d "
+ "SSR=%02d HighWM=%02d ReadAvail=%04d WritePending=%02d\n",
+ smux_ctl_devp[i]->id,
+ smux_ctl_devp[i]->name,
+ atomic_read(&smux_ctl_devp[i]->ref_count),
+ smux_ctl_devp[i]->state,
+ smux_ctl_devp[i]->is_channel_reset,
+ smux_ctl_devp[i]->is_high_wm,
+ smux_ctl_devp[i]->read_avail,
+ smux_ctl_devp[i]->write_pending);
+ }
+
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "\nSMUX_CTL Channel Statistics:\n");
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ bsize += scnprintf(debug_buffer + bsize, DEBUG_BUFMAX - bsize,
+ "Ch%02d %s BytesTX=%08d "
+ "BytesRx=%08d PktsTx=%04d PktsRx=%04d "
+ "CntSSR=%02d CntHighWM=%02d "
+ "CntReadFail=%02d CntWriteFailed=%02d\n",
+ smux_ctl_devp[i]->id,
+ smux_ctl_devp[i]->name,
+ smux_ctl_devp[i]->stats.bytes_tx,
+ smux_ctl_devp[i]->stats.bytes_rx,
+ smux_ctl_devp[i]->stats.pkts_tx,
+ smux_ctl_devp[i]->stats.pkts_rx,
+ smux_ctl_devp[i]->stats.cnt_ssr,
+ smux_ctl_devp[i]->stats.cnt_high_wm_hit,
+ smux_ctl_devp[i]->stats.cnt_read_fail,
+ smux_ctl_devp[i]->stats.cnt_write_fail);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static int __init smux_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smux_ctl", 0);
+ if (!IS_ERR(dent))
+ debugfs_create_file("smux_ctl_state", 0444, dent,
+ NULL, &debug_ops);
+
+ return 0;
+}
+
+late_initcall(smux_debugfs_init);
+#endif
+
+module_init(smux_ctl_init);
+MODULE_DESCRIPTION("MSM SMUX Control Port");
+MODULE_LICENSE("GPL v2");
+
+
diff --git a/drivers/tty/smux_loopback.c b/drivers/tty/smux_loopback.c
new file mode 100644
index 0000000..52ce17f
--- /dev/null
+++ b/drivers/tty/smux_loopback.c
@@ -0,0 +1,289 @@
+/* drivers/tty/smux_loopback.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define SMUX_LOOP_FIFO_SIZE 128
+
+static void smux_loopback_rx_worker(struct work_struct *work);
+static struct workqueue_struct *smux_loopback_wq;
+static DECLARE_WORK(smux_loopback_work, smux_loopback_rx_worker);
+static struct kfifo smux_loop_pkt_fifo;
+static DEFINE_SPINLOCK(hw_fn_lock);
+
+/**
+ * Initialize loopback framework (called by n_smux.c).
+ */
+int smux_loopback_init(void)
+{
+ int ret = 0;
+
+ spin_lock_init(&hw_fn_lock);
+ smux_loopback_wq = create_singlethread_workqueue("smux_loopback_wq");
+	if (smux_loopback_wq == NULL) {
+ pr_err("%s: failed to create workqueue\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret |= kfifo_alloc(&smux_loop_pkt_fifo,
+ SMUX_LOOP_FIFO_SIZE * sizeof(struct smux_pkt_t *),
+ GFP_KERNEL);
+
+ return ret;
+}
+
+/**
+ * Simulate a write to the TTY hardware by duplicating
+ * the TX packet and putting it into the RX queue.
+ *
+ * @pkt Packet to write
+ *
+ * @returns 0 on success
+ */
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+ struct smux_pkt_t *send_pkt;
+ unsigned long flags;
+ int i;
+ int ret;
+
+ /* duplicate packet */
+ send_pkt = smux_alloc_pkt();
+ send_pkt->hdr = pkt_ptr->hdr;
+ if (pkt_ptr->hdr.payload_len) {
+ ret = smux_alloc_pkt_payload(send_pkt);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(send_pkt->payload, pkt_ptr->payload,
+ pkt_ptr->hdr.payload_len);
+ }
+
+ /* queue duplicate as pseudo-RX data */
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ i = kfifo_avail(&smux_loop_pkt_fifo);
+ if (i < sizeof(struct smux_pkt_t *)) {
+ pr_err("%s: no space in fifo\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ i = kfifo_in(&smux_loop_pkt_fifo,
+ &send_pkt,
+ sizeof(struct smux_pkt_t *));
+ if (i < 0) {
+ pr_err("%s: fifo error\n", __func__);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ queue_work(smux_loopback_wq, &smux_loopback_work);
+ ret = 0;
+
+unlock:
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+out:
+ return ret;
+}
+
+/**
+ * Receive loopback byte processor.
+ *
+ * @pkt Incoming packet
+ */
+static void smux_loopback_rx_byte(struct smux_pkt_t *pkt)
+{
+ static int simulated_retry_cnt;
+ const char ack = SMUX_WAKEUP_ACK;
+
+ switch (pkt->hdr.flags) {
+ case SMUX_WAKEUP_REQ:
+ /* reply with ACK after appropriate delays */
+ ++simulated_retry_cnt;
+ if (simulated_retry_cnt >= smux_simulate_wakeup_delay) {
+ pr_err("%s: completed %d of %d\n",
+ __func__, simulated_retry_cnt,
+ smux_simulate_wakeup_delay);
+ pr_err("%s: simulated wakeup\n", __func__);
+ simulated_retry_cnt = 0;
+ smux_rx_state_machine(&ack, 1, 0);
+ } else {
+ /* force retry */
+ pr_err("%s: dropping wakeup request %d of %d\n",
+ __func__, simulated_retry_cnt,
+ smux_simulate_wakeup_delay);
+ }
+ break;
+ case SMUX_WAKEUP_ACK:
+ /* this shouldn't happen since we don't send requests */
+ pr_err("%s: wakeup ACK unexpected\n", __func__);
+ break;
+
+ default:
+ /* invalid character */
+ pr_err("%s: invalid character 0x%x\n",
+ __func__, (unsigned)pkt->hdr.flags);
+ break;
+ }
+}
+
+/**
+ * Simulated remote hardware used for local loopback testing.
+ *
+ * @work Not used
+ */
+static void smux_loopback_rx_worker(struct work_struct *work)
+{
+ struct smux_pkt_t *pkt;
+ struct smux_pkt_t reply_pkt;
+ char *data;
+ int len;
+ int lcid;
+ int i;
+ unsigned long flags;
+
+ data = kzalloc(SMUX_MAX_PKT_SIZE, GFP_ATOMIC);
+
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ while (kfifo_len(&smux_loop_pkt_fifo) >= sizeof(struct smux_pkt_t *)) {
+ i = kfifo_out(&smux_loop_pkt_fifo, &pkt,
+ sizeof(struct smux_pkt_t *));
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+
+ if (pkt->hdr.magic != SMUX_MAGIC) {
+ pr_err("%s: invalid magic %x\n", __func__,
+ pkt->hdr.magic);
+ return;
+ }
+
+ lcid = pkt->hdr.lcid;
+ if (smux_assert_lch_id(lcid)) {
+ pr_err("%s: invalid channel id %d\n", __func__, lcid);
+ return;
+ }
+
+ switch (pkt->hdr.cmd) {
+ case SMUX_CMD_OPEN_LCH:
+ if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
+ break;
+
+ /* Reply with Open ACK */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_OPEN_ACK
+ | SMUX_CMD_OPEN_POWER_COLLAPSE;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+
+ /* Send Remote Open */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_CLOSE_LCH:
+		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
+ break;
+
+ /* Reply with Close ACK */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ reply_pkt.hdr.flags = SMUX_CMD_CLOSE_ACK;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+
+ /* Send Remote Close */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
+ reply_pkt.hdr.flags = 0;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.hdr.pad_len = 0;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_DATA:
+ /* Echo back received data */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_DATA;
+ reply_pkt.hdr.flags = 0;
+ reply_pkt.hdr.payload_len = pkt->hdr.payload_len;
+ reply_pkt.payload = pkt->payload;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_STATUS:
+ /* Echo back received status */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_STATUS;
+ reply_pkt.hdr.flags = pkt->hdr.flags;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.payload = NULL;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_PWR_CTL:
+ /* reply with ack */
+ smux_init_pkt(&reply_pkt);
+ reply_pkt.hdr.lcid = lcid;
+ reply_pkt.hdr.cmd = SMUX_CMD_PWR_CTL;
+ reply_pkt.hdr.flags = SMUX_CMD_PWR_CTL_SLEEP_REQ
+ | SMUX_CMD_PWR_CTL_ACK;
+ reply_pkt.hdr.payload_len = 0;
+ reply_pkt.payload = NULL;
+ reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
+ smux_serialize(&reply_pkt, data, &len);
+ smux_rx_state_machine(data, len, 0);
+ break;
+
+ case SMUX_CMD_BYTE:
+ smux_loopback_rx_byte(pkt);
+ break;
+
+ default:
+ pr_err("%s: unknown command %d\n",
+ __func__, pkt->hdr.cmd);
+ break;
+ };
+
+ smux_free_pkt(pkt);
+ spin_lock_irqsave(&hw_fn_lock, flags);
+ }
+ spin_unlock_irqrestore(&hw_fn_lock, flags);
+ kfree(data);
+}
diff --git a/drivers/tty/smux_loopback.h b/drivers/tty/smux_loopback.h
new file mode 100644
index 0000000..85c6c23
--- /dev/null
+++ b/drivers/tty/smux_loopback.h
@@ -0,0 +1,39 @@
+/* drivers/tty/smux_loopback.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_LOOPBACK_H
+#define SMUX_LOOPBACK_H
+
+#include "smux_private.h"
+
+#ifdef CONFIG_N_SMUX_LOOPBACK
+
+int smux_loopback_init(void);
+int smux_tx_loopback(struct smux_pkt_t *pkt_ptr);
+
+#else
+static inline int smux_loopback_init(void)
+{
+ return 0;
+}
+
+static inline int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
+{
+ return -ENODEV;
+}
+
+
+#endif /* CONFIG_N_SMUX_LOOPBACK */
+#endif /* SMUX_LOOPBACK_H */
+
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
new file mode 100644
index 0000000..5ce8fb8
--- /dev/null
+++ b/drivers/tty/smux_private.h
@@ -0,0 +1,115 @@
+/* drivers/tty/smux_private.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_PRIVATE_H
+#define SMUX_PRIVATE_H
+
+#define SMUX_MAX_PKT_SIZE 8192
+
+/* SMUX Protocol Characters */
+#define SMUX_MAGIC 0x33FC
+#define SMUX_MAGIC_WORD1 0xFC
+#define SMUX_MAGIC_WORD2 0x33
+#define SMUX_WAKEUP_REQ 0xFD
+#define SMUX_WAKEUP_ACK 0xFE
+
+/* Unit testing characters */
+#define SMUX_UT_ECHO_REQ 0xF0
+#define SMUX_UT_ECHO_ACK_OK 0xF1
+#define SMUX_UT_ECHO_ACK_FAIL 0xF2
+
+struct tty_struct;
+
+/* Packet header. */
+struct smux_hdr_t {
+ uint16_t magic;
+ uint8_t flags;
+ uint8_t cmd;
+ uint8_t pad_len;
+ uint8_t lcid;
+ uint16_t payload_len;
+};
+
+/* Internal packet structure. */
+struct smux_pkt_t {
+ struct smux_hdr_t hdr;
+ int allocated;
+ unsigned char *payload;
+ int free_payload;
+ struct list_head list;
+ void *priv;
+};
+
+/* SMUX Packet Commands */
+enum {
+ SMUX_CMD_DATA = 0x0,
+ SMUX_CMD_OPEN_LCH = 0x1,
+ SMUX_CMD_CLOSE_LCH = 0x2,
+ SMUX_CMD_STATUS = 0x3,
+ SMUX_CMD_PWR_CTL = 0x4,
+
+ SMUX_CMD_BYTE, /* for internal usage */
+ SMUX_NUM_COMMANDS
+};
+
+/* Open command flags */
+enum {
+ SMUX_CMD_OPEN_ACK = 1 << 0,
+ SMUX_CMD_OPEN_POWER_COLLAPSE = 1 << 1,
+ SMUX_CMD_OPEN_REMOTE_LOOPBACK = 1 << 2,
+};
+
+/* Close command flags */
+enum {
+ SMUX_CMD_CLOSE_ACK = 1 << 0,
+};
+
+/* Power command flags */
+enum {
+ SMUX_CMD_PWR_CTL_ACK = 1 << 0,
+ SMUX_CMD_PWR_CTL_SLEEP_REQ = 1 << 1,
+};
+
+/* Local logical channel states */
+enum {
+ SMUX_LCH_LOCAL_CLOSED,
+ SMUX_LCH_LOCAL_OPENING,
+ SMUX_LCH_LOCAL_OPENED,
+ SMUX_LCH_LOCAL_CLOSING,
+};
+
+/* Remote logical channel states */
+enum {
+ SMUX_LCH_REMOTE_CLOSED,
+ SMUX_LCH_REMOTE_OPENED,
+};
+
+
+int smux_assert_lch_id(uint32_t lcid);
+void smux_init_pkt(struct smux_pkt_t *pkt);
+struct smux_pkt_t *smux_alloc_pkt(void);
+int smux_alloc_pkt_payload(struct smux_pkt_t *pkt);
+void smux_free_pkt(struct smux_pkt_t *pkt);
+int smux_serialize(struct smux_pkt_t *pkt, char *out,
+ unsigned int *out_len);
+
+void smux_rx_state_machine(const unsigned char *data, int len, int flag);
+void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
+
+/* testing parameters */
+extern int smux_byte_loopback;
+extern int smux_simulate_wakeup_delay;
+
+#endif /* SMUX_PRIVATE_H */
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
new file mode 100644
index 0000000..242c66e
--- /dev/null
+++ b/drivers/tty/smux_test.c
@@ -0,0 +1,1222 @@
+/* drivers/tty/smux_test.c
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/smux.h>
+#include "smux_private.h"
+
+#define DEBUG_BUFMAX 4096
+
+/**
+ * Unit test assertion for logging test cases.
+ *
+ * @a lval
+ * @b rval
+ * @cmp comparison operator
+ *
+ * Assertion fails if (@a cmp @b) is not true which then
+ * logs the function and line number where the error occurred
+ * along with the values of @a and @b.
+ *
+ * Assumes that the following local variables exist:
+ * @buf - buffer to write failure message to
+ * @i - number of bytes written to buffer
+ * @max - maximum size of the buffer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_PTR(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+#define UT_ASSERT_UINT(a, cmp, b) \
+ if (!((a)cmp(b))) { \
+ i += scnprintf(buf + i, max - i, \
+ "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ failed = 1; \
+ break; \
+ } \
+ do {} while (0)
+
+static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
+ 89, 144, 233};
+
+/* Used for mapping local to remote TIOCM signals */
+struct tiocm_test_vector {
+ uint32_t input;
+ uint32_t set_old;
+ uint32_t set_new;
+ uint32_t clr_old;
+};
+
+/**
+ * Allocates a new buffer for SMUX for every call.
+ */
+int get_rx_buffer(void *priv, void **pkt_priv, void **buffer, int size)
+{
+ void *rx_buf;
+
+ rx_buf = kmalloc(size, GFP_ATOMIC);
+ *pkt_priv = (void *)0x1234;
+ *buffer = rx_buf;
+
+ return 0;
+}
+
+/* Test vector for packet tests. */
+struct test_vector {
+ const char *data;
+ const unsigned len;
+};
+
+/* Mock object metadata for SMUX_READ_DONE event */
+struct mock_read_event {
+ struct list_head list;
+ struct smux_meta_read meta;
+};
+
+/* Mock object metadata for SMUX_WRITE_DONE event */
+struct mock_write_event {
+ struct list_head list;
+ struct smux_meta_write meta;
+};
+
+/* Mock object for all SMUX callback events */
+struct smux_mock_callback {
+ int cb_count;
+ struct completion cb_completion;
+ spinlock_t lock;
+
+ /* status changes */
+ int event_connected;
+ int event_disconnected;
+ int event_disconnected_ssr;
+ int event_low_wm;
+ int event_high_wm;
+
+ /* TIOCM changes */
+ int event_tiocm;
+ struct smux_meta_tiocm tiocm_meta;
+
+ /* read event data */
+ int event_read_done;
+ int event_read_failed;
+ struct list_head read_events;
+
+ /* write event data */
+ int event_write_done;
+ int event_write_failed;
+ struct list_head write_events;
+};
+
+/**
+ * Initialize mock callback data. Only call once.
+ *
+ * @cb Mock callback data
+ */
+void mock_cb_data_init(struct smux_mock_callback *cb)
+{
+ init_completion(&cb->cb_completion);
+ spin_lock_init(&cb->lock);
+ INIT_LIST_HEAD(&cb->read_events);
+ INIT_LIST_HEAD(&cb->write_events);
+}
+
+/**
+ * Reset mock callback data to default values.
+ *
+ * @cb Mock callback data
+ *
+ * All packets are freed and counters reset to zero.
+ */
+void mock_cb_data_reset(struct smux_mock_callback *cb)
+{
+ cb->cb_count = 0;
+ INIT_COMPLETION(cb->cb_completion);
+ cb->event_connected = 0;
+ cb->event_disconnected = 0;
+ cb->event_disconnected_ssr = 0;
+ cb->event_low_wm = 0;
+ cb->event_high_wm = 0;
+ cb->event_tiocm = 0;
+ cb->tiocm_meta.tiocm_old = 0;
+ cb->tiocm_meta.tiocm_new = 0;
+
+ cb->event_read_done = 0;
+ cb->event_read_failed = 0;
+ while (!list_empty(&cb->read_events)) {
+ struct mock_read_event *meta;
+ meta = list_first_entry(&cb->read_events,
+ struct mock_read_event,
+ list);
+ kfree(meta->meta.buffer);
+ list_del(&meta->list);
+ kfree(meta);
+ }
+
+ cb->event_write_done = 0;
+ cb->event_write_failed = 0;
+ while (!list_empty(&cb->write_events)) {
+ struct mock_write_event *meta;
+ meta = list_first_entry(&cb->write_events,
+ struct mock_write_event,
+ list);
+ list_del(&meta->list);
+ kfree(meta);
+ }
+}
+
+/**
+ * Dump the values of the mock callback data for debug purposes.
+ *
+ * @cb Mock callback data
+ * @buf Print buffer
+ * @max Maximum number of characters to print
+ *
+ * @returns Number of characters added to buffer
+ */
+static int mock_cb_data_print(const struct smux_mock_callback *cb,
+ char *buf, int max)
+{
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "\tcb_count=%d\n"
+ "\tcb_completion.done=%d\n"
+ "\tevent_connected=%d\n"
+ "\tevent_disconnected=%d\n"
+ "\tevent_disconnected_ssr=%d\n"
+ "\tevent_low_wm=%d\n"
+ "\tevent_high_wm=%d\n"
+ "\tevent_tiocm=%d\n"
+ "\tevent_read_done=%d\n"
+ "\tevent_read_failed=%d\n"
+ "\tread_events=%d\n"
+ "\tevent_write_done=%d\n"
+ "\tevent_write_failed=%d\n"
+ "\twrite_events=%d\n",
+ cb->cb_count,
+ cb->cb_completion.done,
+ cb->event_connected,
+ cb->event_disconnected,
+ cb->event_disconnected_ssr,
+ cb->event_low_wm,
+ cb->event_high_wm,
+ cb->event_tiocm,
+ cb->event_read_done,
+ cb->event_read_failed,
+ !list_empty(&cb->read_events),
+ cb->event_write_done,
+ cb->event_write_failed,
+		!list_empty(&cb->write_events)
+ );
+
+ return i;
+}
+
+/**
+ * Mock object event callback. Used to logs events for analysis in the unit
+ * tests.
+ */
+void smux_mock_cb(void *priv, int event, const void *metadata)
+{
+ struct smux_mock_callback *cb_data_ptr;
+ struct mock_write_event *write_event_meta;
+ struct mock_read_event *read_event_meta;
+ unsigned long flags;
+
+ cb_data_ptr = (struct smux_mock_callback *)priv;
+ if (cb_data_ptr == NULL) {
+ pr_err("%s: invalid private data\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+ switch (event) {
+ case SMUX_CONNECTED:
+ ++cb_data_ptr->event_connected;
+ break;
+
+ case SMUX_DISCONNECTED:
+ ++cb_data_ptr->event_disconnected;
+ cb_data_ptr->event_disconnected_ssr =
+ ((struct smux_meta_disconnected *)metadata)->is_ssr;
+ break;
+
+ case SMUX_READ_DONE:
+ ++cb_data_ptr->event_read_done;
+ read_event_meta = kmalloc(sizeof(struct mock_read_event),
+ GFP_ATOMIC);
+ if (read_event_meta) {
+ read_event_meta->meta =
+ *(struct smux_meta_read *)metadata;
+ list_add_tail(&read_event_meta->list,
+ &cb_data_ptr->read_events);
+ }
+ break;
+
+ case SMUX_READ_FAIL:
+ ++cb_data_ptr->event_read_failed;
+ read_event_meta = kmalloc(sizeof(struct mock_read_event),
+ GFP_ATOMIC);
+ if (read_event_meta) {
+ read_event_meta->meta =
+ *(struct smux_meta_read *)metadata;
+ list_add_tail(&read_event_meta->list,
+ &cb_data_ptr->read_events);
+ }
+ break;
+
+ case SMUX_WRITE_DONE:
+ ++cb_data_ptr->event_write_done;
+ write_event_meta = kmalloc(sizeof(struct mock_write_event),
+ GFP_ATOMIC);
+ if (write_event_meta) {
+ write_event_meta->meta =
+ *(struct smux_meta_write *)metadata;
+ list_add_tail(&write_event_meta->list,
+ &cb_data_ptr->write_events);
+ }
+ break;
+
+ case SMUX_WRITE_FAIL:
+ ++cb_data_ptr->event_write_failed;
+ write_event_meta = kmalloc(sizeof(struct mock_write_event),
+ GFP_ATOMIC);
+ if (write_event_meta) {
+ write_event_meta->meta =
+ *(struct smux_meta_write *)metadata;
+ list_add_tail(&write_event_meta->list,
+ &cb_data_ptr->write_events);
+ }
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ ++cb_data_ptr->event_low_wm;
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ ++cb_data_ptr->event_high_wm;
+ break;
+
+ case SMUX_TIOCM_UPDATE:
+ ++cb_data_ptr->event_tiocm;
+ cb_data_ptr->tiocm_meta = *(struct smux_meta_tiocm *)metadata;
+ break;
+
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ };
+
+ ++cb_data_ptr->cb_count;
+ complete(&cb_data_ptr->cb_completion);
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+}
+
+/**
+ * Test Read/write usage.
+ *
+ * @buf Output buffer for failure/status messages
+ * @max Size of @buf
+ * @vectors Test vector data (must end with NULL item)
+ * @name Name of the test case for failure messages
+ *
+ * Perform a sanity test consisting of opening a port, writing test packet(s),
+ * reading the response(s), and closing the port.
+ *
+ * The port should already be configured to use either local or remote
+ * loopback.
+ */
+static int smux_ut_basic_core(char *buf, int max,
+ const struct test_vector *vectors,
+ const char *name)
+{
+ int i = 0;
+ int failed = 0;
+ static struct smux_mock_callback cb_data;
+ static int cb_initialized;
+ int ret;
+
+ if (!cb_initialized)
+ mock_cb_data_init(&cb_data);
+
+ mock_cb_data_reset(&cb_data);
+ while (!failed) {
+ struct mock_write_event *write_event;
+ struct mock_read_event *read_event;
+
+ /* open port */
+ ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+ get_rx_buffer);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+ mock_cb_data_reset(&cb_data);
+
+ /* write, read, and verify the test vector data */
+ for (; vectors->data != NULL; ++vectors) {
+ const char *test_data = vectors->data;
+ const unsigned test_len = vectors->len;
+
+ i += scnprintf(buf + i, max - i,
+ "Writing vector %p len %d\n",
+ test_data, test_len);
+
+ /* write data */
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)0xCAFEFACE,
+ test_data, test_len);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ), >, 0);
+
+ /* wait for write and echo'd read to complete */
+ INIT_COMPLETION(cb_data.cb_completion);
+ if (cb_data.cb_count < 2)
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+
+ UT_ASSERT_INT(cb_data.cb_count, >=, 1);
+ UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
+ UT_ASSERT_INT(list_empty(&cb_data.write_events), ==, 0);
+
+ write_event = list_first_entry(&cb_data.write_events,
+ struct mock_write_event, list);
+ UT_ASSERT_PTR(write_event->meta.pkt_priv, ==,
+ (void *)0xCAFEFACE);
+ UT_ASSERT_PTR(write_event->meta.buffer, ==,
+ (void *)test_data);
+ UT_ASSERT_INT(write_event->meta.len, ==, test_len);
+
+ /* verify read event */
+ UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
+ UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
+ read_event = list_first_entry(&cb_data.read_events,
+ struct mock_read_event, list);
+ UT_ASSERT_PTR(read_event->meta.pkt_priv, ==,
+ (void *)0x1234);
+ UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
+
+ if (read_event->meta.len != test_len ||
+ memcmp(read_event->meta.buffer,
+ test_data, test_len)) {
+ /* data mismatch */
+ char linebuff[80];
+
+ hex_dump_to_buffer(test_data, test_len,
+ 16, 1, linebuff, sizeof(linebuff), 1);
+ i += scnprintf(buf + i, max - i,
+ "Expected:\n%s\n\n", linebuff);
+
+ hex_dump_to_buffer(read_event->meta.buffer,
+ read_event->meta.len,
+ 16, 1, linebuff, sizeof(linebuff), 1);
+ i += scnprintf(buf + i, max - i,
+ "Actual:\n%s\n", linebuff);
+ failed = 1;
+ break;
+ }
+ mock_cb_data_reset(&cb_data);
+ }
+
+ /* close port */
+ ret = msm_smux_close(SMUX_TEST_LCID);
+ UT_ASSERT_INT(ret, ==, 0);
+ UT_ASSERT_INT(
+ (int)wait_for_completion_timeout(
+ &cb_data.cb_completion, HZ),
+ >, 0);
+ UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+ UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+ break;
+ }
+
+ if (!failed) {
+ i += scnprintf(buf + i, max - i, "\tOK\n");
+ } else {
+ pr_err("%s: Failed\n", name);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ i += mock_cb_data_print(&cb_data, buf + i, max - i);
+ msm_smux_close(SMUX_TEST_LCID);
+ }
+
+ mock_cb_data_reset(&cb_data);
+ return i;
+}
+
+/**
+ * Verify Basic Local Loopback Support
+ *
+ * Perform a sanity test consisting of opening a port in local loopback
+ * mode and writing a packet and reading the echo'd packet back.
+ */
+static int smux_ut_basic(char *buf, int max)
+{
+ const struct test_vector test_data[] = {
+ {"hello\0world\n", sizeof("hello\0world\n")},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ while (!failed) {
+ /* enable loopback mode */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ break;
+ }
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+ return i;
+}
+
+/**
+ * Verify Basic Remote Loopback Support
+ *
+ * Perform a sanity test consisting of opening a port in remote loopback
+ * mode and writing a packet and reading the echo'd packet back.
+ */
+static int smux_ut_remote_basic(char *buf, int max)
+{
+ const struct test_vector test_data[] = {
+ {"hello\0world\n", sizeof("hello\0world\n")},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ int ret;
+
+ i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+ while (!failed) {
+ /* enable remote mode */
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ UT_ASSERT_INT(ret, ==, 0);
+
+ i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
+ break;
+ }
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ i += scnprintf(buf + i, max - i, "\tFailed\n");
+ }
+ return i;
+}
+
+/**
+ * Fill test pattern into provided buffer including an optional
+ * redzone 16 bytes before and 16 bytes after the buffer.
+ *
+ * buf ---------
+ * redzone
+ * --------- <- returned pointer
+ * data
+ * --------- <- returned pointer + len
+ * redzone
+ * ---------
+ *
+ * @buf Pointer to the buffer of size len or len+32 (redzone)
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, adds redzone data
+ *
+ * @returns pointer to buffer (buf + 16 if redzone enabled)
+ */
+uint8_t *test_pattern_fill(char *buf, int len, int redzone)
+{
+	void *ret;
+	uint8_t ch;
+
+	ret = buf;
+	if (redzone) {
+		memset((char *)buf, 0xAB, 16);
+		memset((char *)buf + 16 + len, 0xBA, 16);
+		ret += 16;
+		buf += 16;
+	}
+
+	/* fill with test pattern */
+	for (ch = 0; len > 0; --len, ++ch)
+		*buf++ = (char)ch;
+	return ret;
+}
+
+/**
+ * Verify test pattern generated by test_pattern_fill.
+ *
+ * @buf_ptr Pointer to buffer pointer
+ * @len Length of the *data* buffer (excluding 32-byte redzone)
+ * @redzone If true, verifies redzone and adjusts *buf_ptr
+ * @errmsg Buffer for error message
+ * @errmsg_max Size of error message buffer
+ *
+ * @returns 0 for success; length of error message otherwise
+ */
+unsigned test_pattern_verify(char **buf_ptr, int len, int redzone,
+ char *errmsg, int errmsg_max)
+{
+ int n;
+ int i = 0;
+ char linebuff[80];
+
+ if (redzone) {
+ *buf_ptr -= 16;
+
+ /* verify prefix redzone */
+ for (n = 0; n < 16; ++n) {
+			if ((uint8_t)(*buf_ptr)[n] != 0xAB) {
+ hex_dump_to_buffer(*buf_ptr, 16,
+ 16, 1, linebuff, sizeof(linebuff), 1);
+ i += scnprintf(errmsg + i, errmsg_max - i,
+ "Redzone violation: %s\n", linebuff);
+ break;
+ }
+ }
+
+ /* verify postfix redzone */
+		for (n = 0; n < 16; ++n) {
+			if ((uint8_t)(*buf_ptr)[16 + len + n] != 0xBA) {
+				hex_dump_to_buffer(&(*buf_ptr)[16 + len], 16,
+					16, 1, linebuff, sizeof(linebuff), 1);
+				i += scnprintf(errmsg + i, errmsg_max - i,
+					"Redzone violation: %s\n", linebuff);
+				break;
+			}
+ }
+ }
+ return i;
+}
+
+/**
+ * Write a multiple packets in ascending size and verify packet is received
+ * correctly.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Requires that the port already be opened and loopback mode is
+ * configured correctly (if required).
+ */
+static int smux_ut_loopback_big_pkt(char *buf, int max, const char *name)
+{
+ struct test_vector test_data[] = {
+ {0, 64},
+ {0, 128},
+ {0, 256},
+ {0, 512},
+ {0, 1024},
+ {0, 2048},
+ {0, 4096},
+ {0, 0},
+ };
+ int i = 0;
+ int failed = 0;
+ struct test_vector *tv;
+
+ /* generate test data */
+ for (tv = test_data; tv->len > 0; ++tv) {
+ tv->data = kmalloc(tv->len + 32, GFP_KERNEL);
+ pr_err("%s: allocating %p len %d\n",
+ __func__, tv->data, tv->len);
+ if (!tv->data) {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to allocate %d bytes\n",
+ __func__, tv->len);
+ failed = 1;
+ goto out;
+ }
+		tv->data = (const char *)test_pattern_fill((char *)tv->data,
+							tv->len, 1);
+	}
+
+	/* run test */
+	i += scnprintf(buf + i, max - i, "Running %s\n", name);
+	while (!failed) {
+		i += smux_ut_basic_core(buf + i, max - i, test_data, name);
+		break;
+	}
+
+out:
+	if (failed) {
+		pr_err("%s: Failed\n", name);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+	}
+
+	for (tv = test_data; tv->len > 0; ++tv) {
+		if (tv->data) {
+ i += test_pattern_verify((char **)&tv->data,
+ tv->len, 1, buf + i, max - i);
+ pr_err("%s: freeing %p len %d\n", __func__,
+ tv->data, tv->len);
+ kfree(tv->data);
+ }
+ }
+
+ return i;
+}
+
+/**
+ * Verify Large-packet Local Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Open port in local loopback mode and write a multiple packets in ascending
+ * size and verify packet is received correctly.
+ */
+static int smux_ut_local_big_pkt(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+
+ if (ret == 0) {
+ smux_byte_loopback = SMUX_TEST_LCID;
+ i += smux_ut_loopback_big_pkt(buf, max, __func__);
+ smux_byte_loopback = 0;
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify Large-packet Remote Loopback Support.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ *
+ * Open port in remote loopback mode and write a multiple packets in ascending
+ * size and verify packet is received correctly.
+ */
+static int smux_ut_remote_big_pkt(char *buf, int max)
+{
+ int i = 0;
+ int ret;
+
+ ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+ if (ret == 0) {
+ i += smux_ut_loopback_big_pkt(buf, max, __func__);
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%s: Unable to set loopback mode\n",
+ __func__);
+ }
+
+ return i;
+}
+
+/**
+ * Verify set and get operations for each TIOCM bit.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ * @name Name of the test for error reporting
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_tiocm(char *buf, int max, const char *name)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	static const struct tiocm_test_vector tiocm_vectors[] = {
+		/* bit to set, set old, set new, clear old */
+		{TIOCM_DTR, TIOCM_DTR, TIOCM_DTR | TIOCM_DSR, TIOCM_DSR},
+		{TIOCM_RTS, TIOCM_RTS, TIOCM_RTS | TIOCM_CTS, TIOCM_CTS},
+		{TIOCM_RI, 0x0, TIOCM_RI, TIOCM_RI},
+		{TIOCM_CD, 0x0, TIOCM_CD, TIOCM_CD},
+	};
+	int i = 0;
+	int failed = 0;
+	int n;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", name);
+
+	/* bug fix: the flag was never set, so the mock callback state was
+	 * re-initialized on every invocation instead of exactly once
+	 */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	while (!failed) {
+		/* open port */
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* set and clear each TIOCM bit */
+		for (n = 0; n < ARRAY_SIZE(tiocm_vectors) && !failed; ++n) {
+			/* set signal and verify */
+			ret = msm_smux_tiocm_set(SMUX_TEST_LCID,
+						tiocm_vectors[n].input, 0x0);
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+					tiocm_vectors[n].set_old);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==,
+					tiocm_vectors[n].set_new);
+			mock_cb_data_reset(&cb_data);
+
+			/* clear signal and verify */
+			ret = msm_smux_tiocm_set(SMUX_TEST_LCID, 0x0,
+					tiocm_vectors[n].input);
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
+					tiocm_vectors[n].clr_old);
+			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==, 0x0);
+			mock_cb_data_reset(&cb_data);
+		}
+		if (failed)
+			break;
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", name);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/**
+ * Verify TIOCM Status Bits for local loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_tiocm(char *buf, int max)
+{
+	int i = 0;
+	int ret;
+
+	/* local loopback: frames are echoed by this end of the link */
+	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+
+	if (ret == 0) {
+		/* NOTE(review): smux_byte_loopback appears to route
+		 * single-byte commands locally -- confirm in smux core
+		 */
+		smux_byte_loopback = SMUX_TEST_LCID;
+		i += smux_ut_tiocm(buf, max, __func__);
+		smux_byte_loopback = 0;
+	} else {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+	}
+
+	return i;
+}
+
+/**
+ * Verify TIOCM Status Bits for remote loopback.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_remote_tiocm(char *buf, int max)
+{
+	int i = 0;
+	int ret;
+
+	/* remote loopback: TIOCM changes are echoed by the far end */
+	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+			SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
+	if (ret == 0) {
+		i += smux_ut_tiocm(buf, max, __func__);
+	} else {
+		i += scnprintf(buf + i, max - i,
+				"%s: Unable to set loopback mode\n",
+				__func__);
+	}
+
+	return i;
+}
+
+/**
+ * Verify High/Low Watermark notifications.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_wm(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	/* bug fix: the flag was never set, so the mock callback state was
+	 * re-initialized on every invocation instead of exactly once
+	 */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback with TX disabled */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK
+				| SMUX_CH_OPTION_REMOTE_TX_STOP,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* transmit 4 packets and verify high-watermark notification */
+		ret = 0;
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)1,
+					test_array, sizeof(test_array));
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)2,
+					test_array, sizeof(test_array));
+		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)3,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+
+		/* 4th queued packet crosses the high watermark */
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)4,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 1);
+		UT_ASSERT_INT(cb_data.event_low_wm, ==, 0);
+		mock_cb_data_reset(&cb_data);
+
+		/* exceed watermark and verify failure return value */
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)5,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, -EAGAIN);
+
+		/* re-enable TX and verify low-watermark notification */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
+		UT_ASSERT_INT(ret, ==, 0);
+		/* wait for 4 write-done, high-wm, low-wm, etc. callbacks */
+		while (cb_data.cb_count < 9) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
+		UT_ASSERT_INT(cb_data.event_low_wm, ==, 1);
+		UT_ASSERT_INT(cb_data.event_write_done, ==, 4);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+/**
+ * Verify smuxld_receive_buf regular and error processing.
+ *
+ * @buf Buffer for status message
+ * @max Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_smuxld_receive_buf(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	struct mock_read_event *meta;
+	int i = 0;
+	int failed = 0;
+	int ret;
+	/* three echo requests; the middle one carries an error flag */
+	char data[] = {SMUX_UT_ECHO_REQ,
+		SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ,
+	};
+	char flags[] = {0x0, 0x1, 0x0,};
+
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+
+	/* bug fix: the flag was never set, so the mock callback state was
+	 * re-initialized on every invocation instead of exactly once
+	 */
+	if (!cb_initialized) {
+		mock_cb_data_init(&cb_data);
+		cb_initialized = 1;
+	}
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
+					get_rx_buffer);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/*
+		 * Verify RX error processing by sending 3 echo requests:
+		 *     one OK, one fail, and a final OK
+		 *
+		 * The parsing framework should process the requests
+		 * and send us three BYTE command packets with
+		 * ECHO ACK FAIL and ECHO ACK OK characters.
+		 */
+		smuxld_receive_buf(0, data, flags, sizeof(data));
+
+		/* verify response characters */
+		do {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		} while (cb_data.cb_count < 3);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
+		UT_ASSERT_INT(cb_data.event_read_done, ==, 3);
+
+		/* responses arrive in request order: OK, FAIL, OK */
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_OK);
+		list_del(&meta->list);
+
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_FAIL);
+		list_del(&meta->list);
+
+		meta = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
+				SMUX_UT_ECHO_ACK_OK);
+		list_del(&meta->list);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
+static char debug_buffer[DEBUG_BUFMAX];
+
+/*
+ * debugfs read handler: run one unit test and return its status text.
+ * file->private_data holds the fill function stashed by debug_create()
+ * via i_private and copied over in debug_open().
+ */
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	int (*fill)(char *buf, int max) = file->private_data;
+	int bsize;
+
+	/* run the test only on the first read; non-zero offset means EOF */
+	if (*ppos != 0)
+		return 0;
+
+	bsize = fill(debug_buffer, DEBUG_BUFMAX);
+	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+/* Stash the per-file fill function (from i_private) for debug_read() */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* file operations for the read-only unit-test debugfs entries */
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+/**
+ * Create one debugfs file whose read handler runs @fill.
+ *
+ * @name Name of the debugfs file
+ * @mode Access mode for the file
+ * @dent Parent debugfs directory
+ * @fill Test function that fills the status buffer; passed as the
+ *       file's private data and recovered in debug_open()/debug_read()
+ */
+static void debug_create(const char *name, mode_t mode,
+			 struct dentry *dent,
+			 int (*fill)(char *buf, int max))
+{
+	debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+/* Register the n_smux debugfs directory and one file per unit test. */
+static int __init smux_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("n_smux", 0);
+	/* NOTE(review): debugfs_create_dir() can also return NULL on
+	 * failure in some kernels; IS_ERR(NULL) is false, so a NULL
+	 * parent would fall through -- confirm against this kernel's API
+	 */
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * And if particular tests fail, you can then repeatedly run the failing
+	 * tests as you debug and resolve the failing test.
+	 */
+	debug_create("ut_local_basic", 0444, dent, smux_ut_basic);
+	debug_create("ut_remote_basic", 0444, dent,	smux_ut_remote_basic);
+	debug_create("ut_local_big_pkt", 0444, dent, smux_ut_local_big_pkt);
+	debug_create("ut_remote_big_pkt", 0444, dent, smux_ut_remote_big_pkt);
+	debug_create("ut_local_tiocm", 0444, dent, smux_ut_local_tiocm);
+	debug_create("ut_remote_tiocm", 0444, dent,	smux_ut_remote_tiocm);
+	debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
+	debug_create("ut_local_smuxld_receive_buf", 0444, dent,
+			smux_ut_local_smuxld_receive_buf);
+
+	return 0;
+}
+
+late_initcall(smux_debugfs_init);
+
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index d74959e..92e95a6 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -61,6 +61,8 @@
#include "u_rmnet_ctrl_smd.c"
#include "u_ctrl_hsic.c"
#include "u_data_hsic.c"
+#include "u_ctrl_hsuart.c"
+#include "u_data_hsuart.c"
#include "f_serial.c"
#include "f_acm.c"
#include "f_adb.c"
@@ -200,7 +202,8 @@
u32 swfi_latency = 0;
static int last_vote = -1;
- if (!pdata || vote == last_vote)
+ if (!pdata || vote == last_vote
+ || !pdata->swfi_latency)
return;
swfi_latency = pdata->swfi_latency + 1;
@@ -223,6 +226,7 @@
char **uevent_envp = NULL;
static enum android_device_state last_uevent, next_state;
unsigned long flags;
+ int pm_qos_vote = -1;
spin_lock_irqsave(&cdev->lock, flags);
if (cdev->config) {
@@ -232,13 +236,16 @@
uevent_envp = dev->connected ? connected : disconnected;
next_state = dev->connected ? USB_CONNECTED : USB_DISCONNECTED;
if (dev->connected && strncmp(dev->pm_qos, "low", 3))
- android_pm_qos_update_latency(dev, 1);
+ pm_qos_vote = 1;
else if (!dev->connected || !strncmp(dev->pm_qos, "low", 3))
- android_pm_qos_update_latency(dev, 0);
+ pm_qos_vote = 0;
}
dev->sw_connected = dev->connected;
spin_unlock_irqrestore(&cdev->lock, flags);
+ if (pm_qos_vote != -1)
+ android_pm_qos_update_latency(dev, pm_qos_vote);
+
if (uevent_envp) {
/*
* Some userspace modules, e.g. MTP, work correctly only if
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 863143b..7fd120f 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -16,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/usb/ulpi.h>
+#include <mach/gpio.h>
#include "ci13xxx_udc.c"
@@ -24,7 +25,11 @@
struct ci13xxx_udc_context {
int irq;
void __iomem *regs;
+ int wake_gpio;
+ int wake_irq;
+ bool wake_irq_state;
};
+
static struct ci13xxx_udc_context _udc_ctxt;
static irqreturn_t msm_udc_irq(int irq, void *data)
@@ -32,22 +37,71 @@
return udc_irq();
}
+/*
+ * Arm the USB_RESUME wake GPIO interrupt when the bus suspends, so a
+ * host-initiated resume can wake us.  wake_irq_state guards against
+ * double enable/disable.
+ */
+static void ci13xxx_msm_suspend(void)
+{
+	struct device *dev = _udc->gadget.dev.parent;
+	dev_dbg(dev, "ci13xxx_msm_suspend\n");
+
+	if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
+		enable_irq_wake(_udc_ctxt.wake_irq);
+		enable_irq(_udc_ctxt.wake_irq);
+		_udc_ctxt.wake_irq_state = true;
+	}
+}
+
+/*
+ * Disarm the USB_RESUME wake GPIO interrupt on resume/disconnect;
+ * mirrors ci13xxx_msm_suspend() using the wake_irq_state flag.
+ */
+static void ci13xxx_msm_resume(void)
+{
+	struct device *dev = _udc->gadget.dev.parent;
+	dev_dbg(dev, "ci13xxx_msm_resume\n");
+
+	if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
+		disable_irq_wake(_udc_ctxt.wake_irq);
+		disable_irq(_udc_ctxt.wake_irq);
+		_udc_ctxt.wake_irq_state = false;
+	}
+}
+
static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
{
struct device *dev = udc->gadget.dev.parent;
switch (event) {
case CI13XXX_CONTROLLER_RESET_EVENT:
- dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+ dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
writel(0, USB_AHBBURST);
writel_relaxed(0x08, USB_AHBMODE);
break;
+ case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+ case CI13XXX_CONTROLLER_SUSPEND_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
+ ci13xxx_msm_suspend();
+ break;
+ case CI13XXX_CONTROLLER_RESUME_EVENT:
+ dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
+ ci13xxx_msm_resume();
+ break;
+
default:
dev_dbg(dev, "unknown ci13xxx_udc event\n");
break;
}
}
+/*
+ * Handler for the USB_RESUME wake GPIO: fires when the host drives
+ * resume signaling while we are bus-suspended.
+ */
+static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
+{
+	struct ci13xxx *udc = _udc;
+
+	/* still suspended with VBUS present: take the PHY out of suspend */
+	if (udc->transceiver && udc->vbus_active && udc->suspended)
+		otg_set_suspend(udc->transceiver, 0);
+	else if (!udc->suspended)
+		/* already resumed; just disarm this wake interrupt */
+		ci13xxx_msm_resume();
+
+	return IRQ_HANDLED;
+}
+
static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
.name = "ci13xxx_msm",
.flags = CI13XXX_REGS_SHARED |
@@ -60,6 +114,52 @@
.notify_event = ci13xxx_msm_notify_event,
};
+/*
+ * Claim the USB_RESUME wake GPIO described by @res, configure it as an
+ * input, and register (but leave disabled) its wake interrupt; the IRQ
+ * is armed on demand by ci13xxx_msm_suspend().
+ *
+ * Returns 0 on success or a negative errno; on failure no GPIO or IRQ
+ * is left claimed.
+ */
+static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
+				struct resource *res)
+{
+	int wake_irq;
+	int ret;
+
+	dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
+
+	_udc_ctxt.wake_gpio = res->start;
+	/* bug fix: gpio_request()/gpio_direction_input() returns were
+	 * previously ignored, so failures went undetected
+	 */
+	ret = gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not request USB_RESUME GPIO.\n");
+		_udc_ctxt.wake_gpio = 0;
+		return ret;
+	}
+	ret = gpio_direction_input(_udc_ctxt.wake_gpio);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not configure USB_RESUME GPIO.\n");
+		goto gpio_free;
+	}
+	wake_irq = MSM_GPIO_TO_INT(_udc_ctxt.wake_gpio);
+	if (wake_irq < 0) {
+		dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
+		/* bug fix: this path previously leaked the claimed GPIO */
+		ret = -ENXIO;
+		goto gpio_free;
+	}
+
+	dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
+			_udc_ctxt.wake_gpio, wake_irq);
+	ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
+		IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "usb resume", NULL);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
+		goto gpio_free;
+	}
+	/* keep disabled until the bus actually suspends */
+	disable_irq(wake_irq);
+	_udc_ctxt.wake_irq = wake_irq;
+
+	return 0;
+
+gpio_free:
+	gpio_free(_udc_ctxt.wake_gpio);
+	_udc_ctxt.wake_gpio = 0;
+	return ret;
+}
+
+/*
+ * Release the USB_RESUME wake IRQ and GPIO claimed by
+ * ci13xxx_msm_install_wake_gpio().  Safe to call when nothing was
+ * installed.
+ */
+static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
+
+	/* bug fix: only the GPIO was released before; the wake IRQ
+	 * registered in install (dev_id == NULL) was leaked
+	 */
+	if (_udc_ctxt.wake_irq) {
+		free_irq(_udc_ctxt.wake_irq, NULL);
+		_udc_ctxt.wake_irq = 0;
+	}
+	if (_udc_ctxt.wake_gpio) {
+		gpio_free(_udc_ctxt.wake_gpio);
+		_udc_ctxt.wake_gpio = 0;
+	}
+}
+
static int ci13xxx_msm_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -92,11 +192,20 @@
goto udc_remove;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
+ if (res) {
+ ret = ci13xxx_msm_install_wake_gpio(pdev, res);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio irq install failed\n");
+ goto udc_remove;
+ }
+ }
+
ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
pdev);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto udc_remove;
+ goto gpio_uninstall;
}
pm_runtime_no_callbacks(&pdev->dev);
@@ -104,6 +213,8 @@
return 0;
+gpio_uninstall:
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
udc_remove:
udc_remove();
iounmap:
@@ -116,6 +227,7 @@
{
pm_runtime_disable(&pdev->dev);
free_irq(_udc_ctxt.irq, pdev);
+ ci13xxx_msm_uninstall_wake_gpio(pdev);
udc_remove();
iounmap(_udc_ctxt.regs);
return 0;
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index d754a88..b29ef82 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2005,6 +2005,51 @@
}
/**
+ * isr_resume_handler: USB port-change (PCI) interrupt handler
+ * @udc: UDC device
+ *
+ * Called with udc->lock held; the lock is dropped around the
+ * notify_event/transceiver/gadget-driver resume callbacks.
+ */
+static void isr_resume_handler(struct ci13xxx *udc)
+{
+	udc->gadget.speed = hw_port_is_high_speed() ?
+		USB_SPEED_HIGH : USB_SPEED_FULL;
+	if (udc->suspended) {
+		spin_unlock(udc->lock);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			  CI13XXX_CONTROLLER_RESUME_EVENT);
+		if (udc->transceiver)
+			otg_set_suspend(udc->transceiver, 0);
+		/* NOTE(review): driver->resume is called unconditionally --
+		 * confirm the bound gadget driver always provides it
+		 */
+		udc->driver->resume(&udc->gadget);
+		spin_lock(udc->lock);
+		udc->suspended = 0;
+	}
+}
+
+/**
+ * isr_suspend_handler: USB suspend (SLI) interrupt handler
+ * @udc: UDC device
+ *
+ * Called with udc->lock held; the lock is dropped around the
+ * gadget-driver/notify_event/transceiver suspend callbacks.
+ */
+static void isr_suspend_handler(struct ci13xxx *udc)
+{
+	/* only meaningful once enumerated and while VBUS is present */
+	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+		udc->vbus_active) {
+		if (udc->suspended == 0) {
+			spin_unlock(udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_SUSPEND_EVENT);
+			if (udc->transceiver)
+				otg_set_suspend(udc->transceiver, 1);
+			spin_lock(udc->lock);
+			udc->suspended = 1;
+		}
+	}
+}
+
+/**
* isr_get_status_complete: get_status request complete function
* @ep: endpoint
* @req: request handled
@@ -2865,6 +2910,9 @@
} else {
hw_device_state(0);
_gadget_stop_activity(&udc->gadget);
+ if (udc->udc_driver->notify_event)
+ udc->udc_driver->notify_event(udc,
+ CI13XXX_CONTROLLER_DISCONNECT_EVENT);
pm_runtime_put_sync(&_gadget->dev);
}
}
@@ -3174,14 +3222,7 @@
}
if (USBi_PCI & intr) {
isr_statistics.pci++;
- udc->gadget.speed = hw_port_is_high_speed() ?
- USB_SPEED_HIGH : USB_SPEED_FULL;
- if (udc->suspended) {
- spin_unlock(udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(udc->lock);
- udc->suspended = 0;
- }
+ isr_resume_handler(udc);
}
if (USBi_UEI & intr)
isr_statistics.uei++;
@@ -3190,15 +3231,7 @@
isr_tr_complete_handler(udc);
}
if (USBi_SLI & intr) {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
- udc->suspended = 1;
- spin_unlock(udc->lock);
- udc->driver->suspend(&udc->gadget);
- if (udc->udc_driver->notify_event)
- udc->udc_driver->notify_event(udc,
- CI13XXX_CONTROLLER_SUSPEND_EVENT);
- spin_lock(udc->lock);
- }
+ isr_suspend_handler(udc);
isr_statistics.sli++;
}
retval = IRQ_HANDLED;
@@ -3273,15 +3306,6 @@
udc->gadget.dev.parent = dev;
udc->gadget.dev.release = udc_release;
- retval = hw_device_init(regs);
- if (retval < 0)
- goto free_udc;
-
- for (i = 0; i < hw_ep_max; i++) {
- struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
- INIT_LIST_HEAD(&mEp->ep.ep_list);
- }
-
udc->transceiver = otg_get_transceiver();
if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
@@ -3291,6 +3315,15 @@
}
}
+ retval = hw_device_init(regs);
+ if (retval < 0)
+ goto put_transceiver;
+
+ for (i = 0; i < hw_ep_max; i++) {
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ INIT_LIST_HEAD(&mEp->ep.ep_list);
+ }
+
if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
retval = hw_device_reset(udc);
if (retval)
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 8e2b093..8cb62da 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -124,6 +124,8 @@
#define CI13XXX_CONTROLLER_CONNECT_EVENT 1
#define CI13XXX_CONTROLLER_SUSPEND_EVENT 2
#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT 3
+#define CI13XXX_CONTROLLER_RESUME_EVENT 4
+#define CI13XXX_CONTROLLER_DISCONNECT_EVENT 5
void (*notify_event) (struct ci13xxx *udc, unsigned event);
};
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 02b2cc3..41a1777 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -153,7 +153,7 @@
.wNdpOutDivisor = cpu_to_le16(4),
.wNdpOutPayloadRemainder = cpu_to_le16(0),
.wNdpOutAlignment = cpu_to_le16(4),
- .wNtbOutMaxDatagrams = cpu_to_le16(4),
+ .wNtbOutMaxDatagrams = 0,
};
/*
@@ -216,7 +216,7 @@
.wMaxControlMessage = cpu_to_le16(0x1000),
.bNumberFilters = 0x10,
.bMaxFilterSize = 0x80,
- .wMaxSegmentSize = cpu_to_le16(0x1000),
+ .wMaxSegmentSize = cpu_to_le16(0xfe0),
.bmNetworkCapabilities = 0x20,
};
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 0e619e6..87244e9 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -708,7 +708,8 @@
ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
if (ret < 0) {
DBG(cdev, "send_file_work: xfer error %d\n", ret);
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
r = -EIO;
break;
}
@@ -760,7 +761,8 @@
ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
}
@@ -772,7 +774,8 @@
DBG(cdev, "vfs_write %d\n", ret);
if (ret != write_req->actual) {
r = -EIO;
- dev->state = STATE_ERROR;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
break;
}
write_req = NULL;
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index fcbc75c..53a6398 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -55,9 +55,11 @@
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
+static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
+static unsigned int no_data_hsuart_ports;
static struct rmnet_ports {
enum transport_type data_xport;
enum transport_type ctrl_xport;
@@ -232,12 +234,12 @@
int port_idx;
int i;
- pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
- " smd ports: %u ctrl hsic ports: %u"
+ pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
+ " smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
" nr_rmnet_ports: %u\n",
__func__, no_data_bam_ports, no_data_bam2bam_ports,
- no_data_hsic_ports, no_ctrl_smd_ports,
- no_ctrl_hsic_ports, nr_rmnet_ports);
+ no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
+ no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);
if (no_data_bam_ports || no_data_bam2bam_ports) {
ret = gbam_setup(no_data_bam_ports,
@@ -280,6 +282,34 @@
}
}
+ if (no_data_hsuart_ports) {
+ port_idx = ghsuart_data_setup(no_data_hsuart_ports,
+ USB_GADGET_RMNET);
+ if (port_idx < 0)
+ return port_idx;
+ for (i = 0; i < nr_rmnet_ports; i++) {
+ if (rmnet_ports[i].data_xport ==
+ USB_GADGET_XPORT_HSUART) {
+ rmnet_ports[i].data_xport_num = port_idx;
+ port_idx++;
+ }
+ }
+ }
+
+ if (no_ctrl_hsuart_ports) {
+ port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
+ USB_GADGET_RMNET);
+ if (port_idx < 0)
+ return port_idx;
+ for (i = 0; i < nr_rmnet_ports; i++) {
+ if (rmnet_ports[i].ctrl_xport ==
+ USB_GADGET_XPORT_HSUART) {
+ rmnet_ports[i].ctrl_xport_num = port_idx;
+ port_idx++;
+ }
+ }
+ }
+
return 0;
}
@@ -312,6 +342,14 @@
return ret;
}
break;
+ case USB_GADGET_XPORT_HSUART:
+ ret = ghsuart_ctrl_connect(&dev->port, port_num);
+ if (ret) {
+ pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -342,6 +380,15 @@
return ret;
}
break;
+ case USB_GADGET_XPORT_HSUART:
+ ret = ghsuart_data_connect(&dev->port, port_num);
+ if (ret) {
+ pr_err("%s: ghsuart_data_connect failed: err:%d\n",
+ __func__, ret);
+ ghsuart_ctrl_disconnect(&dev->port, port_num);
+ return ret;
+ }
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -371,6 +418,9 @@
case USB_GADGET_XPORT_HSIC:
ghsic_ctrl_disconnect(&dev->port, port_num);
break;
+ case USB_GADGET_XPORT_HSUART:
+ ghsuart_ctrl_disconnect(&dev->port, port_num);
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -388,6 +438,9 @@
case USB_GADGET_XPORT_HSIC:
ghsic_data_disconnect(&dev->port, port_num);
break;
+ case USB_GADGET_XPORT_HSUART:
+ ghsuart_data_disconnect(&dev->port, port_num);
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -525,7 +578,7 @@
}
dev->notify->driver_data = dev;
- if (!dev->port.in->driver_data) {
+ if (!dev->port.in->desc || !dev->port.out->desc) {
if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
dev->port.in->desc = NULL;
@@ -998,6 +1051,8 @@
no_data_bam2bam_ports = 0;
no_ctrl_hsic_ports = 0;
no_data_hsic_ports = 0;
+ no_ctrl_hsuart_ports = 0;
+ no_data_hsuart_ports = 0;
}
static int frmnet_init_port(const char *ctrl_name, const char *data_name)
@@ -1041,6 +1096,10 @@
rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
no_ctrl_hsic_ports++;
break;
+ case USB_GADGET_XPORT_HSUART:
+ rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
+ no_ctrl_hsuart_ports++;
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -1063,6 +1122,10 @@
rmnet_port->data_xport_num = no_data_hsic_ports;
no_data_hsic_ports++;
break;
+ case USB_GADGET_XPORT_HSUART:
+ rmnet_port->data_xport_num = no_data_hsuart_ports;
+ no_data_hsuart_ports++;
+ break;
case USB_GADGET_XPORT_NONE:
break;
default:
@@ -1084,6 +1147,8 @@
no_data_bam_ports = 0;
no_ctrl_hsic_ports = 0;
no_data_hsic_ports = 0;
+ no_ctrl_hsuart_ports = 0;
+ no_data_hsuart_ports = 0;
return ret;
}
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 8d9f090..08a1712 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -27,7 +27,7 @@
* CDC ACM driver. However, for many purposes it's just as functional
* if you can arrange appropriate host side drivers.
*/
-#define GSERIAL_NO_PORTS 2
+#define GSERIAL_NO_PORTS 3
struct f_gser {
@@ -67,6 +67,7 @@
static unsigned int no_sdio_ports;
static unsigned int no_smd_ports;
static unsigned int no_hsic_sports;
+static unsigned int no_hsuart_sports;
static unsigned int nr_ports;
static struct port_info {
@@ -249,9 +250,9 @@
int i;
pr_debug("%s: no_tty_ports: %u no_sdio_ports: %u"
- " no_smd_ports: %u no_hsic_sports: %u nr_ports: %u\n",
+ " no_smd_ports: %u no_hsic_sports: %u no_hsuart_ports: %u nr_ports: %u\n",
__func__, no_tty_ports, no_sdio_ports, no_smd_ports,
- no_hsic_sports, nr_ports);
+ no_hsic_sports, no_hsuart_sports, nr_ports);
if (no_tty_ports)
ret = gserial_setup(c->cdev->gadget, no_tty_ports);
@@ -278,6 +279,22 @@
return ret;
return 0;
}
+ if (no_hsuart_sports) {
+ port_idx = ghsuart_data_setup(no_hsuart_sports,
+ USB_GADGET_SERIAL);
+ if (port_idx < 0)
+ return port_idx;
+
+ for (i = 0; i < nr_ports; i++) {
+ if (gserial_ports[i].transport ==
+ USB_GADGET_XPORT_HSUART) {
+ gserial_ports[i].client_port_num = port_idx;
+ port_idx++;
+ }
+ }
+
+ return 0;
+ }
return ret;
}
@@ -317,6 +334,14 @@
return ret;
}
break;
+ case USB_GADGET_XPORT_HSUART:
+ ret = ghsuart_data_connect(&gser->port, port_num);
+ if (ret) {
+ pr_err("%s: ghsuart_data_connect failed: err:%d\n",
+ __func__, ret);
+ return ret;
+ }
+ break;
default:
pr_err("%s: Un-supported transport: %s\n", __func__,
xport_to_str(gser->transport));
@@ -350,6 +375,9 @@
ghsic_ctrl_disconnect(&gser->port, port_num);
ghsic_data_disconnect(&gser->port, port_num);
break;
+ case USB_GADGET_XPORT_HSUART:
+ ghsuart_data_disconnect(&gser->port, port_num);
+ break;
default:
pr_err("%s: Un-supported transport:%s\n", __func__,
xport_to_str(gser->transport));
@@ -854,11 +882,13 @@
gser->port.func.disable = gser_disable;
gser->transport = gserial_ports[port_num].transport;
#ifdef CONFIG_MODEM_SUPPORT
- /* We support only two ports for now */
+ /* We support only three ports for now */
if (port_num == 0)
gser->port.func.name = "modem";
- else
+ else if (port_num == 1)
gser->port.func.name = "nmea";
+ else
+ gser->port.func.name = "modem2";
gser->port.func.setup = gser_setup;
gser->port.connect = gser_connect;
gser->port.get_dtr = gser_get_dtr;
@@ -910,6 +940,10 @@
/*client port number will be updated in gport_setup*/
no_hsic_sports++;
break;
+ case USB_GADGET_XPORT_HSUART:
+ /*client port number will be updated in gport_setup*/
+ no_hsuart_sports++;
+ break;
default:
pr_err("%s: Un-supported transport transport: %u\n",
__func__, gserial_ports[port_num].transport);
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 35ea497..863ddcd 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -2301,13 +2301,14 @@
static int msm72k_pullup(struct usb_gadget *_gadget, int is_active)
{
struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+ struct msm_otg *otg = to_msm_otg(ui->xceiv);
unsigned long flags;
-
atomic_set(&ui->softconnect, is_active);
spin_lock_irqsave(&ui->lock, flags);
- if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL) {
+ if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL ||
+ atomic_read(&otg->chg_type) == USB_CHG_TYPE__WALLCHARGER) {
spin_unlock_irqrestore(&ui->lock, flags);
return 0;
}
diff --git a/drivers/usb/gadget/u_ctrl_hsuart.c b/drivers/usb/gadget/u_ctrl_hsuart.c
new file mode 100644
index 0000000..7102d81
--- /dev/null
+++ b/drivers/usb/gadget/u_ctrl_hsuart.c
@@ -0,0 +1,576 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+#include <linux/smux.h>
+
+#include <mach/usb_gadget_xport.h>
+
+#define CH_OPENED 0
+#define CH_READY 1
+
+static unsigned int num_ctrl_ports;
+
+static const char *ghsuart_ctrl_names[] = {
+ "SMUX_RMNET_CTL_HSUART"
+};
+
+struct ghsuart_ctrl_port {
+ /* port */
+ unsigned port_num;
+ /* gadget */
+ enum gadget_type gtype;
+ spinlock_t port_lock;
+ void *port_usb;
+ /* work queue*/
+ struct workqueue_struct *wq;
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ /*ctrl pkt response cb*/
+ int (*send_cpkt_response)(void *g, void *buf, size_t len);
+ void *ctxt;
+ unsigned int ch_id;
+ /* flow control bits */
+ unsigned long flags;
+ int (*send_pkt)(void *, void *, size_t actual);
+ /* Channel status */
+ unsigned long channel_sts;
+ /* control bits */
+ unsigned cbits_tomodem;
+ /* counters */
+ unsigned long to_modem;
+ unsigned long to_host;
+ unsigned long drp_cpkt_cnt;
+};
+
+static struct {
+ struct ghsuart_ctrl_port *port;
+ struct platform_driver pdrv;
+} ghsuart_ctrl_ports[NUM_HSUART_PORTS];
+
+static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual);
+
+/*
+ * SMUX event callback for the rmnet control channel.  Invoked from SMUX
+ * context.  Forwards connect notifications to the gadget, hands completed
+ * reads to ghsuart_ctrl_receive(), and frees SMUX buffers on read/write
+ * failure or write completion.
+ */
+static void smux_control_event(void *priv, int event_type, const void *metadata)
+{
+	struct grmnet *gr = NULL;
+	struct ghsuart_ctrl_port *port = priv;
+	void *buf;
+	unsigned long flags;
+	size_t len;
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		/* only mark the channel open if the USB side is still bound */
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (!port->port_usb) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		set_bit(CH_OPENED, &port->channel_sts);
+		if (port->gtype == USB_GADGET_RMNET) {
+			gr = port->port_usb;
+			if (gr && gr->connect)
+				gr->connect(gr);
+		}
+		break;
+	case SMUX_DISCONNECTED:
+		clear_bit(CH_OPENED, &port->channel_sts);
+		break;
+	case SMUX_READ_DONE:
+		/* ghsuart_ctrl_receive() consumes and frees the buffer */
+		len = ((struct smux_meta_read *)metadata)->len;
+		buf = ((struct smux_meta_read *)metadata)->buffer;
+		ghsuart_ctrl_receive(port, buf, len);
+		break;
+	case SMUX_READ_FAIL:
+		buf = ((struct smux_meta_read *)metadata)->buffer;
+		kfree(buf);
+		break;
+	case SMUX_WRITE_DONE:
+	case SMUX_WRITE_FAIL:
+		buf = ((struct smux_meta_write *)metadata)->buffer;
+		kfree(buf);
+		break;
+	case SMUX_LOW_WM_HIT:
+	case SMUX_HIGH_WM_HIT:
+	case SMUX_TIOCM_UPDATE:
+		break;
+	default:
+		pr_err("%s Event %d not supported\n", __func__, event_type);
+	}
+}
+
+static int rx_control_buffer(void *priv, void **pkt_priv, void **buffer,
+ int size)
+{
+ void *rx_buf;
+
+ rx_buf = kmalloc(size, GFP_KERNEL);
+ if (!rx_buf)
+ return -EAGAIN;
+ *buffer = rx_buf;
+ *pkt_priv = NULL;
+
+ return 0;
+}
+
+/*
+ * Deliver a control packet read from SMUX to the USB function.
+ * Returns the send_cpkt_response() result, or 0 when no responder is
+ * registered.  The SMUX rx buffer is always freed here.
+ */
+static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual)
+{
+	struct ghsuart_ctrl_port *port = dev;
+	int retval = 0;
+
+	/* 'actual' is a size_t: %zu, not %d */
+	pr_debug_ratelimited("%s: read complete bytes read: %zu\n",
+			__func__, actual);
+
+	/* send it to USB here */
+	if (port && port->send_cpkt_response) {
+		retval = port->send_cpkt_response(port->port_usb, buf, actual);
+		port->to_host++;
+	}
+	kfree(buf);
+	return retval;
+}
+
+/*
+ * Copy a control packet from the USB function and write it to the modem
+ * over SMUX.  Packets are silently dropped (counted in drp_cpkt_cnt) while
+ * the channel is not open.  Returns 0 on success or a negative errno.
+ */
+static int
+ghsuart_send_cpkt_tomodem(u8 portno, void *buf, size_t len)
+{
+	void *cbuf;
+	struct ghsuart_ctrl_port *port;
+	int ret;
+
+	if (portno >= num_ctrl_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return -ENODEV;
+	}
+
+	port = ghsuart_ctrl_ports[portno].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+	/* drop cpkt if ch is not open */
+	if (!test_bit(CH_OPENED, &port->channel_sts)) {
+		port->drp_cpkt_cnt++;
+		return 0;
+	}
+	/* own copy: SMUX frees the buffer asynchronously on WRITE_DONE/FAIL */
+	cbuf = kmalloc(len, GFP_ATOMIC);
+	if (!cbuf)
+		return -ENOMEM;
+
+	memcpy(cbuf, buf, len);
+
+	/* 'len' is a size_t: %zu, not %d */
+	pr_debug("%s: ctrl_pkt:%zu bytes\n", __func__, len);
+
+	ret = msm_smux_write(port->ch_id, port, (void *)cbuf, len);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: write error:%d\n",
+				__func__, ret);
+		port->drp_cpkt_cnt++;
+		kfree(cbuf);
+		return ret;
+	}
+	port->to_modem++;
+
+	return 0;
+}
+
+static void
+ghsuart_send_cbits_tomodem(void *gptr, u8 portno, int cbits)
+{
+ struct ghsuart_ctrl_port *port;
+
+ if (portno >= num_ctrl_ports || !gptr) {
+ pr_err("%s: Invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ port = ghsuart_ctrl_ports[portno].port;
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ if (cbits == port->cbits_tomodem)
+ return;
+
+ port->cbits_tomodem = cbits;
+
+ if (!test_bit(CH_OPENED, &port->channel_sts))
+ return;
+
+ pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits);
+ /* Send the control bits to the Modem */
+ msm_smux_tiocm_set(port->ch_id, cbits, ~cbits);
+}
+
+static void ghsuart_ctrl_connect_w(struct work_struct *w)
+{
+ struct ghsuart_ctrl_port *port =
+ container_of(w, struct ghsuart_ctrl_port, connect_w);
+ int retval;
+
+ if (!port || !test_bit(CH_READY, &port->channel_sts))
+ return;
+
+ pr_debug("%s: port:%p\n", __func__, port);
+
+ retval = msm_smux_open(port->ch_id, port->ctxt, smux_control_event,
+ rx_control_buffer);
+ if (retval < 0) {
+ pr_err(" %s smux_open failed\n", __func__);
+ return;
+ }
+
+}
+
+/*
+ * Bind a grmnet instance (gptr) to control port @port_num and, if the SMUX
+ * platform channel is already ready, schedule the open on the port workqueue.
+ * Returns 0 on success or -ENODEV for an invalid/unallocated port.
+ */
+int ghsuart_ctrl_connect(void *gptr, int port_num)
+{
+	struct ghsuart_ctrl_port	*port;
+	struct grmnet		*gr;
+	unsigned long		flags;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	/* valid indices are 0..num_ctrl_ports-1, so reject ">=", not ">" */
+	if (port_num >= num_ctrl_ports || !gptr) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	port = ghsuart_ctrl_ports[port_num].port;
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	gr = gptr;
+	port->send_cpkt_response = gr->send_cpkt_response;
+	gr->send_encap_cmd = ghsuart_send_cpkt_tomodem;
+	gr->notify_modem = ghsuart_send_cbits_tomodem;
+
+	port->port_usb = gptr;
+	port->to_host = 0;
+	port->to_modem = 0;
+	port->drp_cpkt_cnt = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (test_bit(CH_READY, &port->channel_sts))
+		queue_work(port->wq, &port->connect_w);
+
+	return 0;
+}
+
+static void ghsuart_ctrl_disconnect_w(struct work_struct *w)
+{
+ struct ghsuart_ctrl_port *port =
+ container_of(w, struct ghsuart_ctrl_port, disconnect_w);
+
+ if (!test_bit(CH_OPENED, &port->channel_sts))
+ return;
+
+ msm_smux_close(port->ch_id);
+ clear_bit(CH_OPENED, &port->channel_sts);
+}
+
+/*
+ * Unbind the grmnet instance from control port @port_num: clear the USB-side
+ * callbacks/state under the port lock, then schedule the SMUX channel close
+ * on the port workqueue.
+ */
+void ghsuart_ctrl_disconnect(void *gptr, int port_num)
+{
+	/* this driver's port type is ghsuart_ctrl_port, not gctrl_port */
+	struct ghsuart_ctrl_port	*port;
+	struct grmnet		*gr = NULL;
+	unsigned long		flags;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	/* valid indices are 0..num_ctrl_ports-1, so reject ">=", not ">" */
+	if (port_num >= num_ctrl_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	port = ghsuart_ctrl_ports[port_num].port;
+
+	if (!gptr || !port) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	gr = gptr;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gr->send_encap_cmd = 0;
+	gr->notify_modem = 0;
+	port->cbits_tomodem = 0;
+	port->port_usb = 0;
+	port->send_cpkt_response = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	queue_work(port->wq, &port->disconnect_w);
+}
+
+static int ghsuart_ctrl_probe(struct platform_device *pdev)
+{
+ struct ghsuart_ctrl_port *port;
+ unsigned long flags;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ port = ghsuart_ctrl_ports[pdev->id].port;
+ set_bit(CH_READY, &port->channel_sts);
+
+ /* if usb is online, start read */
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (port->port_usb)
+ queue_work(port->wq, &port->connect_w);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return 0;
+}
+
+static int ghsuart_ctrl_remove(struct platform_device *pdev)
+{
+ struct ghsuart_ctrl_port *port;
+ struct grmnet *gr = NULL;
+ unsigned long flags;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ port = ghsuart_ctrl_ports[pdev->id].port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto not_ready;
+ }
+
+ gr = port->port_usb;
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ if (gr && gr->disconnect)
+ gr->disconnect(gr);
+
+ clear_bit(CH_OPENED, &port->channel_sts);
+not_ready:
+ clear_bit(CH_READY, &port->channel_sts);
+
+ return 0;
+}
+
+/*
+ * Tear down control port @portno: flush/destroy its workqueue, unregister
+ * its platform driver and free the port structure.
+ */
+static void ghsuart_ctrl_port_free(int portno)
+{
+	struct ghsuart_ctrl_port *port = ghsuart_ctrl_ports[portno].port;
+	/* pdrv lives in ghsuart_ctrl_ports[], not the nonexistent gctrl_ports */
+	struct platform_driver *pdrv = &ghsuart_ctrl_ports[portno].pdrv;
+
+	destroy_workqueue(port->wq);
+	if (pdrv)
+		platform_driver_unregister(pdrv);
+	kfree(port);
+}
+
+static int ghsuart_ctrl_port_alloc(int portno, enum gadget_type gtype)
+{
+ struct ghsuart_ctrl_port *port;
+ struct platform_driver *pdrv;
+ int err;
+
+ port = kzalloc(sizeof(struct ghsuart_ctrl_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->wq = create_singlethread_workqueue(ghsuart_ctrl_names[portno]);
+ if (!port->wq) {
+ pr_err("%s: Unable to create workqueue:%s\n",
+ __func__, ghsuart_ctrl_names[portno]);
+ kfree(port);
+ return -ENOMEM;
+ }
+
+ port->port_num = portno;
+ port->gtype = gtype;
+
+ spin_lock_init(&port->port_lock);
+
+ INIT_WORK(&port->connect_w, ghsuart_ctrl_connect_w);
+ INIT_WORK(&port->disconnect_w, ghsuart_ctrl_disconnect_w);
+
+ port->ch_id = SMUX_USB_RMNET_CTL_0;
+ port->ctxt = port;
+ port->send_pkt = ghsuart_ctrl_receive;
+ ghsuart_ctrl_ports[portno].port = port;
+
+ pdrv = &ghsuart_ctrl_ports[portno].pdrv;
+ pdrv->probe = ghsuart_ctrl_probe;
+ pdrv->remove = ghsuart_ctrl_remove;
+ pdrv->driver.name = ghsuart_ctrl_names[portno];
+ pdrv->driver.owner = THIS_MODULE;
+
+ err = platform_driver_register(pdrv);
+ if (unlikely(err < 0))
+ return err;
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+ return 0;
+}
+
+int ghsuart_ctrl_setup(unsigned int num_ports, enum gadget_type gtype)
+{
+ int first_port_id = num_ctrl_ports;
+ int total_num_ports = num_ports + num_ctrl_ports;
+ int i;
+ int ret = 0;
+
+ if (!num_ports || total_num_ports > NUM_HSUART_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, num_ports);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: requested ports:%d\n", __func__, num_ports);
+
+ for (i = first_port_id; i < (first_port_id + num_ports); i++) {
+
+ num_ctrl_ports++;
+ ret = ghsuart_ctrl_port_alloc(i, gtype);
+ if (ret) {
+ num_ctrl_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_ports;
+ }
+ }
+
+ return first_port_id;
+
+free_ports:
+ for (i = first_port_id; i < num_ctrl_ports; i++)
+ ghsuart_ctrl_port_free(i);
+ num_ctrl_ports = first_port_id;
+ return ret;
+}
+
+#define DEBUG_BUF_SIZE 1024
+static ssize_t ghsuart_ctrl_read_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ghsuart_ctrl_port *port;
+ char *buf;
+ unsigned long flags;
+ int ret;
+ int i;
+ int temp = 0;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_ctrl_ports; i++) {
+ port = ghsuart_ctrl_ports[i].port;
+ if (!port)
+ continue;
+ spin_lock_irqsave(&port->port_lock, flags);
+
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "#PORT:%d port: %p\n"
+ "to_usbhost: %lu\n"
+ "to_modem: %lu\n"
+ "cpkt_drp_cnt: %lu\n"
+ "DTR: %s\n",
+ i, port,
+ port->to_host, port->to_modem,
+ port->drp_cpkt_cnt,
+ port->cbits_tomodem ? "HIGH" : "LOW");
+
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t ghsuart_ctrl_reset_stats(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ghsuart_ctrl_port *port;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < num_ctrl_ports; i++) {
+ port = ghsuart_ctrl_ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->to_host = 0;
+ port->to_modem = 0;
+ port->drp_cpkt_cnt = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+ return count;
+}
+
+static const struct file_operations ghsuart_ctrl_stats_ops = {
+ .read = ghsuart_ctrl_read_stats,
+ .write = ghsuart_ctrl_reset_stats,
+};
+
+static struct dentry *ghsuart_ctrl_dent;
+/*
+ * Create the "ghsuart_ctrl_xport/status" debugfs file exposing per-port
+ * traffic counters.  Returns 0 on success, -ENODEV if debugfs is
+ * unavailable or creation fails.
+ */
+static int ghsuart_ctrl_debugfs_init(void)
+{
+	struct dentry *ghsuart_ctrl_dfile;
+
+	ghsuart_ctrl_dent = debugfs_create_dir("ghsuart_ctrl_xport", 0);
+	if (!ghsuart_ctrl_dent || IS_ERR(ghsuart_ctrl_dent))
+		return -ENODEV;
+
+	/* fops symbol is ghsuart_ctrl_stats_ops (gctrl_stats_ops is undefined) */
+	ghsuart_ctrl_dfile =
+		debugfs_create_file("status", S_IRUGO | S_IWUSR,
+				ghsuart_ctrl_dent, 0, &ghsuart_ctrl_stats_ops);
+	if (!ghsuart_ctrl_dfile || IS_ERR(ghsuart_ctrl_dfile)) {
+		debugfs_remove(ghsuart_ctrl_dent);
+		ghsuart_ctrl_dent = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void ghsuart_ctrl_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ghsuart_ctrl_dent);
+}
+
+static int __init ghsuart_ctrl_init(void)
+{
+ int ret;
+
+ ret = ghsuart_ctrl_debugfs_init();
+ if (ret) {
+ pr_debug("mode debugfs file is not available\n");
+ return ret;
+ }
+ return 0;
+}
+module_init(ghsuart_ctrl_init);
+
+static void __exit ghsuart_ctrl_exit(void)
+{
+ ghsuart_ctrl_debugfs_exit();
+}
+module_exit(ghsuart_ctrl_exit);
+
+MODULE_DESCRIPTION("HSUART control xport for RmNet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/u_data_hsuart.c b/drivers/usb/gadget/u_data_hsuart.c
new file mode 100644
index 0000000..b2c57c4
--- /dev/null
+++ b/drivers/usb/gadget/u_data_hsuart.c
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/smux.h>
+
+#include <mach/usb_gadget_xport.h>
+
+static unsigned int num_data_ports;
+
+static const char *ghsuart_data_names[] = {
+ "SMUX_DUN_DATA_HSUART",
+ "SMUX_RMNET_DATA_HSUART"
+};
+
+#define DATA_BRIDGE_NAME_MAX_LEN 20
+
+#define GHSUART_DATA_RMNET_RX_Q_SIZE 10
+#define GHSUART_DATA_RMNET_TX_Q_SIZE 20
+#define GHSUART_DATA_SERIAL_RX_Q_SIZE 5
+#define GHSUART_DATA_SERIAL_TX_Q_SIZE 5
+#define GHSUART_DATA_RX_REQ_SIZE 2048
+#define GHSUART_DATA_TX_INTR_THRESHOLD 1
+
+/* from cdc-acm.h */
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
+
+static unsigned int ghsuart_data_rmnet_tx_q_size = GHSUART_DATA_RMNET_TX_Q_SIZE;
+module_param(ghsuart_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_rmnet_rx_q_size = GHSUART_DATA_RMNET_RX_Q_SIZE;
+module_param(ghsuart_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_serial_tx_q_size =
+ GHSUART_DATA_SERIAL_TX_Q_SIZE;
+module_param(ghsuart_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_serial_rx_q_size =
+ GHSUART_DATA_SERIAL_RX_Q_SIZE;
+module_param(ghsuart_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+static unsigned int ghsuart_data_rx_req_size = GHSUART_DATA_RX_REQ_SIZE;
+module_param(ghsuart_data_rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int ghsuart_data_tx_intr_thld = GHSUART_DATA_TX_INTR_THRESHOLD;
+module_param(ghsuart_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR);
+
+#define CH_OPENED 0
+#define CH_READY 1
+
+struct ghsuart_data_port {
+ /* port */
+ unsigned port_num;
+
+ /* gadget */
+ atomic_t connected;
+ struct usb_ep *in;
+ struct usb_ep *out;
+
+ enum gadget_type gtype;
+ spinlock_t port_lock;
+ void *port_usb;
+
+ /* data transfer queues */
+ unsigned int tx_q_size;
+ struct list_head tx_idle;
+ struct sk_buff_head tx_skb_q;
+ spinlock_t tx_lock;
+
+ unsigned int rx_q_size;
+ struct list_head rx_idle;
+ struct sk_buff_head rx_skb_q;
+ spinlock_t rx_lock;
+
+ /* work */
+ struct workqueue_struct *wq;
+ struct work_struct connect_w;
+ struct work_struct disconnect_w;
+ struct work_struct write_tomdm_w;
+ struct work_struct write_tohost_w;
+ void *ctx;
+ unsigned int ch_id;
+ /* flow control bits */
+ unsigned long flags;
+ /* channel status */
+ unsigned long channel_sts;
+
+ unsigned int n_tx_req_queued;
+
+ /* control bits */
+ unsigned cbits_tomodem;
+ unsigned cbits_tohost;
+
+ /* counters */
+ unsigned long to_modem;
+ unsigned long to_host;
+ unsigned int tomodem_drp_cnt;
+};
+
+static struct {
+ struct ghsuart_data_port *port;
+ struct platform_driver pdrv;
+} ghsuart_data_ports[NUM_HSUART_PORTS];
+
+static void ghsuart_data_start_rx(struct ghsuart_data_port *port);
+
+static void ghsuart_data_free_requests(struct usb_ep *ep,
+ struct list_head *head)
+{
+ struct usb_request *req;
+
+ while (!list_empty(head)) {
+ req = list_entry(head->next, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int ghsuart_data_alloc_requests(struct usb_ep *ep,
+ struct list_head *head,
+ int num,
+ void (*cb)(struct usb_ep *ep, struct usb_request *),
+ gfp_t flags)
+{
+ int i;
+ struct usb_request *req;
+
+ pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__,
+ ep->name, head, num, cb);
+
+ for (i = 0; i < num; i++) {
+ req = usb_ep_alloc_request(ep, flags);
+ if (!req) {
+ pr_err("%s: req allocated:%d\n", __func__, i);
+ return list_empty(head) ? -ENOMEM : 0;
+ }
+ req->complete = cb;
+ list_add(&req->list, head);
+ }
+
+ return 0;
+}
+
+static void ghsuart_data_write_tohost(struct work_struct *w)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int ret;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ struct ghsuart_data_port *port;
+
+ port = container_of(w, struct ghsuart_data_port, write_tohost_w);
+
+ if (!port || !atomic_read(&port->connected))
+ return;
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ ep = port->in;
+ if (!ep) {
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ return;
+ }
+
+ while (!list_empty(&port->tx_idle)) {
+ skb = __skb_dequeue(&port->tx_skb_q);
+ if (!skb)
+ break;
+
+ req = list_first_entry(&port->tx_idle, struct usb_request,
+ list);
+ req->context = skb;
+ req->buf = skb->data;
+ req->length = skb->len;
+
+ port->n_tx_req_queued++;
+ if (port->n_tx_req_queued == ghsuart_data_tx_intr_thld) {
+ req->no_interrupt = 0;
+ port->n_tx_req_queued = 0;
+ } else {
+ req->no_interrupt = 1;
+ }
+
+ list_del(&req->list);
+
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_KERNEL);
+ spin_lock_irqsave(&port->tx_lock, flags);
+ if (ret) {
+ pr_err("%s: usb epIn failed\n", __func__);
+ list_add(&req->list, &port->tx_idle);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ port->to_host++;
+ }
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+}
+
+static void ghsuart_data_write_tomdm(struct work_struct *w)
+{
+ struct ghsuart_data_port *port;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret;
+
+ port = container_of(w, struct ghsuart_data_port, write_tomdm_w);
+
+ if (!port || !atomic_read(&port->connected))
+ return;
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ if (test_bit(TX_THROTTLED, &port->flags)) {
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+
+ while ((skb = __skb_dequeue(&port->rx_skb_q))) {
+ pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__,
+ port, port->to_modem, port->port_num);
+
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ ret = msm_smux_write(port->ch_id, skb, skb->data, skb->len);
+ spin_lock_irqsave(&port->rx_lock, flags);
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ /*flow control*/
+ set_bit(TX_THROTTLED, &port->flags);
+ __skb_queue_head(&port->rx_skb_q, skb);
+ break;
+ }
+ pr_err_ratelimited("%s: write error:%d\n",
+ __func__, ret);
+ port->tomodem_drp_cnt++;
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ port->to_modem++;
+ }
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ ghsuart_data_start_rx(port);
+}
+
+static void ghsuart_data_epin_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct ghsuart_data_port *port = ep->driver_data;
+ struct sk_buff *skb = req->context;
+ int status = req->status;
+
+ switch (status) {
+ case 0:
+ /* successful completion */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ /* connection gone */
+ dev_kfree_skb_any(skb);
+ req->buf = 0;
+ usb_ep_free_request(ep, req);
+ return;
+ default:
+ pr_err("%s: data tx ep error %d\n", __func__, status);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ spin_lock(&port->tx_lock);
+ list_add_tail(&req->list, &port->tx_idle);
+ spin_unlock(&port->tx_lock);
+
+ queue_work(port->wq, &port->write_tohost_w);
+}
+
+/*
+ * OUT endpoint (host -> device) completion handler.  On success the skb is
+ * queued for the modem and write_tomdm work is scheduled; on cable
+ * disconnect the request is freed; on other errors the request is returned
+ * to rx_idle.  All rx_idle/rx_skb_q manipulation must hold rx_lock.
+ */
+static void
+ghsuart_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ghsuart_data_port *port = ep->driver_data;
+	struct sk_buff *skb = req->context;
+	int status = req->status;
+	int queue = 0;
+
+	switch (status) {
+	case 0:
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* cable disconnection */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		usb_ep_free_request(ep, req);
+		return;
+	default:
+		pr_err_ratelimited("%s: %s response error %d, %d/%d\n",
+			__func__, ep->name, status,
+			req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		/* rx_idle is protected by rx_lock; take it before re-listing */
+		spin_lock(&port->rx_lock);
+		list_add_tail(&req->list, &port->rx_idle);
+		spin_unlock(&port->rx_lock);
+		return;
+	}
+
+	spin_lock(&port->rx_lock);
+	if (queue) {
+		__skb_queue_tail(&port->rx_skb_q, skb);
+		list_add_tail(&req->list, &port->rx_idle);
+		queue_work(port->wq, &port->write_tomdm_w);
+	}
+	spin_unlock(&port->rx_lock);
+}
+
+static void ghsuart_data_start_rx(struct ghsuart_data_port *port)
+{
+ struct usb_request *req;
+ struct usb_ep *ep;
+ unsigned long flags;
+ int ret;
+ struct sk_buff *skb;
+
+ pr_debug("%s: port:%p\n", __func__, port);
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ ep = port->out;
+ if (!ep) {
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+
+ if (test_bit(TX_THROTTLED, &port->flags)) {
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+
+ while (atomic_read(&port->connected) && !list_empty(&port->rx_idle)) {
+
+ req = list_first_entry(&port->rx_idle,
+ struct usb_request, list);
+
+ skb = alloc_skb(ghsuart_data_rx_req_size, GFP_ATOMIC);
+ if (!skb)
+ break;
+ list_del(&req->list);
+ req->buf = skb->data;
+ req->length = ghsuart_data_rx_req_size;
+ req->context = skb;
+
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ ret = usb_ep_queue(ep, req, GFP_KERNEL);
+ spin_lock_irqsave(&port->rx_lock, flags);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+
+ pr_err_ratelimited("%s: rx queue failed\n", __func__);
+
+ if (atomic_read(&port->connected))
+ list_add(&req->list, &port->rx_idle);
+ else
+ usb_ep_free_request(ep, req);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+}
+
+static void ghsuart_data_start_io(struct ghsuart_data_port *port)
+{
+ unsigned long flags;
+ struct usb_ep *ep;
+ int ret;
+
+ pr_debug("%s: port:%p\n", __func__, port);
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ ep = port->out;
+ if (!ep) {
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+
+ ret = ghsuart_data_alloc_requests(ep, &port->rx_idle,
+ port->rx_q_size, ghsuart_data_epout_complete, GFP_ATOMIC);
+ if (ret) {
+ pr_err("%s: rx req allocation failed\n", __func__);
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ ep = port->in;
+ if (!ep) {
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ return;
+ }
+
+ ret = ghsuart_data_alloc_requests(ep, &port->tx_idle,
+ port->tx_q_size, ghsuart_data_epin_complete, GFP_ATOMIC);
+ if (ret) {
+ pr_err("%s: tx req allocation failed\n", __func__);
+ ghsuart_data_free_requests(ep, &port->rx_idle);
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ /* queue out requests */
+ ghsuart_data_start_rx(port);
+}
+
+static void ghsuart_dunctrl_status(void *ctxt, unsigned int ctrl_bits)
+{
+ struct ghsuart_data_port *port = ctxt;
+ struct gserial *gser;
+ unsigned long flags;
+
+ pr_debug("%s - input control lines: dcd%c dsr%c break%c "
+ "ring%c framing%c parity%c overrun%c\n", __func__,
+ ctrl_bits & ACM_CTRL_DCD ? '+' : '-',
+ ctrl_bits & ACM_CTRL_DSR ? '+' : '-',
+ ctrl_bits & ACM_CTRL_BRK ? '+' : '-',
+ ctrl_bits & ACM_CTRL_RI ? '+' : '-',
+ ctrl_bits & ACM_CTRL_FRAMING ? '+' : '-',
+ ctrl_bits & ACM_CTRL_PARITY ? '+' : '-',
+ ctrl_bits & ACM_CTRL_OVERRUN ? '+' : '-');
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->cbits_tohost = ctrl_bits;
+ gser = port->port_usb;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (gser && gser->send_modem_ctrl_bits)
+ gser->send_modem_ctrl_bits(gser, ctrl_bits);
+}
+
+const char *event_string(int event_type)
+{
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ return "SMUX_CONNECTED";
+ case SMUX_DISCONNECTED:
+ return "SMUX_DISCONNECTED";
+ case SMUX_READ_DONE:
+ return "SMUX_READ_DONE";
+ case SMUX_READ_FAIL:
+ return "SMUX_READ_FAIL";
+ case SMUX_WRITE_DONE:
+ return "SMUX_WRITE_DONE";
+ case SMUX_WRITE_FAIL:
+ return "SMUX_WRITE_FAIL";
+ case SMUX_HIGH_WM_HIT:
+ return "SMUX_HIGH_WM_HIT";
+ case SMUX_LOW_WM_HIT:
+ return "SMUX_LOW_WM_HIT";
+ case SMUX_TIOCM_UPDATE:
+ return "SMUX_TIOCM_UPDATE";
+ default:
+ return "UNDEFINED";
+ }
+}
+
+/*
+ * SMUX event callback for the data channel.  Starts USB I/O on connect,
+ * shuttles completed reads to the host and recycles write buffers, and
+ * implements tx flow control via the TX_THROTTLED flag on the watermark
+ * events.
+ */
+static void ghsuart_notify_event(void *priv, int event_type,
+				 const void *metadata)
+{
+	struct ghsuart_data_port *port = priv;
+	struct smux_meta_write *meta_write =
+				(struct smux_meta_write *) metadata;
+	struct smux_meta_read *meta_read =
+				(struct smux_meta_read *) metadata;
+	struct sk_buff *skb;
+	unsigned long flags;
+	unsigned int cbits;
+	struct gserial *gser;
+
+	pr_debug("%s: event type: %s ", __func__, event_string(event_type));
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		set_bit(CH_OPENED, &port->channel_sts);
+		if (port->gtype == USB_GADGET_SERIAL) {
+			cbits = msm_smux_tiocm_get(port->ch_id);
+			if (cbits & ACM_CTRL_DCD) {
+				gser = port->port_usb;
+				if (gser && gser->connect)
+					gser->connect(gser);
+			}
+		}
+		ghsuart_data_start_io(port);
+		break;
+	case SMUX_DISCONNECTED:
+		clear_bit(CH_OPENED, &port->channel_sts);
+		break;
+	case SMUX_READ_DONE:
+		skb = meta_read->pkt_priv;
+		skb->data = meta_read->buffer;
+		skb->len = meta_read->len;
+		spin_lock_irqsave(&port->tx_lock, flags);
+		__skb_queue_tail(&port->tx_skb_q, skb);
+		spin_unlock_irqrestore(&port->tx_lock, flags);
+		queue_work(port->wq, &port->write_tohost_w);
+		break;
+	case SMUX_WRITE_DONE:
+		skb = meta_write->pkt_priv;
+		skb->data = meta_write->buffer;
+		dev_kfree_skb_any(skb);
+		queue_work(port->wq, &port->write_tomdm_w);
+		break;
+	case SMUX_READ_FAIL:
+		skb = meta_read->pkt_priv;
+		skb->data = meta_read->buffer;
+		dev_kfree_skb_any(skb);
+		break;
+	case SMUX_WRITE_FAIL:
+		skb = meta_write->pkt_priv;
+		skb->data = meta_write->buffer;
+		dev_kfree_skb_any(skb);
+		break;
+	case SMUX_HIGH_WM_HIT:
+		spin_lock_irqsave(&port->rx_lock, flags);
+		set_bit(TX_THROTTLED, &port->flags);
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		/* must break: falling into LOW_WM would clear the bit again */
+		break;
+	case SMUX_LOW_WM_HIT:
+		spin_lock_irqsave(&port->rx_lock, flags);
+		clear_bit(TX_THROTTLED, &port->flags);
+		spin_unlock_irqrestore(&port->rx_lock, flags);
+		queue_work(port->wq, &port->write_tomdm_w);
+		break;
+	case SMUX_TIOCM_UPDATE:
+		if (port->gtype == USB_GADGET_SERIAL) {
+			cbits = msm_smux_tiocm_get(port->ch_id);
+			ghsuart_dunctrl_status(port, cbits);
+		}
+		break;
+	default:
+		pr_err("%s:wrong event received\n", __func__);
+	}
+}
+
+static int ghsuart_get_rx_buffer(void *priv, void **pkt_priv,
+ void **buffer, int size)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ *pkt_priv = skb;
+ *buffer = skb->data;
+
+ return 0;
+}
+
+static void ghsuart_data_connect_w(struct work_struct *w)
+{
+ struct ghsuart_data_port *port =
+ container_of(w, struct ghsuart_data_port, connect_w);
+ int ret;
+
+ if (!port || !atomic_read(&port->connected) ||
+ !test_bit(CH_READY, &port->channel_sts))
+ return;
+
+ pr_debug("%s: port:%p\n", __func__, port);
+
+ ret = msm_smux_open(port->ch_id, port, &ghsuart_notify_event,
+ &ghsuart_get_rx_buffer);
+ if (ret) {
+ pr_err("%s: unable to open smux ch:%d err:%d\n",
+ __func__, port->ch_id, ret);
+ return;
+ }
+}
+
+static void ghsuart_data_disconnect_w(struct work_struct *w)
+{
+ struct ghsuart_data_port *port =
+ container_of(w, struct ghsuart_data_port, disconnect_w);
+
+ if (!test_bit(CH_OPENED, &port->channel_sts))
+ return;
+
+ msm_smux_close(port->ch_id);
+ clear_bit(CH_OPENED, &port->channel_sts);
+}
+
+static void ghsuart_data_free_buffers(struct ghsuart_data_port *port)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ if (!port->in) {
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ return;
+ }
+
+ ghsuart_data_free_requests(port->in, &port->tx_idle);
+
+ while ((skb = __skb_dequeue(&port->tx_skb_q)))
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ if (!port->out) {
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+ return;
+ }
+
+ ghsuart_data_free_requests(port->out, &port->rx_idle);
+
+ while ((skb = __skb_dequeue(&port->rx_skb_q)))
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+}
+
+static int ghsuart_data_probe(struct platform_device *pdev)
+{
+ struct ghsuart_data_port *port;
+
+ pr_debug("%s: name:%s num_data_ports= %d\n",
+ __func__, pdev->name, num_data_ports);
+
+ if (pdev->id >= num_data_ports) {
+ pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+ return -EINVAL;
+ }
+
+ port = ghsuart_data_ports[pdev->id].port;
+ set_bit(CH_READY, &port->channel_sts);
+
+ /* if usb is online, try opening bridge */
+ if (atomic_read(&port->connected))
+ queue_work(port->wq, &port->connect_w);
+
+ return 0;
+}
+
+/* mdm disconnect */
+static int ghsuart_data_remove(struct platform_device *pdev)
+{
+ struct ghsuart_data_port *port;
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+ int ret;
+ struct gserial *gser = NULL;
+ unsigned long flags;
+
+ pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+ if (pdev->id >= num_data_ports) {
+ pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+ return -EINVAL;
+ }
+
+ port = ghsuart_data_ports[pdev->id].port;
+
+ ep_in = port->in;
+ if (ep_in)
+ usb_ep_fifo_flush(ep_in);
+
+ ep_out = port->out;
+ if (ep_out)
+ usb_ep_fifo_flush(ep_out);
+
+ ghsuart_data_free_buffers(port);
+
+ if (port->gtype == USB_GADGET_SERIAL) {
+ spin_lock_irqsave(&port->port_lock, flags);
+ gser = port->port_usb;
+ port->cbits_tohost = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ if (gser && gser->disconnect)
+ gser->disconnect(gser);
+ }
+
+ ret = msm_smux_close(port->ch_id);
+ if (ret < 0)
+ pr_err("%s:Unable to close smux channel: %d\n",
+ __func__, port->ch_id);
+
+ clear_bit(CH_READY, &port->channel_sts);
+ clear_bit(CH_OPENED, &port->channel_sts);
+
+ return 0;
+}
+
+static void ghsuart_data_port_free(int portno)
+{
+ struct ghsuart_data_port *port = ghsuart_data_ports[portno].port;
+ struct platform_driver *pdrv = &ghsuart_data_ports[portno].pdrv;
+
+ destroy_workqueue(port->wq);
+ kfree(port);
+
+ if (pdrv)
+ platform_driver_unregister(pdrv);
+}
+
+static void
+ghsuart_send_controlbits_tomodem(void *gptr, u8 portno, int cbits)
+{
+ struct ghsuart_data_port *port;
+
+ if (portno >= num_ctrl_ports || !gptr) {
+ pr_err("%s: Invalid portno#%d\n", __func__, portno);
+ return;
+ }
+
+ port = ghsuart_data_ports[portno].port;
+ if (!port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ if (cbits == port->cbits_tomodem)
+ return;
+
+ port->cbits_tomodem = cbits;
+
+ if (!test_bit(CH_OPENED, &port->channel_sts))
+ return;
+
+ /* if DTR is high, update latest modem info to Host */
+ if (port->cbits_tomodem & ACM_CTRL_DTR) {
+ unsigned int i;
+
+ i = msm_smux_tiocm_get(port->ch_id);
+ ghsuart_dunctrl_status(port, i);
+ }
+
+ pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits);
+ /* Send the control bits to the Modem */
+ msm_smux_tiocm_set(port->ch_id, cbits, ~cbits);
+}
+
+static int ghsuart_data_port_alloc(unsigned port_num, enum gadget_type gtype)
+{
+ struct ghsuart_data_port *port;
+ struct platform_driver *pdrv;
+
+ port = kzalloc(sizeof(struct ghsuart_data_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->wq = create_singlethread_workqueue(ghsuart_data_names[port_num]);
+ if (!port->wq) {
+ pr_err("%s: Unable to create workqueue:%s\n",
+ __func__, ghsuart_data_names[port_num]);
+ kfree(port);
+ return -ENOMEM;
+ }
+ port->port_num = port_num;
+
+ /* port initialization */
+ spin_lock_init(&port->port_lock);
+ spin_lock_init(&port->rx_lock);
+ spin_lock_init(&port->tx_lock);
+
+ INIT_WORK(&port->connect_w, ghsuart_data_connect_w);
+ INIT_WORK(&port->disconnect_w, ghsuart_data_disconnect_w);
+ INIT_WORK(&port->write_tohost_w, ghsuart_data_write_tohost);
+ INIT_WORK(&port->write_tomdm_w, ghsuart_data_write_tomdm);
+
+ INIT_LIST_HEAD(&port->tx_idle);
+ INIT_LIST_HEAD(&port->rx_idle);
+
+ skb_queue_head_init(&port->tx_skb_q);
+ skb_queue_head_init(&port->rx_skb_q);
+
+ port->gtype = gtype;
+ if (port->gtype == USB_GADGET_SERIAL)
+ port->ch_id = SMUX_USB_DUN_0;
+ else
+ port->ch_id = SMUX_USB_RMNET_DATA_0;
+ port->ctx = port;
+ ghsuart_data_ports[port_num].port = port;
+
+ pdrv = &ghsuart_data_ports[port_num].pdrv;
+ pdrv->probe = ghsuart_data_probe;
+ pdrv->remove = ghsuart_data_remove;
+ pdrv->driver.name = ghsuart_data_names[port_num];
+ pdrv->driver.owner = THIS_MODULE;
+
+ platform_driver_register(pdrv);
+
+ pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num);
+
+ return 0;
+}
+
+void ghsuart_data_disconnect(void *gptr, int port_num)
+{
+ struct ghsuart_data_port *port;
+ unsigned long flags;
+ struct gserial *gser = NULL;
+
+ pr_debug("%s: port#%d\n", __func__, port_num);
+
+ port = ghsuart_data_ports[port_num].port;
+
+ if (port_num > num_data_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return;
+ }
+
+ if (!gptr || !port) {
+ pr_err("%s: port is null\n", __func__);
+ return;
+ }
+
+ ghsuart_data_free_buffers(port);
+
+ /* disable endpoints */
+ if (port->in)
+ usb_ep_disable(port->in);
+
+ if (port->out)
+ usb_ep_disable(port->out);
+
+ atomic_set(&port->connected, 0);
+
+ if (port->gtype == USB_GADGET_SERIAL) {
+ gser = gptr;
+ spin_lock_irqsave(&port->port_lock, flags);
+ gser->notify_modem = 0;
+ port->cbits_tomodem = 0;
+ port->port_usb = 0;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ port->in = NULL;
+ port->n_tx_req_queued = 0;
+ clear_bit(RX_THROTTLED, &port->flags);
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ port->out = NULL;
+ clear_bit(TX_THROTTLED, &port->flags);
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ queue_work(port->wq, &port->disconnect_w);
+}
+
+int ghsuart_data_connect(void *gptr, int port_num)
+{
+ struct ghsuart_data_port *port;
+ struct gserial *gser;
+ struct grmnet *gr;
+ unsigned long flags;
+ int ret = 0;
+
+ pr_debug("%s: port#%d\n", __func__, port_num);
+
+ port = ghsuart_data_ports[port_num].port;
+
+ if (port_num > num_data_ports) {
+ pr_err("%s: invalid portno#%d\n", __func__, port_num);
+ return -ENODEV;
+ }
+
+ if (!gptr || !port) {
+ pr_err("%s: port is null\n", __func__);
+ return -ENODEV;
+ }
+
+ if (port->gtype == USB_GADGET_SERIAL) {
+ gser = gptr;
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ port->in = gser->in;
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ port->out = gser->out;
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+
+ port->tx_q_size = ghsuart_data_serial_tx_q_size;
+ port->rx_q_size = ghsuart_data_serial_rx_q_size;
+ gser->in->driver_data = port;
+ gser->out->driver_data = port;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ gser->notify_modem = ghsuart_send_controlbits_tomodem;
+ port->port_usb = gptr;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ } else {
+ gr = gptr;
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ port->in = gr->in;
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ port->out = gr->out;
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ port->tx_q_size = ghsuart_data_rmnet_tx_q_size;
+ port->rx_q_size = ghsuart_data_rmnet_rx_q_size;
+ gr->in->driver_data = port;
+ gr->out->driver_data = port;
+ }
+
+ ret = usb_ep_enable(port->in);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+ __func__, port->in);
+ goto fail;
+ }
+
+ ret = usb_ep_enable(port->out);
+ if (ret) {
+ pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+ __func__, port->out);
+ usb_ep_disable(port->in);
+ goto fail;
+ }
+
+ atomic_set(&port->connected, 1);
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ port->to_host = 0;
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ port->to_modem = 0;
+ port->tomodem_drp_cnt = 0;
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ queue_work(port->wq, &port->connect_w);
+fail:
+ return ret;
+}
+
+#define DEBUG_BUF_SIZE 1024
+static ssize_t ghsuart_data_read_stats(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct ghsuart_data_port *port;
+ struct platform_driver *pdrv;
+ char *buf;
+ unsigned long flags;
+ int ret;
+ int i;
+ int temp = 0;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_data_ports; i++) {
+ port = ghsuart_data_ports[i].port;
+ if (!port)
+ continue;
+ pdrv = &ghsuart_data_ports[i].pdrv;
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "\nName: %s\n"
+ "#PORT:%d port#: %p\n"
+ "data_ch_open: %d\n"
+ "data_ch_ready: %d\n"
+ "\n******UL INFO*****\n\n"
+ "dpkts_to_modem: %lu\n"
+ "tomodem_drp_cnt: %u\n"
+ "rx_buf_len: %u\n"
+ "TX_THROTTLED %d\n",
+ pdrv->driver.name,
+ i, port,
+ test_bit(CH_OPENED, &port->channel_sts),
+ test_bit(CH_READY, &port->channel_sts),
+ port->to_modem,
+ port->tomodem_drp_cnt,
+ port->rx_skb_q.qlen,
+ test_bit(TX_THROTTLED, &port->flags));
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+ "\n******DL INFO******\n\n"
+ "dpkts_to_usbhost: %lu\n"
+ "tx_buf_len: %u\n"
+ "RX_THROTTLED %d\n",
+ port->to_host,
+ port->tx_skb_q.qlen,
+ test_bit(RX_THROTTLED, &port->flags));
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t ghsuart_data_reset_stats(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ghsuart_data_port *port;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < num_data_ports; i++) {
+ port = ghsuart_data_ports[i].port;
+ if (!port)
+ continue;
+
+ spin_lock_irqsave(&port->rx_lock, flags);
+ port->to_modem = 0;
+ port->tomodem_drp_cnt = 0;
+ spin_unlock_irqrestore(&port->rx_lock, flags);
+
+ spin_lock_irqsave(&port->tx_lock, flags);
+ port->to_host = 0;
+ spin_unlock_irqrestore(&port->tx_lock, flags);
+ }
+ return count;
+}
+
+const struct file_operations ghsuart_data_stats_ops = {
+ .read = ghsuart_data_read_stats,
+ .write = ghsuart_data_reset_stats,
+};
+
+static struct dentry *ghsuart_data_dent;
+static int ghsuart_data_debugfs_init(void)
+{
+ struct dentry *ghsuart_data_dfile;
+
+ ghsuart_data_dent = debugfs_create_dir("ghsic_data_xport", 0);
+ if (!ghsuart_data_dent || IS_ERR(ghsuart_data_dent))
+ return -ENODEV;
+
+ ghsuart_data_dfile = debugfs_create_file("status", S_IRUGO | S_IWUSR,
+ ghsuart_data_dent, 0, &ghsuart_data_stats_ops);
+ if (!ghsuart_data_dfile || IS_ERR(ghsuart_data_dfile)) {
+ debugfs_remove(ghsuart_data_dent);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ghsuart_data_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ghsuart_data_dent);
+}
+
+int ghsuart_data_setup(unsigned num_ports, enum gadget_type gtype)
+{
+ int first_port_id = num_data_ports;
+ int total_num_ports = num_ports + num_data_ports;
+ int ret = 0;
+ int i;
+
+ if (!num_ports || total_num_ports > NUM_PORTS) {
+ pr_err("%s: Invalid num of ports count:%d\n",
+ __func__, num_ports);
+ return -EINVAL;
+ }
+ pr_debug("%s: count: %d\n", __func__, num_ports);
+
+ for (i = first_port_id; i < total_num_ports; i++) {
+
+		/* probe can be called during port_alloc, so update num_data_ports */
+ num_data_ports++;
+ ret = ghsuart_data_port_alloc(i, gtype);
+ if (ret) {
+ num_data_ports--;
+ pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+ goto free_ports;
+ }
+ }
+
+ /*return the starting index*/
+ return first_port_id;
+
+free_ports:
+ for (i = first_port_id; i < num_data_ports; i++)
+ ghsuart_data_port_free(i);
+ num_data_ports = first_port_id;
+
+ return ret;
+}
+
+static int __init ghsuart_data_init(void)
+{
+ int ret;
+
+ ret = ghsuart_data_debugfs_init();
+ if (ret) {
+ pr_debug("mode debugfs file is not available");
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ghsuart_data_init);
+
+static void __exit ghsuart_data_exit(void)
+{
+ ghsuart_data_debugfs_exit();
+}
+module_exit(ghsuart_data_exit);
+
+MODULE_DESCRIPTION("hsuart data xport driver for DUN and RMNET");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 5ee1908..82373e2 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -340,6 +340,15 @@
}
disable_irq(hcd->irq);
+
+ /* make sure we don't race against a remote wakeup */
+ if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
+ readl_relaxed(USB_PORTSC) & PORT_RESUME) {
+ dev_dbg(mehci->dev, "wakeup pending, aborting suspend\n");
+ enable_irq(hcd->irq);
+ return -EBUSY;
+ }
+
/*
* PHY may take some time or even fail to enter into low power
* mode (LPM). Hence poll for 500 msec and reset the PHY and link
@@ -973,6 +982,7 @@
#ifdef CONFIG_PM_SLEEP
static int msm_hsic_pm_suspend(struct device *dev)
{
+ int ret;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
@@ -981,7 +991,12 @@
if (device_may_wakeup(dev))
enable_irq_wake(hcd->irq);
- return msm_hsic_suspend(mehci);
+ ret = msm_hsic_suspend(mehci);
+
+ if (ret && device_may_wakeup(dev))
+ disable_irq_wake(hcd->irq);
+
+ return ret;
}
static int msm_hsic_pm_suspend_noirq(struct device *dev)
@@ -1033,14 +1048,7 @@
#ifdef CONFIG_PM_RUNTIME
static int msm_hsic_runtime_idle(struct device *dev)
{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
dev_dbg(dev, "EHCI runtime idle\n");
-
- /*don't allow runtime suspend in the middle of remote wakeup*/
- if (readl_relaxed(USB_PORTSC) & PORT_RESUME)
- return -EAGAIN;
-
return 0;
}
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm72k_otg.c
index 0ee1827..891a4e2 100644
--- a/drivers/usb/otg/msm72k_otg.c
+++ b/drivers/usb/otg/msm72k_otg.c
@@ -889,7 +889,13 @@
if (can_phy_power_collapse(dev) && dev->pdata->ldo_enable)
dev->pdata->ldo_enable(1);
- msm_otg_get_resume(dev);
+ if (pm_runtime_enabled(dev->otg.dev)) {
+ msm_otg_get_resume(dev);
+ } else {
+ pm_runtime_get_noresume(dev->otg.dev);
+ msm_otg_resume(dev);
+ pm_runtime_set_active(dev->otg.dev);
+ }
if (!is_phy_clk_disabled())
goto phy_resumed;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 4dd6aff..5b05c5b 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -673,8 +673,7 @@
{
struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
- if (aca_enabled() || (test_bit(ID, &motg->inputs) &&
- !test_bit(ID_A, &motg->inputs)))
+ if (aca_enabled())
return 0;
if (suspend) {
@@ -688,6 +687,14 @@
clear_bit(A_BUS_REQ, &motg->inputs);
queue_work(system_nrt_wq, &motg->sm_work);
break;
+ case OTG_STATE_B_PERIPHERAL:
+ pr_debug("peripheral bus suspend\n");
+ if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+ break;
+ set_bit(A_BUS_SUSPEND, &motg->inputs);
+ queue_work(system_nrt_wq, &motg->sm_work);
+ break;
+
default:
break;
}
@@ -697,6 +704,16 @@
/* Remote wakeup or resume */
set_bit(A_BUS_REQ, &motg->inputs);
otg->state = OTG_STATE_A_HOST;
+
+ /* ensure hardware is not in low power mode */
+ pm_runtime_resume(otg->dev);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ pr_debug("peripheral bus resume\n");
+ if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
+ break;
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+ queue_work(system_nrt_wq, &motg->sm_work);
break;
default:
break;
@@ -715,7 +732,7 @@
struct usb_bus *bus = otg->host;
struct msm_otg_platform_data *pdata = motg->pdata;
int cnt = 0;
- bool host_bus_suspend, dcp;
+ bool host_bus_suspend, device_bus_suspend, dcp;
u32 phy_ctrl_val = 0, cmd_val;
unsigned ret;
u32 portsc;
@@ -725,6 +742,9 @@
disable_irq(motg->irq);
host_bus_suspend = otg->host && !test_bit(ID, &motg->inputs);
+ device_bus_suspend = otg->gadget && test_bit(ID, &motg->inputs) &&
+ test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
dcp = motg->chg_type == USB_DCP_CHARGER;
/*
* Chipidea 45-nm PHY suspend sequence:
@@ -788,8 +808,8 @@
* PMIC notifications are unavailable.
*/
cmd_val = readl_relaxed(USB_USBCMD);
- if (host_bus_suspend || (motg->pdata->otg_control == OTG_PHY_CONTROL &&
- dcp))
+ if (host_bus_suspend || device_bus_suspend ||
+ (motg->pdata->otg_control == OTG_PHY_CONTROL && dcp))
cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
else
cmd_val |= ULPI_STP_CTRL;
@@ -799,7 +819,8 @@
* BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
* PHY retention and collapse can not happen with VDP_SRC enabled.
*/
- if (motg->caps & ALLOW_PHY_RETENTION && !host_bus_suspend && !dcp) {
+ if (motg->caps & ALLOW_PHY_RETENTION && !host_bus_suspend &&
+ !device_bus_suspend && !dcp) {
phy_ctrl_val = readl_relaxed(USB_PHY_CTRL);
if (motg->pdata->otg_control == OTG_PHY_CONTROL)
/* Enable PHY HV interrupts to wake MPM/Link */
@@ -1189,10 +1210,12 @@
/*
* if entering host mode tell the charger to not draw any current
- * from usb - if exiting host mode let the charger draw current
+ * from usb before turning on the boost.
+ * if exiting host mode disable the boost before enabling to draw
+ * current from the source.
*/
- pm8921_disable_source_current(on);
if (on) {
+ pm8921_disable_source_current(on);
ret = regulator_enable(vbus_otg);
if (ret) {
pr_err("unable to enable vbus_otg\n");
@@ -1205,6 +1228,7 @@
pr_err("unable to disable vbus_otg\n");
return;
}
+ pm8921_disable_source_current(on);
vbus_is_on = false;
}
}
@@ -2127,6 +2151,13 @@
*/
otg->host->is_b_host = 1;
msm_otg_start_host(otg, 1);
+ } else if (test_bit(A_BUS_SUSPEND, &motg->inputs) &&
+ test_bit(B_SESS_VLD, &motg->inputs)) {
+ pr_debug("a_bus_suspend && b_sess_vld\n");
+ if (motg->caps & ALLOW_LPM_ON_DEV_SUSPEND) {
+ pm_runtime_put_noidle(otg->dev);
+ pm_runtime_suspend(otg->dev);
+ }
} else if (test_bit(ID_C, &motg->inputs)) {
msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX);
}
@@ -2511,7 +2542,10 @@
pr_debug("OTG IRQ: in LPM\n");
disable_irq_nosync(irq);
motg->async_int = 1;
- pm_request_resume(otg->dev);
+ if (atomic_read(&motg->pm_suspended))
+ motg->sm_work_pending = true;
+ else
+ pm_request_resume(otg->dev);
return IRQ_HANDLED;
}
@@ -2560,6 +2594,8 @@
} else {
pr_debug("BSV clear\n");
clear_bit(B_SESS_VLD, &motg->inputs);
+ clear_bit(A_BUS_SUSPEND, &motg->inputs);
+
msm_chg_check_aca_intr(motg);
}
work = 1;
@@ -2676,7 +2712,10 @@
return;
}
- queue_work(system_nrt_wq, &motg->sm_work);
+ if (atomic_read(&motg->pm_suspended))
+ motg->sm_work_pending = true;
+ else
+ queue_work(system_nrt_wq, &motg->sm_work);
}
static irqreturn_t msm_pmic_id_irq(int irq, void *data)
@@ -2695,8 +2734,12 @@
set_bit(A_BUS_REQ, &motg->inputs);
}
- if (motg->otg.state != OTG_STATE_UNDEFINED)
- queue_work(system_nrt_wq, &motg->sm_work);
+ if (motg->otg.state != OTG_STATE_UNDEFINED) {
+ if (atomic_read(&motg->pm_suspended))
+ motg->sm_work_pending = true;
+ else
+ queue_work(system_nrt_wq, &motg->sm_work);
+ }
return IRQ_HANDLED;
}
@@ -3373,6 +3416,9 @@
motg->caps = ALLOW_PHY_RETENTION;
}
+ if (motg->pdata->enable_lpm_on_dev_suspend)
+ motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND;
+
wake_lock(&motg->wlock);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -3533,28 +3579,42 @@
#ifdef CONFIG_PM_SLEEP
static int msm_otg_pm_suspend(struct device *dev)
{
+ int ret = 0;
struct msm_otg *motg = dev_get_drvdata(dev);
dev_dbg(dev, "OTG PM suspend\n");
- return msm_otg_suspend(motg);
+
+ atomic_set(&motg->pm_suspended, 1);
+ ret = msm_otg_suspend(motg);
+ if (ret)
+ atomic_set(&motg->pm_suspended, 0);
+
+ return ret;
}
static int msm_otg_pm_resume(struct device *dev)
{
+ int ret = 0;
struct msm_otg *motg = dev_get_drvdata(dev);
- int ret;
dev_dbg(dev, "OTG PM resume\n");
- ret = msm_otg_resume(motg);
- if (ret)
- return ret;
- /* Update runtime PM status */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ atomic_set(&motg->pm_suspended, 0);
+ if (motg->sm_work_pending) {
+ motg->sm_work_pending = false;
- return 0;
+ pm_runtime_get_noresume(dev);
+ ret = msm_otg_resume(motg);
+
+ /* Update runtime PM status */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ queue_work(system_nrt_wq, &motg->sm_work);
+ }
+
+ return ret;
}
#endif
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 2a08101..94de730 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -734,6 +734,10 @@
}
}
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
for (i = 0; i < serial->num_ports; i++) {
/* walk all ports */
port = serial->port[i];
@@ -759,9 +763,6 @@
play_delayed(port);
spin_unlock_irq(&intfdata->susp_lock);
}
- spin_lock_irq(&intfdata->susp_lock);
- intfdata->suspended = 0;
- spin_unlock_irq(&intfdata->susp_lock);
err_out:
return err;
}
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index 7f603dd..0212142 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -26,6 +26,8 @@
#include "external_common.h"
#include "mhl_api.h"
+#include "mdp.h"
+
struct external_common_state_type *external_common_state;
EXPORT_SYMBOL(external_common_state);
DEFINE_MUTEX(external_common_state_hpd_mutex);
@@ -77,6 +79,23 @@
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDF};
#endif /* DEBUG_EDID */
+#define DMA_E_BASE 0xB0000
+void mdp_vid_quant_set(void)
+{
+ if ((external_common_state->video_resolution == \
+ HDMI_VFRMT_720x480p60_4_3) || \
+ (external_common_state->video_resolution == \
+ HDMI_VFRMT_720x480p60_16_9)) {
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x70, 0x00EB0010);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x74, 0x00EB0010);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x78, 0x00EB0010);
+ } else {
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x70, 0x00FF0000);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x74, 0x00FF0000);
+ MDP_OUTP(MDP_BASE + DMA_E_BASE + 0x78, 0x00FF0000);
+ }
+}
+
const char *video_format_2string(uint32 format)
{
switch (format) {
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 7f6585c..a372016 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -3641,8 +3641,8 @@
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10}, /*00*/
{0x18, 0x18, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
0x28, 0x28, 0x28, 0x28, 0x18, 0x28, 0x18}, /*01*/
- {0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
- 0x04, 0x04, 0x04, 0x04, 0x88, 0x04, 0x04}, /*02*/
+ {0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x88, 0x00, 0x04}, /*02*/
{0x02, 0x06, 0x11, 0x15, 0x04, 0x13, 0x10, 0x05, 0x1F,
0x14, 0x20, 0x22, 0x21, 0x01, 0x03, 0x11}, /*03*/
{0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/video/msm/logo.c b/drivers/video/msm/logo.c
index c061e86..57d754e 100644
--- a/drivers/video/msm/logo.c
+++ b/drivers/video/msm/logo.c
@@ -37,7 +37,7 @@
}
/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */
-int load_565rle_image(char *filename)
+int load_565rle_image(char *filename, bool bf_supported)
{
struct fb_info *info;
int fd, count, err = 0;
@@ -76,6 +76,12 @@
max = fb_width(info) * fb_height(info);
ptr = data;
+ if (bf_supported && (info->node == 1 || info->node == 2)) {
+ err = -EPERM;
+		pr_err("%s:%d no info->screen_base on fb%d!\n",
+ __func__, __LINE__, info->node);
+ goto err_logo_free_data;
+ }
bits = (unsigned short *)(info->screen_base);
while (count > 3) {
unsigned n = ptr[0];
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 471ed4e..2a6a900 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -93,7 +93,7 @@
static struct delayed_work mdp_pipe_ctrl_worker;
static boolean mdp_suspended = FALSE;
-DEFINE_MUTEX(mdp_suspend_mutex);
+static DEFINE_MUTEX(mdp_suspend_mutex);
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
@@ -659,6 +659,7 @@
goto error_extra;
INIT_WORK(&mgmt->mdp_histogram_worker, mdp_hist_read_work);
+ mgmt->hist = NULL;
mdp_hist_mgmt_array[index] = mgmt;
return 0;
@@ -685,7 +686,8 @@
{
struct mdp_hist_mgmt *temp;
int i, ret;
- mdp_hist_wq = alloc_workqueue("mdp_hist_wq", WQ_UNBOUND, 0);
+ mdp_hist_wq = alloc_workqueue("mdp_hist_wq",
+ WQ_NON_REENTRANT | WQ_UNBOUND, 0);
for (i = 0; i < MDP_HIST_MGMT_MAX; i++)
mdp_hist_mgmt_array[i] = NULL;
@@ -897,6 +899,7 @@
mgmt->frame_cnt = req->frame_cnt;
mgmt->bit_mask = req->bit_mask;
mgmt->num_bins = req->num_bins;
+ mgmt->hist = NULL;
ret = mdp_histogram_enable(mgmt);
@@ -1080,8 +1083,11 @@
goto error;
}
- /* if read was triggered by an underrun, don't wake up readers*/
- if (mgmt->mdp_is_hist_valid && mgmt->mdp_is_hist_init) {
+ /*
+ * if read was triggered by an underrun or failed copying,
+ * don't wake up readers
+ */
+ if (!ret && mgmt->mdp_is_hist_valid && mgmt->mdp_is_hist_init) {
mgmt->hist = NULL;
complete(&mgmt->mdp_hist_comp);
}
@@ -1177,6 +1183,11 @@
goto error_lock;
}
+ if (mgmt->hist != NULL) {
+ pr_err("%s; histogram attempted to be read twice\n", __func__);
+ ret = -EPERM;
+ goto error_lock;
+ }
mgmt->hist = hist;
mutex_unlock(&mgmt->mdp_hist_mutex);
@@ -1740,7 +1751,6 @@
spin_lock_init(&mdp_spin_lock);
mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
- mdp_hist_wq = create_singlethread_workqueue("mdp_hist_wq");
mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
mdp_pipe_ctrl_workqueue_handler);
@@ -2564,6 +2574,17 @@
return rc;
}
+unsigned int mdp_check_suspended(void)
+{
+ unsigned int ret;
+
+ mutex_lock(&mdp_suspend_mutex);
+ ret = mdp_suspended;
+ mutex_unlock(&mdp_suspend_mutex);
+
+ return ret;
+}
+
void mdp_footswitch_ctrl(boolean on)
{
mutex_lock(&mdp_suspend_mutex);
@@ -2630,6 +2651,7 @@
#ifdef CONFIG_FB_MSM_DTV
mdp4_dtv_set_black_screen();
#endif
+ mdp4_iommu_detach();
mdp_footswitch_ctrl(FALSE);
}
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 6224dba..b104b33 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -801,6 +801,7 @@
void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt);
void __mdp_histogram_kickoff(struct mdp_hist_mgmt *mgmt);
void __mdp_histogram_reset(struct mdp_hist_mgmt *mgmt);
+unsigned int mdp_check_suspended(void);
void mdp_footswitch_ctrl(boolean on);
#ifdef CONFIG_FB_MSM_MDP303
@@ -828,6 +829,10 @@
{
/* empty */
}
+static inline void mdp4_iommu_detach(void)
+{
+ /* empty */
+}
#endif
int mdp_ppp_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req);
@@ -836,4 +841,5 @@
unsigned long srcp0_addr, unsigned long srcp0_size,
unsigned long srcp1_addr, unsigned long srcp1_size);
+void mdp_vid_quant_set(void);
#endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 860209f..e6dc795 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -516,6 +516,7 @@
struct mdp4_overlay_pipe *mdp4_overlay_stage_pipe(int mixer, int stage);
void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe);
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_pipe_cleanup(int mixer);
int mdp4_mixer_stage_can_run(struct mdp4_overlay_pipe *pipe);
void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe);
void mdp4_mddi_overlay(struct msm_fb_data_type *mfd);
@@ -766,6 +767,7 @@
int mdp4_igc_lut_config(struct mdp_igc_lut_data *cfg);
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe);
void mdp4_iommu_attach(void);
+void mdp4_iommu_detach(void);
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
struct mdp4_overlay_pipe **ppipe);
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 34fd399..88424de 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -287,9 +287,7 @@
MDP_OUTP(MDP_BASE + 0xb3014, 0x1000080);
MDP_OUTP(MDP_BASE + 0xb4004, 0x67686970);
} else {
- MDP_OUTP(MDP_BASE + 0xb0070, 0xff0000);
- MDP_OUTP(MDP_BASE + 0xb0074, 0xff0000);
- MDP_OUTP(MDP_BASE + 0xb0078, 0xff0000);
+ mdp_vid_quant_set();
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -670,12 +668,14 @@
case MDP_BGR_565:
case MDP_XRGB_8888:
case MDP_RGB_888:
+ case MDP_YCBCR_H1V1:
+ case MDP_YCRCB_H1V1:
*luma_off = pipe->src_x * pipe->bpp;
break;
default:
- pr_err("Source format %u not supported for x offset adjustment\n",
- pipe->src_format);
+ pr_err("%s: fmt %u not supported for adjustment\n",
+ __func__, pipe->src_format);
break;
}
}
@@ -1497,6 +1497,21 @@
}
}
+void mdp4_mixer_pipe_cleanup(int mixer)
+{
+ struct mdp4_overlay_pipe *pipe;
+ int j;
+
+ for (j = MDP4_MIXER_STAGE_MAX - 1; j > MDP4_MIXER_STAGE_BASE; j--) {
+ pipe = ctrl->stage[mixer][j];
+ if (pipe == NULL)
+ continue;
+ pr_debug("%s(): pipe %u\n", __func__, pipe->pipe_ndx);
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_pipe_free(pipe);
+ }
+}
+
static void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
@@ -1552,15 +1567,14 @@
mixer, data, flush_bits);
outpdw(MDP_BASE + off, data); /* LAYERMIXER_IN_CFG */
- if (pull_mode)
+ if (pull_mode) {
outpdw(MDP_BASE + 0x18000, flush_bits);
+ /* wait for vsync on both pull mode interfaces */
+ msleep(20);
+ }
}
if (ctrl->mixer_cfg[MDP4_MIXER2] != cfg[MDP4_MIXER2]) {
- /* wait for vsync on both pull mode interfaces */
- if (pull_mode)
- msleep(20);
-
off = 0x100F0;
ctrl->mixer_cfg[MDP4_MIXER2] = cfg[MDP4_MIXER2];
data = cfg[MDP4_MIXER2];
@@ -1700,7 +1714,8 @@
/*
* If solid fill is enabled, flip and scale
* have to be disabled. otherwise, h/w
- * underruns.
+ * underruns. Also flush the pipe inorder
+ * to take solid fill into effect.
*/
op_mode = inpdw(rgb_base + 0x0058);
op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
@@ -1708,6 +1723,7 @@
outpdw(rgb_base + 0x0058, op_mode);
outpdw(rgb_base + 0x50, rgb_src_format);
outpdw(rgb_base + 0x1008, constant_color);
+ mdp4_overlay_reg_flush(bg_pipe, 0);
}
} else if (fg_alpha) {
blend_op = (MDP4_BLEND_BG_ALPHA_FG_PIXEL |
@@ -2354,14 +2370,16 @@
return;
if (mfd->use_ov0_blt) {
- if (mfd->panel_info.type == LCDC_PANEL)
+ if (mfd->panel_info.type == LCDC_PANEL ||
+ mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_start(mfd);
else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
mdp4_dsi_video_blt_start(mfd);
else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
mdp4_dsi_overlay_blt_start(mfd);
} else {
- if (mfd->panel_info.type == LCDC_PANEL)
+ if (mfd->panel_info.type == LCDC_PANEL ||
+ mfd->panel_info.type == LVDS_PANEL)
mdp4_lcdc_overlay_blt_stop(mfd);
else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
mdp4_dsi_video_blt_stop(mfd);
@@ -2981,33 +2999,34 @@
} msm_iommu_ctx_names[] = {
/* Display */
{
- .name = "mdp_vg1",
+ .name = "mdp_port0_cb0",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_vg2",
+ .name = "mdp_port0_cb1",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_rgb1",
+ .name = "mdp_port1_cb0",
.domain = DISPLAY_DOMAIN,
},
/* Display */
{
- .name = "mdp_rgb2",
+ .name = "mdp_port1_cb1",
.domain = DISPLAY_DOMAIN,
},
};
+static int iommu_enabled;
+
void mdp4_iommu_attach(void)
{
- static int done;
struct iommu_domain *domain;
int i;
- if (!done) {
+ if (!iommu_enabled) {
for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) {
int domain_idx;
struct device *ctx = msm_iommu_get_ctx(
@@ -3030,7 +3049,38 @@
continue;
}
}
- done = 1;
+ pr_debug("Attached MDP IOMMU device\n");
+ iommu_enabled = 1;
+ }
+}
+
+void mdp4_iommu_detach(void)
+{
+ struct iommu_domain *domain;
+ int i;
+
+ if (!mdp_check_suspended() || mdp4_extn_disp)
+ return;
+
+ if (iommu_enabled) {
+ for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) {
+ int domain_idx;
+ struct device *ctx = msm_iommu_get_ctx(
+ msm_iommu_ctx_names[i].name);
+
+ if (!ctx)
+ continue;
+
+ domain_idx = msm_iommu_ctx_names[i].domain;
+
+ domain = msm_get_iommu_domain(domain_idx);
+ if (!domain)
+ continue;
+
+ iommu_detach_device(domain, ctx);
+ }
+ pr_debug("Detached MDP IOMMU device\n");
+ iommu_enabled = 0;
}
}
@@ -3119,10 +3169,12 @@
else
mdp4_overlay_rgb_setup(pipe);
+ if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- mdp4_overlay_reg_flush(pipe, 1);
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
} else {
#ifdef CONFIG_FB_MSM_MIPI_DSI
diff --git a/drivers/video/msm/mdp4_overlay_atv.c b/drivers/video/msm/mdp4_overlay_atv.c
index dd827aa..753ff23 100644
--- a/drivers/video/msm/mdp4_overlay_atv.c
+++ b/drivers/video/msm/mdp4_overlay_atv.c
@@ -113,11 +113,10 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
if (ret == 0)
mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -185,8 +184,8 @@
pipe->srcp0_addr = (uint32)(buf + buf_offset);
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
(int)pipe, pipe->pipe_ndx);
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 8ab12590..8b2edd9 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -197,8 +197,6 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
/*
@@ -275,6 +273,7 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
mdp_histogram_ctrl_all(TRUE);
@@ -297,6 +296,7 @@
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp4_mixer_pipe_cleanup(dsi_pipe->mixer_num);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
dsi_video_enabled = 0;
/* MDP cmd block disable */
@@ -305,6 +305,9 @@
mdp_histogram_ctrl_all(FALSE);
ret = panel_next_off(pdev);
+ /* delay to make sure the last frame finishes */
+ msleep(20);
+
/* dis-engage rgb0 from mixer0 */
if (dsi_pipe) {
mdp4_mixer_stage_down(dsi_pipe);
@@ -386,6 +389,8 @@
mdp4_overlay_dmap_cfg(mfd, 1);
+ mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
mb();
@@ -705,8 +710,8 @@
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_dsi_video_start();
mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index e3917e6..dd96439 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -208,6 +208,8 @@
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp4_mixer_pipe_cleanup(dtv_pipe->mixer_num);
+ msleep(20);
MDP_OUTP(MDP_BASE + DTV_BASE, 0);
dtv_enabled = 0;
/* MDP cmd block disable */
@@ -260,6 +262,10 @@
if (dtv_pipe != NULL) {
mdp4_dtv_stop(mfd);
+
+ /* delay to make sure the last frame finishes */
+ msleep(20);
+
mdp4_mixer_stage_down(dtv_pipe);
mdp4_overlay_pipe_free(dtv_pipe);
mdp4_iommu_unmap(dtv_pipe);
@@ -268,6 +274,7 @@
mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
ret = panel_next_off(pdev);
+ mdp4_iommu_detach();
mdp_footswitch_ctrl(FALSE);
dev_info(&pdev->dev, "mdp4_overlay_dtv: off");
@@ -343,8 +350,8 @@
mdp4_overlay_rgb_setup(pipe);
}
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
dtv_pipe = pipe; /* keep it */
}
@@ -693,8 +700,8 @@
pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
mdp4_overlay_rgb_setup(pipe);
}
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_dtv_start();
mdp4_overlay_dtv_ov_done_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 3f90380..1d3f992 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -159,8 +159,6 @@
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
mdp4_overlayproc_cfg(pipe);
/*
@@ -243,6 +241,7 @@
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
#ifdef CONFIG_MSM_BUS_SCALING
mdp_bus_scale_update_request(2);
@@ -267,6 +266,7 @@
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp4_mixer_pipe_cleanup(lcdc_pipe->mixer_num);
MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
lcdc_enabled = 0;
/* MDP cmd block disable */
@@ -279,7 +279,7 @@
mutex_unlock(&mfd->dma->ov_mutex);
/* delay to make sure the last frame finishes */
- msleep(16);
+ msleep(20);
/* dis-engage rgb0 from mixer0 */
if (lcdc_pipe) {
@@ -597,8 +597,8 @@
pipe->srcp0_addr = (uint32)(buf + buf_offset);
}
mdp4_overlay_rgb_setup(pipe);
- mdp4_mixer_stage_up(pipe);
mdp4_overlay_reg_flush(pipe, 0);
+ mdp4_mixer_stage_up(pipe);
mdp4_overlay_lcdc_start();
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
mdp4_iommu_unmap(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index 2fba83d..342f565 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -273,6 +273,7 @@
if (node) {
list_del(&(node->active_entry));
node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
}
mutex_unlock(&mfd->writeback_mutex);
@@ -295,6 +296,7 @@
mutex_lock(&mfd->writeback_mutex);
list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
mutex_unlock(&mfd->writeback_mutex);
+ mfd->writeback_active_cnt--;
mutex_unlock(&mfd->unregister_mutex);
wake_up(&mfd->wait_q);
}
@@ -323,6 +325,7 @@
if (node) {
list_del(&(node->active_entry));
node->state = IN_BUSY_QUEUE;
+ mfd->writeback_active_cnt++;
}
mutex_unlock(&mfd->writeback_mutex);
@@ -367,6 +370,7 @@
mutex_lock(&mfd->writeback_mutex);
list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
+ mfd->writeback_active_cnt--;
mutex_unlock(&mfd->writeback_mutex);
wake_up(&mfd->wait_q);
fail_no_blt_addr:
@@ -523,13 +527,26 @@
return rc;
}
+static bool is_writeback_inactive(struct msm_fb_data_type *mfd)
+{
+ bool active;
+ mutex_lock(&mfd->writeback_mutex);
+ active = !mfd->writeback_active_cnt;
+ mutex_unlock(&mfd->writeback_mutex);
+ return active;
+}
int mdp4_writeback_stop(struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
mutex_lock(&mfd->writeback_mutex);
mfd->writeback_state = WB_STOPING;
mutex_unlock(&mfd->writeback_mutex);
+ /* Wait for all pending writebacks to finish */
+ wait_event_interruptible(mfd->wait_q, is_writeback_inactive(mfd));
+
+ /* Wake up dequeue thread in case of no UI update*/
wake_up(&mfd->wait_q);
+
return 0;
}
int mdp4_writeback_init(struct fb_info *info)
@@ -549,8 +566,19 @@
struct list_head *ptr, *next;
struct msmfb_writeback_data_list *temp;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ int rc = 0;
+
mutex_lock(&mfd->unregister_mutex);
mutex_lock(&mfd->writeback_mutex);
+
+ if (mfd->writeback_state != WB_STOPING &&
+ mfd->writeback_state != WB_STOP) {
+ pr_err("%s called without stopping\n", __func__);
+ rc = -EPERM;
+ goto terminate_err;
+
+ }
+
if (!list_empty(&mfd->writeback_register_queue)) {
list_for_each_safe(ptr, next,
&mfd->writeback_register_queue) {
@@ -564,7 +592,10 @@
INIT_LIST_HEAD(&mfd->writeback_register_queue);
INIT_LIST_HEAD(&mfd->writeback_busy_queue);
INIT_LIST_HEAD(&mfd->writeback_free_queue);
+
+
+terminate_err:
mutex_unlock(&mfd->writeback_mutex);
mutex_unlock(&mfd->unregister_mutex);
- return 0;
+ return rc;
}
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index eaf1868..964df4e 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -418,6 +418,7 @@
static char video27[2] = {
0x35, 0x00,
};
+static char config_video_MADCTL[2] = {0x36, 0xC0};
static struct dsi_cmd_desc nt35510_video_display_on_cmds[] = {
{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video0), video0},
{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video1), video1},
@@ -453,11 +454,15 @@
display_on},
};
+static struct dsi_cmd_desc nt35510_video_display_on_cmds_rotate[] = {
+ {DTYPE_DCS_WRITE1, 1, 0, 0, 150,
+ sizeof(config_video_MADCTL), config_video_MADCTL},
+};
static int mipi_nt35510_lcd_on(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
struct mipi_panel_info *mipi;
-
+ static int rotate;
mfd = platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
@@ -467,10 +472,19 @@
mipi = &mfd->panel_info.mipi;
+ if (mipi_nt35510_pdata && mipi_nt35510_pdata->rotate_panel)
+ rotate = mipi_nt35510_pdata->rotate_panel();
+
if (mipi->mode == DSI_VIDEO_MODE) {
mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
nt35510_video_display_on_cmds,
ARRAY_SIZE(nt35510_video_display_on_cmds));
+
+ if (rotate) {
+ mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ nt35510_video_display_on_cmds_rotate,
+ ARRAY_SIZE(nt35510_video_display_on_cmds_rotate));
+ }
} else if (mipi->mode == DSI_CMD_MODE) {
mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
nt35510_cmd_display_on_cmds,
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index baad0a8..7564016 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -178,6 +178,17 @@
clk_rate = mfd->fbi->var.pixclock;
clk_rate = min(clk_rate, mfd->panel_info.clk_max);
+ mipi_dsi_phy_ctrl(1);
+
+ if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
+ target_type = mipi_dsi_pdata->target_type;
+
+ mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);
+
+ local_bh_disable();
+ mipi_dsi_clk_enable();
+ local_bh_enable();
+
MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);
@@ -190,17 +201,6 @@
width = mfd->panel_info.xres;
height = mfd->panel_info.yres;
- mipi_dsi_phy_ctrl(1);
-
- if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
- target_type = mipi_dsi_pdata->target_type;
-
- mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);
-
- local_bh_disable();
- mipi_dsi_clk_enable();
- local_bh_enable();
-
mipi = &mfd->panel_info.mipi;
if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
dummy_xres = mfd->panel_info.lcdc.xres_pad;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index b4d8db0..ff08548 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -48,11 +48,6 @@
#include "mdp.h"
#include "mdp4.h"
-#ifdef CONFIG_FB_MSM_LOGO
-#define INIT_IMAGE_FILE "/initlogo.rle"
-extern int load_565rle_image(char *filename);
-#endif
-
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MSM_FB_NUM 3
#endif
@@ -60,6 +55,7 @@
static unsigned char *fbram;
static unsigned char *fbram_phys;
static int fbram_size;
+static boolean bf_supported;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -375,6 +371,9 @@
#ifdef CONFIG_FB_MSM_OVERLAY
mfd->overlay_play_enable = 1;
#endif
+
+ bf_supported = mdp4_overlay_borderfill_supported();
+
rc = msm_fb_register(mfd);
if (rc)
return rc;
@@ -1179,18 +1178,26 @@
if (!remainder_mode2)
remainder_mode2 = PAGE_SIZE;
- /* calculate smem_len based on max size of two supplied modes */
- fix->smem_len = MAX((msm_fb_line_length(mfd->index, panel_info->xres,
- bpp) *
- panel_info->yres + PAGE_SIZE -
- remainder) * mfd->fb_page,
- (msm_fb_line_length(mfd->index,
- panel_info->mode2_xres,
- bpp) *
- panel_info->mode2_yres + PAGE_SIZE -
- remainder_mode2) * mfd->fb_page);
-
-
+ /*
+ * calculate smem_len based on max size of two supplied modes.
+ * Only fb0 has mem. fb1 and fb2 don't have mem.
+ */
+ if (!bf_supported || mfd->index == 0)
+ fix->smem_len = MAX((msm_fb_line_length(mfd->index,
+ panel_info->xres,
+ bpp) *
+ panel_info->yres + PAGE_SIZE -
+ remainder) * mfd->fb_page,
+ (msm_fb_line_length(mfd->index,
+ panel_info->mode2_xres,
+ bpp) *
+ panel_info->mode2_yres + PAGE_SIZE -
+ remainder_mode2) * mfd->fb_page);
+ else if (mfd->index == 1 || mfd->index == 2) {
+ pr_debug("%s:%d no memory is allocated for fb%d!\n",
+ __func__, __LINE__, mfd->index);
+ fix->smem_len = 0;
+ }
mfd->var_xres = panel_info->xres;
mfd->var_yres = panel_info->yres;
@@ -1294,10 +1301,11 @@
fbram_phys += fbram_offset;
fbram_size -= fbram_offset;
- if (fbram_size < fix->smem_len) {
- printk(KERN_ERR "error: no more framebuffer memory!\n");
- return -ENOMEM;
- }
+ if (!bf_supported || mfd->index == 0)
+ if (fbram_size < fix->smem_len) {
+ pr_err("error: no more framebuffer memory!\n");
+ return -ENOMEM;
+ }
fbi->screen_base = fbram;
fbi->fix.smem_start = (unsigned long)fbram_phys;
@@ -1311,8 +1319,8 @@
fbi->fix.smem_start, mfd->map_buffer->iova[0],
mfd->map_buffer->iova[1]);
}
-
- memset(fbi->screen_base, 0x0, fix->smem_len);
+ if (!bf_supported || mfd->index == 0)
+ memset(fbi->screen_base, 0x0, fix->smem_len);
mfd->op_enable = TRUE;
mfd->panel_power_on = FALSE;
@@ -1357,7 +1365,9 @@
mfd->index, fbi->var.xres, fbi->var.yres, fbi->fix.smem_len);
#ifdef CONFIG_FB_MSM_LOGO
- if (!load_565rle_image(INIT_IMAGE_FILE)) ; /* Flip buffer */
+ /* Flip buffer */
+ if (!load_565rle_image(INIT_IMAGE_FILE, bf_supported))
+ ;
#endif
ret = 0;
@@ -1517,7 +1527,12 @@
}
if (!mfd->ref_cnt) {
- mdp_set_dma_pan_info(info, NULL, TRUE);
+ if (!bf_supported ||
+ (info->node != 1 && info->node != 2))
+ mdp_set_dma_pan_info(info, NULL, TRUE);
+ else
+ pr_debug("%s:%d no mdp_set_dma_pan_info %d\n",
+ __func__, __LINE__, info->node);
if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
printk(KERN_ERR "msm_fb_open: can't turn on display!\n");
@@ -1565,6 +1580,16 @@
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct msm_fb_panel_data *pdata;
+ /*
+ * If framebuffer is 1 or 2, pan display is not allowed.
+ */
+ if (bf_supported &&
+ (info->node == 1 || info->node == 2)) {
+ pr_err("%s: no pan display for fb%d!",
+ __func__, info->node);
+ return -EPERM;
+ }
+
if (info->node != 0 || mfd->cont_splash_done) /* primary */
if ((!mfd->op_enable) || (!mfd->panel_power_on))
return -EPERM;
@@ -1585,6 +1610,7 @@
/* "UPDT" */
if (var->reserved[0] == 0x54445055) {
+
dirty.xoffset = var->reserved[1] & 0xffff;
dirty.yoffset = (var->reserved[1] >> 16) & 0xffff;
@@ -1738,9 +1764,13 @@
if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
return -EINVAL;
- if (info->fix.smem_len <
- (var->xres_virtual*var->yres_virtual*(var->bits_per_pixel/8)))
- return -EINVAL;
+ if (!bf_supported ||
+ (info->node != 1 && info->node != 2))
+ if (info->fix.smem_len <
+ (var->xres_virtual*
+ var->yres_virtual*
+ (var->bits_per_pixel/8)))
+ return -EINVAL;
if ((var->xres == 0) || (var->yres == 0))
return -EINVAL;
@@ -2593,7 +2623,12 @@
struct mdp_blit_req_list req_list_header;
int count, i, req_list_count;
-
+ if (bf_supported &&
+ (info->node == 1 || info->node == 2)) {
+ pr_err("%s: no pan display for fb%d.",
+ __func__, info->node);
+ return -EPERM;
+ }
/* Get the count size for the total BLIT request. */
if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
return -EFAULT;
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index b63c022..87753b2 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -184,6 +184,7 @@
u32 use_ov0_blt, ov0_blt_state;
u32 use_ov1_blt, ov1_blt_state;
u32 writeback_state;
+ bool writeback_active_cnt;
int cont_splash_done;
};
@@ -214,4 +215,9 @@
int msm_fb_check_frame_rate(struct msm_fb_data_type *mfd,
struct fb_info *info);
+#ifdef CONFIG_FB_MSM_LOGO
+#define INIT_IMAGE_FILE "/initlogo.rle"
+int load_565rle_image(char *filename, bool bf_supported);
+#endif
+
#endif /* MSM_FB_H */
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 4a952c9..ac1ff24 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -84,6 +84,8 @@
#define DDL_MAX_NUM_IN_INPUTFRAME_POOL (DDL_MAX_NUM_OF_B_FRAME + 1)
+#define MDP_MIN_TILE_HEIGHT 96
+
enum ddl_mem_area {
DDL_FW_MEM = 0x0,
DDL_MM_MEM = 0x1,
@@ -467,6 +469,8 @@
u32 ddl_check_reconfig(struct ddl_client_context *ddl);
void ddl_handle_reconfig(u32 res_change, struct ddl_client_context *ddl);
void ddl_fill_dec_desc_buffer(struct ddl_client_context *ddl);
+void ddl_set_vidc_timeout(struct ddl_client_context *ddl);
+
#ifdef DDL_BUF_LOG
void ddl_list_buffers(struct ddl_client_context *ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
index 41604b0..50c3696 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -55,6 +55,13 @@
#define DDL_HW_TIMEOUT_IN_MS 1000
#define DDL_STREAMBUF_ALIGN_GUARD_BYTES 0x7FF
+#define DDL_VIDC_1080P_48MHZ (48000000)
+#define DDL_VIDC_1080P_133MHZ (133330000)
+#define DDL_VIDC_1080P_200MHZ (200000000)
+#define DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE (0xCB8)
+#define DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE (0x2355)
+#define DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE (0x3500)
+
#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1))
#define DDL_ENC_MIN_DPB_BUFFERS 2
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 6d3a05a..b480b42 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -744,11 +744,11 @@
memset(dec_bufs->desc.align_virtual_addr,
0, buf_size.sz_desc);
msm_ion_do_cache_op(
- ddl_context->video_ion_client,
- dec_bufs->desc.alloc_handle,
- dec_bufs->desc.alloc_handle,
- dec_bufs->desc.buffer_size,
- ION_IOC_CLEAN_INV_CACHES);
+ ddl_context->video_ion_client,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.virtual_base_addr,
+ dec_bufs->desc.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
}
}
@@ -1061,3 +1061,24 @@
ip_bitstream->desc_buf,
ip_bitstream->desc_size);
}
+
+void ddl_set_vidc_timeout(struct ddl_client_context *ddl)
+{
+ unsigned long core_clk_rate;
+ u32 vidc_time_out = 0;
+ if (ddl->codec_data.decoder.idr_only_decoding) {
+ vidc_time_out = 2 * DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ } else {
+ res_trk_get_clk_rate(&core_clk_rate);
+ if (core_clk_rate == DDL_VIDC_1080P_48MHZ)
+ vidc_time_out = DDL_VIDC_1080P_48MHZ_TIMEOUT_VALUE;
+ else if (core_clk_rate == DDL_VIDC_1080P_133MHZ)
+ vidc_time_out = DDL_VIDC_1080P_133MHZ_TIMEOUT_VALUE;
+ else
+ vidc_time_out = DDL_VIDC_1080P_200MHZ_TIMEOUT_VALUE;
+ }
+ DDL_MSG_HIGH("%s Video core time out value = 0x%x",
+ __func__, vidc_time_out);
+ vidc_sm_set_video_core_timeout_value(
+ &ddl->shared_mem[ddl->command_channel], vidc_time_out);
+}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
index a5192b0..363fe53 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -1869,6 +1869,19 @@
input_buf_req = &decoder->actual_input_buf_req;
min_dpb = decoder->min_dpb_num;
y_cb_cr_size = decoder->y_cb_cr_size;
+ if ((decoder->buf_format.buffer_format ==
+ VCD_BUFFER_FORMAT_TILE_4x2) &&
+ (frame_size->height < MDP_MIN_TILE_HEIGHT)) {
+ frame_size->height = MDP_MIN_TILE_HEIGHT;
+ ddl_calculate_stride(frame_size,
+ !decoder->progressive_only);
+ y_cb_cr_size = ddl_get_yuv_buffer_size(
+ frame_size,
+ &decoder->buf_format,
+ (!decoder->progressive_only),
+ decoder->hdr.decoding, NULL);
+ } else
+ y_cb_cr_size = decoder->y_cb_cr_size;
}
memset(output_buf_req, 0,
sizeof(struct vcd_buffer_requirement));
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index ac81916..878db62 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -243,6 +243,10 @@
#define VIDC_SM_NUM_STUFF_BYTES_CONSUME_ADDR 0X01ac
+#define VIDC_SM_TIMEOUT_VALUE_ADDR 0x0158
+#define VIDC_SM_TIMEOUT_VALUE_BMSK 0xffffffff
+#define VIDC_SM_TIMEOUT_VALUE_SHFT 0
+
#define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_BMSK 0x40
#define VIDC_SM_ENC_EXT_CTRL_CLOSED_GOP_ENABLE_SHFT 6
@@ -862,3 +866,11 @@
*output_buffer_size = DDL_MEM_READ_32(shared_mem,
VIDC_SM_BATCH_OUTPUT_SIZE_ADDR);
}
+
+void vidc_sm_set_video_core_timeout_value(struct ddl_buf_addr *shared_mem,
+ u32 timeout)
+{
+ DDL_MEM_WRITE_32(shared_mem, VIDC_SM_TIMEOUT_VALUE_ADDR,
+ timeout);
+}
+
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 7d9896f..6cd75595 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -191,4 +191,6 @@
u32 output_buffer_size);
void vidc_sm_get_encoder_batch_output_size(struct ddl_buf_addr *shared_mem,
u32 *output_buffer_size);
+void vidc_sm_set_video_core_timeout_value(struct ddl_buf_addr *shared_mem,
+ u32 timeout);
#endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 97c8d0d..d0cf4e8 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -977,6 +977,7 @@
DDL_MSG_ERROR("STATE-CRITICAL");
return VCD_ERR_FAIL;
}
+ ddl_set_vidc_timeout(ddl);
ddl_vidc_decode_set_metadata_output(decoder);
if (decoder->dp_buf.no_of_dec_pic_buf <
decoder->client_output_buf_req.actual_count)
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index e71259a..c8365ce 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -382,7 +382,7 @@
return status;
}
-static u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
+u32 res_trk_get_clk_rate(unsigned long *phclk_rate)
{
u32 status = true;
mutex_lock(&resource_context.lock);
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
index 99b123c..2ae2512 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
@@ -49,4 +49,5 @@
int res_trk_disable_footswitch(void);
void res_trk_release_fw_addr(void);
u32 res_trk_estimate_perf_level(u32 pn_perf_lvl);
+u32 res_trk_get_clk_rate(unsigned long *phclk_rate);
#endif
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
index fe71dc1..5fa9b09 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
@@ -290,6 +290,8 @@
decoder->client_output_buf_req.actual_count
&& decoder->progressive_only)
need_reconfig = false;
+ if (input_vcd_frm->flags & VCD_FRAME_FLAG_EOS)
+ need_reconfig = false;
if ((input_vcd_frm->data_len <= seq_hdr_info.dec_frm_size ||
(input_vcd_frm->flags & VCD_FRAME_FLAG_CODECCONFIG)) &&
(!need_reconfig ||
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
index 0d5ba9c..5019d31 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
@@ -502,7 +502,12 @@
}
rc = ddl_set_property(cctxt->ddl_handle, prop_hdr, prop_val);
- VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
+ if (rc) {
+ /* Some properties aren't known to ddl that we can handle */
+ if (prop_hdr->prop_id != VCD_I_VOP_TIMING_CONSTANT_DELTA)
+ VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
+ }
+
switch (prop_hdr->prop_id) {
case VCD_I_META_BUFFER_MODE:
{
@@ -537,16 +542,30 @@
break;
}
case VCD_I_INTRA_PERIOD:
- {
- struct vcd_property_i_period *iperiod =
- (struct vcd_property_i_period *)prop_val;
- cctxt->bframe = iperiod->b_frames;
- break;
- }
+ {
+ struct vcd_property_i_period *iperiod =
+ (struct vcd_property_i_period *)prop_val;
+ cctxt->bframe = iperiod->b_frames;
+ break;
+ }
case VCD_REQ_PERF_LEVEL:
rc = vcd_req_perf_level(cctxt,
- (struct vcd_property_perf_level *)prop_val);
+ (struct vcd_property_perf_level *)prop_val);
break;
+ case VCD_I_VOP_TIMING_CONSTANT_DELTA:
+ {
+ struct vcd_property_vop_timing_constant_delta *delta =
+ prop_val;
+
+ if (delta->constant_delta > 0) {
+ cctxt->time_frame_delta = delta->constant_delta;
+ rc = VCD_S_SUCCESS;
+ } else {
+ VCD_MSG_ERROR("Frame delta must be positive");
+ rc = VCD_ERR_ILLEGAL_PARM;
+ }
+ break;
+ }
default:
{
break;
@@ -559,6 +578,7 @@
(struct vcd_clnt_ctxt *cctxt,
struct vcd_property_hdr *prop_hdr, void *prop_val)
{
+ int rc;
VCD_MSG_LOW("vcd_get_property_cmn in %d:", cctxt->clnt_state.state);
VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
if (!prop_hdr->sz || !prop_hdr->prop_id) {
@@ -566,7 +586,24 @@
return VCD_ERR_ILLEGAL_PARM;
}
- return ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val);
+ rc = ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val);
+ if (rc) {
+ /* Some properties aren't known to ddl that we can handle */
+ if (prop_hdr->prop_id != VCD_I_VOP_TIMING_CONSTANT_DELTA)
+ VCD_FAILED_RETURN(rc, "Failed: ddl_get_property");
+ }
+
+ switch (prop_hdr->prop_id) {
+ case VCD_I_VOP_TIMING_CONSTANT_DELTA:
+ {
+ struct vcd_property_vop_timing_constant_delta *delta =
+ (struct vcd_property_vop_timing_constant_delta *)
+ prop_val;
+ delta->constant_delta = cctxt->time_frame_delta;
+ rc = VCD_S_SUCCESS;
+ }
+ }
+ return rc;
}
static u32 vcd_set_buffer_requirements_cmn
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index d228146..7ae4f45 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -191,6 +191,7 @@
u32 frm_p_units;
u32 reqd_perf_lvl;
u32 time_resoln;
+ u32 time_frame_delta;
struct vcd_buffer_pool in_buf_pool;
struct vcd_buffer_pool out_buf_pool;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index 9576387..49d885c 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -954,6 +954,9 @@
u32 rc = VCD_S_SUCCESS;
u32 client_inited = false;
u32 fail_all_open = false;
+ struct ddl_context *ddl_context;
+
+ ddl_context = ddl_get_context();
VCD_MSG_LOW("vcd_dev_cb_in_initing:");
@@ -1027,6 +1030,8 @@
tmp_client = client;
client = client->next;
+ if (tmp_client == dev_ctxt->cctxt_list_head)
+ fail_all_open = true;
vcd_destroy_client_context(tmp_client);
}
@@ -1035,6 +1040,10 @@
if (!client_inited || fail_all_open) {
VCD_MSG_ERROR("All client open requests failed");
+ DDL_IDLE(ddl_context);
+
+ vcd_handle_device_init_failed(drv_ctxt,
+ DEVICE_STATE_EVENT_NUMBER(close));
dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM;
} else {
if (vcd_power_event(dev_ctxt, NULL,
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 2df7144..1218794 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -2387,6 +2387,7 @@
u32 rc, seqhdr_present = 0;
struct vcd_property_hdr prop_hdr;
struct vcd_sequence_hdr seq_hdr;
+ struct vcd_property_sps_pps_for_idr_enable idr_enable;
struct vcd_property_codec codec;
*handled = true;
prop_hdr.prop_id = DDL_I_SEQHDR_PRESENT;
@@ -2403,29 +2404,64 @@
rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &codec);
if (!VCD_FAILED(rc)) {
if (codec.codec != VCD_CODEC_H263) {
- prop_hdr.prop_id = VCD_I_SEQ_HEADER;
- prop_hdr.sz = sizeof(struct vcd_sequence_hdr);
- seq_hdr.sequence_header = frm_entry->virtual;
- seq_hdr.sequence_header_len =
- frm_entry->alloc_len;
- rc = ddl_get_property(cctxt->ddl_handle,
- &prop_hdr, &seq_hdr);
- if (!VCD_FAILED(rc)) {
- frm_entry->data_len =
- seq_hdr.sequence_header_len;
- frm_entry->time_stamp = 0;
- frm_entry->flags |=
- VCD_FRAME_FLAG_CODECCONFIG;
+ /*
+ * The seq. header is stored in a separate internal
+ * buffer and is memcopied into the output buffer
+ * that we provide. In secure sessions, we aren't
+ * allowed to touch these buffers. In these cases
+ * seq. headers are returned to client as part of
+ * I-frames. So for secure session, just return
+ * empty buffer.
+ */
+ if (!cctxt->secure) {
+ prop_hdr.prop_id = VCD_I_SEQ_HEADER;
+ prop_hdr.sz = sizeof(struct vcd_sequence_hdr);
+ seq_hdr.sequence_header = frm_entry->virtual;
+ seq_hdr.sequence_header_len =
+ frm_entry->alloc_len;
+ rc = ddl_get_property(cctxt->ddl_handle,
+ &prop_hdr, &seq_hdr);
+ if (!VCD_FAILED(rc)) {
+ frm_entry->data_len =
+ seq_hdr.sequence_header_len;
+ frm_entry->time_stamp = 0;
+ frm_entry->flags |=
+ VCD_FRAME_FLAG_CODECCONFIG;
+ } else
+ VCD_MSG_ERROR("rc = 0x%x. Failed:"
+ "ddl_get_property: VCD_I_SEQ_HEADER",
+ rc);
+ } else {
+ /*
+ * First check that the proper props are enabled
+ * so client can get the proper info eventually
+ */
+ prop_hdr.prop_id = VCD_I_ENABLE_SPS_PPS_FOR_IDR;
+ prop_hdr.sz = sizeof(idr_enable);
+ rc = ddl_get_property(cctxt->ddl_handle,
+ &prop_hdr, &idr_enable);
+ if (!VCD_FAILED(rc)) {
+ if (!idr_enable.
+ sps_pps_for_idr_enable_flag) {
+ VCD_MSG_ERROR("SPS/PPS per IDR "
+ "needs to be enabled");
+ rc = VCD_ERR_BAD_STATE;
+ } else {
+ /* Send zero len frame */
+ frm_entry->data_len = 0;
+ frm_entry->time_stamp = 0;
+ frm_entry->flags = 0;
+ }
+ }
+
+ }
+
+ if (!VCD_FAILED(rc))
cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
- VCD_S_SUCCESS, frm_entry,
- sizeof(struct vcd_frame_data),
- cctxt,
- cctxt->client_data);
- } else
- VCD_MSG_ERROR(
- "rc = 0x%x. Failed:\
- ddl_get_property: VCD_I_SEQ_HEADER",
- rc);
+ VCD_S_SUCCESS, frm_entry,
+ sizeof(struct vcd_frame_data),
+ cctxt,
+ cctxt->client_data);
} else
VCD_MSG_LOW("Codec Type is H.263\n");
} else
@@ -3052,13 +3088,15 @@
u32 frm_delta;
u64 temp, max = ~((u64)0);
- if (frame->time_stamp >= cctxt->status.prev_ts)
+ if (cctxt->time_frame_delta)
+ temp = cctxt->time_frame_delta;
+ else if (frame->time_stamp >= cctxt->status.prev_ts)
temp = frame->time_stamp - cctxt->status.prev_ts;
else
temp = (max - cctxt->status.prev_ts) +
frame->time_stamp;
- VCD_MSG_LOW("Curr_ts=%lld Prev_ts=%lld Diff=%llu",
+ VCD_MSG_LOW("Curr_ts=%lld Prev_ts=%lld Diff=%llu\n",
frame->time_stamp, cctxt->status.prev_ts, temp);
temp *= cctxt->time_resoln;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6395692..47b1fe3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -151,6 +151,7 @@
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_SANITIZE, /* sanitize */
__REQ_NR_BITS, /* stops here */
};
@@ -161,13 +162,14 @@
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_SANITIZE (1 << __REQ_SANITIZE)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | REQ_SANITIZE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b13021..4dc4b3e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -403,6 +403,7 @@
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SANITIZE 19 /* supports SANITIZE */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -485,6 +486,7 @@
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_sanitize(q) test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
@@ -922,6 +924,7 @@
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index e50a054..537960b 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -52,7 +52,7 @@
#define APQ8030_TOOLS_ID 4079
#define MSM8627_TOOLS_ID 4080
#define MSM8227_TOOLS_ID 4081
-#define MSM8974_TOOLS_ID 4072
+#define MSM8974_TOOLS_ID 4083
#define MSG_MASK_0 (0x00000001)
#define MSG_MASK_1 (0x00000002)
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index 7f963e6..e0058d3 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -99,16 +99,40 @@
} dmx_filter_t;
+/* Filter flags */
+#define DMX_CHECK_CRC 0x01
+#define DMX_ONESHOT 0x02
+#define DMX_IMMEDIATE_START 0x04
+#define DMX_ENABLE_INDEXING 0x08
+#define DMX_KERNEL_CLIENT 0x8000
+
struct dmx_sct_filter_params
{
__u16 pid;
dmx_filter_t filter;
__u32 timeout;
__u32 flags;
-#define DMX_CHECK_CRC 1
-#define DMX_ONESHOT 2
-#define DMX_IMMEDIATE_START 4
-#define DMX_KERNEL_CLIENT 0x8000
+};
+
+
+/* Indexing: supported video standards */
+enum dmx_indexing_video_standard {
+ DMX_INDEXING_MPEG2,
+ DMX_INDEXING_H264,
+ DMX_INDEXING_VC1
+};
+
+/* Indexing: Supported video profiles */
+enum dmx_indexing_video_profile {
+ DMX_INDEXING_MPEG2_ANY,
+ DMX_INDEXING_H264_ANY,
+ DMX_INDEXING_VC1_ANY
+};
+
+/* Indexing: video configuration parameters */
+struct dmx_indexing_video_params {
+ enum dmx_indexing_video_standard standard;
+ enum dmx_indexing_video_profile profile;
};
@@ -119,6 +143,8 @@
dmx_output_t output;
dmx_pes_type_t pes_type;
__u32 flags;
+
+ struct dmx_indexing_video_params video_params;
};
struct dmx_buffer_status {
@@ -146,7 +172,60 @@
typedef struct dmx_caps {
__u32 caps;
+
+/* Indicates whether demux support playback from memory in pull mode */
+#define DMX_CAP_PULL_MODE 0x01
+
+/* Indicates whether demux support indexing of recorded video stream */
+#define DMX_CAP_VIDEO_INDEXING 0x02
+
+/* Indicates whether demux support sending data directly to video decoder */
+#define DMX_CAP_VIDEO_DECODER_DATA 0x04
+
+/* Indicates whether demux support sending data directly to audio decoder */
+#define DMX_CAP_AUDIO_DECODER_DATA 0x08
+
+/* Indicates whether demux support sending data directly to subtitle decoder */
+#define DMX_CAP_SUBTITLE_DECODER_DATA 0x10
+
+ /* Number of decoders demux can output data to */
int num_decoders;
+
+ /* Number of demux devices */
+ int num_demux_devices;
+
+ /* Max number of PID filters */
+ int num_pid_filters;
+
+ /* Max number of section filters */
+ int num_section_filters;
+
+ /*
+ * Max number of section filters using same PID,
+ * 0 if not supported
+ */
+ int num_section_filters_per_pid;
+
+ /*
+ * Length of section filter, not including section
+ * length field (2 bytes).
+ */
+ int section_filter_length;
+
+ /* Max number of demod based input */
+ int num_demod_inputs;
+
+ /* Max number of memory based input */
+ int num_memory_inputs;
+
+ /* Overall bitrate from all inputs concurrently. Mbit/sec */
+ int max_bitrate;
+
+ /* Max bitrate from single demod input. Mbit/sec */
+ int demod_input_max_bitrate;
+
+ /* Max bitrate from single memory input. Mbit/sec */
+ int memory_input_max_bitrate;
} dmx_caps_t;
typedef enum {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35e4edf..1c91125 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -317,6 +317,7 @@
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
+#define BLKSANITIZE _IO(0x12, 126)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
diff --git a/include/linux/i2c/isa1200.h b/include/linux/i2c/isa1200.h
index 4c36d59..9dab3eb 100644
--- a/include/linux/i2c/isa1200.h
+++ b/include/linux/i2c/isa1200.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -55,6 +55,7 @@
u8 num_regulators;
int (*power_on)(int on);
int (*dev_setup)(bool on);
+ int (*clk_enable)(bool on);
};
#endif /* __LINUX_ISA1200_H */
diff --git a/include/linux/input.h b/include/linux/input.h
index 6e7d6d9..191f7d7 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -817,7 +817,8 @@
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
#define SW_HPHL_OVERCURRENT 0x0d /* set = over current on left hph */
#define SW_HPHR_OVERCURRENT 0x0e /* set = over current on right hph */
-#define SW_MAX 0x0f
+#define SW_UNSUPPORT_INSERT 0x0f /* set = unsupported device inserted */
+#define SW_MAX 0x10
#define SW_CNT (SW_MAX+1)
/*
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 7525e38..b693b75 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -84,6 +84,7 @@
extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
extern int memblock_is_memory(phys_addr_t addr);
extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+extern int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
extern int memblock_is_reserved(phys_addr_t addr);
extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index fca9a94..7917d24 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -30,6 +30,13 @@
(((ver == TABLA_VERSION_1_0) || (ver == TABLA_VERSION_1_1)) ? 1 : 0)
#define TABLA_IS_2_0(ver) ((ver == TABLA_VERSION_2_0) ? 1 : 0)
+#define SITAR_VERSION_1P0 0
+#define SITAR_VERSION_1P1 1
+#define SITAR_IS_1P0(ver) \
+ ((ver == SITAR_VERSION_1P0) ? 1 : 0)
+#define SITAR_IS_1P1(ver) \
+ ((ver == SITAR_VERSION_1P1) ? 1 : 0)
+
enum {
TABLA_IRQ_SLIMBUS = 0,
TABLA_IRQ_MBHC_REMOVAL,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index aa808dc..c05134c 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -52,6 +52,9 @@
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
+ u8 max_packed_writes;
+ u8 max_packed_reads;
+ u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -72,6 +75,9 @@
bool hpi_en; /* HPI enablebit */
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
+ bool bkops; /* background support bit */
+ bool bkops_en; /* background enable bit */
+ u8 raw_exception_status; /* 53 */
u8 raw_partition_support; /* 160 */
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
@@ -85,6 +91,7 @@
u8 raw_sec_erase_mult; /* 230 */
u8 raw_sec_feature_support;/* 231 */
u8 raw_trim_mult; /* 232 */
+ u8 raw_bkops_status; /* 246 */
u8 raw_sectors[4]; /* 212 - 4 bytes */
unsigned int feature_support;
@@ -191,6 +198,9 @@
#define MMC_CARD_SDXC (1<<6) /* card is SDXC */
#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
+#define MMC_STATE_NEED_BKOPS (1<<9) /* card need to do BKOPS */
+#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */
+#define MMC_STATE_CHECK_BKOPS (1<<11) /* card need to check BKOPS */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -336,6 +346,9 @@
#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_need_bkops(c) ((c)->state & MMC_STATE_NEED_BKOPS)
+#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_check_bkops(c) ((c)->state & MMC_STATE_CHECK_BKOPS)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -346,6 +359,14 @@
#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_need_bkops(c) ((c)->state |= MMC_STATE_NEED_BKOPS)
+#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_check_bkops(c) ((c)->state |= MMC_STATE_CHECK_BKOPS)
+
+#define mmc_card_clr_need_bkops(c) ((c)->state &= ~MMC_STATE_NEED_BKOPS)
+#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_clr_check_bkops(c) ((c)->state &= ~MMC_STATE_CHECK_BKOPS)
+
/*
* Quirk add/remove for MMC products.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7f30e24..2e8e6de 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -134,6 +134,9 @@
struct mmc_card;
struct mmc_async_req;
+extern int mmc_interrupt_bkops(struct mmc_card *);
+extern int mmc_read_bkops_status(struct mmc_card *);
+extern int mmc_is_exception_event(struct mmc_card *, unsigned int);
extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
struct mmc_async_req *, int *);
extern int mmc_interrupt_hpi(struct mmc_card *);
@@ -163,6 +166,7 @@
extern int mmc_can_secure_erase_trim(struct mmc_card *card);
extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr);
+extern void mmc_start_bkops(struct mmc_card *card);
extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index a2ee306..4416e95 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -245,11 +245,19 @@
#define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */
#define MMC_CAP2_POWEROFF_NOTIFY (1 << 2) /* Notify poweroff supported */
#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
+#define MMC_CAP2_SANITIZE (1<<4) /* Support Sanitize */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
-#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
+#define MMC_CAP2_DETECT_ON_ERR (1 << 7) /* On I/O err check card removal */
+#define MMC_CAP2_PACKED_RD (1 << 10) /* Allow packed read */
+#define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
+#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
+ MMC_CAP2_PACKED_WR) /* Allow packed commands */
+#define MMC_CAP2_BKOPS (1 << 14) /* BKOPS supported */
+#define MMC_CAP2_INIT_BKOPS (1 << 15) /* Need to set BKOPS_EN */
+
mmc_pm_flag_t pm_caps; /* supported pm features */
unsigned int power_notify_type;
#define MMC_HOST_PW_NOTIFY_NONE 0
@@ -317,6 +325,7 @@
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
+ bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
mmc_pm_flag_t pm_flags; /* requested pm features */
@@ -343,12 +352,8 @@
#ifdef CONFIG_MMC_PERF_PROFILING
struct {
- unsigned long rbytes_mmcq; /* Rd bytes MMC queue */
- unsigned long wbytes_mmcq; /* Wr bytes MMC queue */
unsigned long rbytes_drv; /* Rd bytes MMC Host */
unsigned long wbytes_drv; /* Wr bytes MMC Host */
- ktime_t rtime_mmcq; /* Rd time MMC queue */
- ktime_t wtime_mmcq; /* Wr time MMC queue */
ktime_t rtime_drv; /* Rd time MMC Host */
ktime_t wtime_drv; /* Wr time MMC Host */
ktime_t start;
@@ -411,6 +416,7 @@
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
+ host->sdio_irq_pending = true;
wake_up_process(host->sdio_irq_thread);
}
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index e124fbe..37b5344 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -139,7 +139,9 @@
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
+#define R1_EXCEPTION_EVENT (1 << 6) /* sx, a */
#define R1_APP_CMD (1 << 5) /* sr, c */
+#define R1_EXP_EVENT (1 << 6) /* sr, a */
#define R1_STATE_IDLE 0
#define R1_STATE_READY 1
@@ -275,10 +277,16 @@
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
+#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
+#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
+#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
+#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */
+#define EXT_CSD_BKOPS_EN 163 /* R/W */
+#define EXT_CSD_BKOPS_START 164 /* W */
#define EXT_CSD_SANITIZE_START 165 /* W */
#define EXT_CSD_WR_REL_PARAM 166 /* RO */
#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
@@ -311,9 +319,13 @@
#define EXT_CSD_PWR_CL_200_360 237 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
+#define EXT_CSD_BKOPS_STATUS 246 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
+#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
+#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
+#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
/*
@@ -426,6 +438,14 @@
#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
+
+#define EXT_CSD_PACKED_EVENT_EN (1 << 3)
+
+#define EXT_CSD_PACKED_FAILURE (1 << 3)
+
+#define EXT_CSD_PACKED_GENERIC_ERROR (1 << 0)
+#define EXT_CSD_PACKED_INDEXED_ERROR (1 << 1)
+
/*
* MMC_SWITCH access modes
*/
@@ -435,5 +455,16 @@
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+/*
+ * BKOPS status level
+ */
+#define EXT_CSD_BKOPS_LEVEL_2 0x2
+
+/*
+ * EXCEPTION_EVENT_STATUS field (eMMC4.5)
+ */
+#define EXT_CSD_URGENT_BKOPS BIT(0)
+#define EXT_CSD_DYNCAP_NEEDED BIT(1)
+#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
#endif /* MMC_MMC_PROTOCOL_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index cb394e8..b4e14d2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -292,7 +292,7 @@
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_SW_MAX 0x10
#define INPUT_DEVICE_ID_MATCH_BUS 1
#define INPUT_DEVICE_ID_MATCH_VENDOR 2
diff --git a/include/linux/msm_audio_mvs.h b/include/linux/msm_audio_mvs.h
index 8ec9796..1807cb0 100644
--- a/include/linux/msm_audio_mvs.h
+++ b/include/linux/msm_audio_mvs.h
@@ -133,4 +133,12 @@
};
+#define Q5V2_MVS_MAX_VOC_PKT_SIZE 320
+
+struct q5v2_msm_audio_mvs_frame {
+ uint32_t frame_type;
+ uint32_t len;
+ uint8_t voc_pkt[Q5V2_MVS_MAX_VOC_PKT_SIZE];
+
+};
#endif /* __MSM_AUDIO_MVS_H */
diff --git a/include/linux/msm_charm.h b/include/linux/msm_charm.h
index 779fd38..c31e493 100644
--- a/include/linux/msm_charm.h
+++ b/include/linux/msm_charm.h
@@ -10,6 +10,7 @@
#define NORMAL_BOOT_DONE _IOW(CHARM_CODE, 5, int)
#define RAM_DUMP_DONE _IOW(CHARM_CODE, 6, int)
#define WAIT_FOR_RESTART _IOR(CHARM_CODE, 7, int)
+#define GET_DLOAD_STATUS _IOR(CHARM_CODE, 8, int)
enum charm_boot_type {
CHARM_NORMAL_BOOT = 0,
diff --git a/include/linux/smux.h b/include/linux/smux.h
new file mode 100644
index 0000000..308f969
--- /dev/null
+++ b/include/linux/smux.h
@@ -0,0 +1,297 @@
+/* include/linux/smux.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SMUX_H
+#define SMUX_H
+
+/**
+ * Logical Channel IDs
+ *
+ * This must be identical between local and remote clients.
+ */
+enum {
+ /* Data Ports */
+ SMUX_DATA_0,
+ SMUX_DATA_1,
+ SMUX_DATA_2,
+ SMUX_DATA_3,
+ SMUX_DATA_4,
+ SMUX_DATA_5,
+ SMUX_DATA_6,
+ SMUX_DATA_7,
+ SMUX_DATA_8,
+ SMUX_DATA_9,
+ SMUX_USB_RMNET_DATA_0,
+ SMUX_USB_DUN_0,
+ SMUX_USB_DIAG_0,
+ SMUX_SYS_MONITOR_0,
+ SMUX_CSVT_0,
+ /* add new data ports here */
+
+ /* Control Ports */
+ SMUX_DATA_CTL_0 = 32,
+ SMUX_DATA_CTL_1,
+ SMUX_DATA_CTL_2,
+ SMUX_DATA_CTL_3,
+ SMUX_DATA_CTL_4,
+ SMUX_DATA_CTL_5,
+ SMUX_DATA_CTL_6,
+ SMUX_DATA_CTL_7,
+ SMUX_DATA_CTL_8,
+ SMUX_DATA_CTL_9,
+ SMUX_USB_RMNET_CTL_0,
+ SMUX_USB_DUN_CTL_0_UNUSED,
+ SMUX_USB_DIAG_CTL_0,
+ SMUX_SYS_MONITOR_CTL_0,
+ SMUX_CSVT_CTL_0,
+ /* add new control ports here */
+
+ SMUX_TEST_LCID,
+ SMUX_NUM_LOGICAL_CHANNELS,
+};
+
+/**
+ * Notification events that are passed to the notify() function.
+ *
+ * If the @metadata argument in the notifier is non-null, then it will
+ * point to the associated struct smux_meta_* structure.
+ */
+enum {
+ SMUX_CONNECTED, /* @metadata is null */
+ SMUX_DISCONNECTED,
+ SMUX_READ_DONE,
+ SMUX_READ_FAIL,
+ SMUX_WRITE_DONE,
+ SMUX_WRITE_FAIL,
+ SMUX_TIOCM_UPDATE,
+ SMUX_LOW_WM_HIT, /* @metadata is NULL */
+ SMUX_HIGH_WM_HIT, /* @metadata is NULL */
+};
+
+/**
+ * Channel options used to modify channel behavior.
+ */
+enum {
+ SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
+ SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
+ SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+};
+
+/**
+ * Metadata for SMUX_DISCONNECTED notification
+ *
+ * @is_ssr: Disconnect caused by subsystem restart
+ */
+struct smux_meta_disconnected {
+ int is_ssr;
+};
+
+/**
+ * Metadata for SMUX_READ_DONE/SMUX_READ_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer returned by get_rx_buffer()
+ * @len: Buffer length returned by get_rx_buffer()
+ */
+struct smux_meta_read {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_WRITE_DONE/SMUX_WRITE_FAIL notification
+ *
+ * @pkt_priv: Packet-specific private data
+ * @buffer: Buffer pointer passed into msm_smux_write
+ * @len: Buffer length passed into msm_smux_write
+ */
+struct smux_meta_write {
+ void *pkt_priv;
+ void *buffer;
+ int len;
+};
+
+/**
+ * Metadata for SMUX_TIOCM_UPDATE notification
+ *
+ * @tiocm_old: Previous TIOCM state
+ * @tiocm_new: Current TIOCM state
+ */
+struct smux_meta_tiocm {
+ uint32_t tiocm_old;
+ uint32_t tiocm_new;
+};
+
+
+#ifdef CONFIG_N_SMUX
+/**
+ * Starts the opening sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @priv Free for client usage
+ * @notify Event notification function
+ * @get_rx_buffer Function used to provide a receive buffer to SMUX
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * A channel must be fully closed (either not previously opened, or
+ * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
+ * has been received).
+ *
+ * Once the remote side is opened, the client will receive a SMUX_CONNECTED
+ * event.
+ */
+int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size));
+
+/**
+ * Starts the closing sequence for a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @returns 0 for success, <0 otherwise
+ *
+ * Once the close event has been acknowledged by the remote side, the client
+ * will receive a SMUX_DISCONNECTED notification.
+ */
+int msm_smux_close(uint8_t lcid);
+
+/**
+ * Write data to a logical channel.
+ *
+ * @lcid Logical channel ID
+ * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
+ * SMUX_WRITE_FAIL notification.
+ * @data Data to write
+ * @len Length of @data
+ *
+ * @returns 0 for success, <0 otherwise
+ *
+ * Data may be written immediately after msm_smux_open() is called, but
+ * the data will wait in the transmit queue until the channel has been
+ * fully opened.
+ *
+ * Once the data has been written, the client will receive either a completion
+ * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
+ */
+int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len);
+
+/**
+ * Returns true if the TX queue is currently full (high water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is not full; 1 if it is full; < 0 for error
+ */
+int msm_smux_is_ch_full(uint8_t lcid);
+
+/**
+ * Returns true if the TX queue has space for more packets (it is at or
+ * below the low water mark).
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns 0 if channel is above low watermark
+ * 1 if it's at or below the low watermark
+ * < 0 for error
+ */
+int msm_smux_is_ch_low(uint8_t lcid);
+
+/**
+ * Get the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ *
+ * @returns >= 0 TIOCM status bits
+ * < 0 Error condition
+ */
+long msm_smux_tiocm_get(uint8_t lcid);
+
+/**
+ * Set/clear the TIOCM status bits.
+ *
+ * @lcid Logical channel ID
+ * @set Bits to set
+ * @clear Bits to clear
+ *
+ * @returns 0 for success; < 0 for failure
+ *
+ * If a bit is specified in both the @set and @clear masks, then the clear bit
+ * definition will dominate and the bit will be cleared.
+ */
+int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear);
+
+/**
+ * Set or clear channel option using the SMUX_CH_OPTION_* channel
+ * flags.
+ *
+ * @lcid Logical channel ID
+ * @set Options to set
+ * @clear Options to clear
+ *
+ * @returns 0 for success, < 0 for failure
+ */
+int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear);
+
+#else
+static inline int msm_smux_open(uint8_t lcid, void *priv,
+ void (*notify)(void *priv, int event_type, const void *metadata),
+ int (*get_rx_buffer)(void *priv, void **pkt_priv,
+ void **buffer, int size))
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_close(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_write(uint8_t lcid, void *pkt_priv,
+ const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_full(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_is_ch_low(uint8_t lcid)
+{
+ return -ENODEV;
+}
+
+static inline long msm_smux_tiocm_get(uint8_t lcid)
+{
+ return 0;
+}
+
+static inline int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
+{
+ return -ENODEV;
+}
+
+static inline int msm_smux_set_ch_option(uint8_t lcid, uint32_t set,
+ uint32_t clear)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_N_SMUX */
+
+#endif /* SMUX_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1ff6b62..818d189 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -52,6 +52,7 @@
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
+#define N_SMUX 25 /* Serial MUX */
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index b53d9dd..c68457e 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -177,6 +177,9 @@
* @enable_dcd: Enable Data Contact Detection circuit. if not set
* wait for 600msec before proceeding to primary
* detection.
+ * @enable_lpm_on_suspend: Enable the USB core to go into Low
+ * Power Mode, when USB bus is suspended but cable
+ * is connected.
* @bus_scale_table: parameters for bus bandwidth requirements
*/
struct msm_otg_platform_data {
@@ -192,6 +195,7 @@
bool mhl_enable;
bool disable_reset_on_disconnect;
bool enable_dcd;
+ bool enable_lpm_on_dev_suspend;
struct msm_bus_scale_pdata *bus_scale_table;
};
@@ -293,6 +297,8 @@
#define B_BUS_REQ 16
unsigned long inputs;
struct work_struct sm_work;
+ bool sm_work_pending;
+ atomic_t pm_suspended;
atomic_t in_lpm;
int async_int;
unsigned cur_power;
@@ -320,6 +326,11 @@
* voltage regulator(VDDCX).
*/
#define ALLOW_PHY_RETENTION BIT(1)
+ /*
+ * Allow putting the core in Low Power mode, when
+ * USB bus is suspended but cable is connected.
+ */
+#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2)
unsigned long lpm_flags;
#define PHY_PWR_COLLAPSED BIT(0)
#define PHY_RETENTIONED BIT(1)
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 147b068..eda60c0 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -656,6 +656,7 @@
/* Cache handling flags */
#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x0800
#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x1000
+#define V4L2_BUF_FLAG_EOS 0x2000
/*
* O V E R L A Y P R E V I E W
@@ -1458,6 +1459,7 @@
#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_MPEG_BASE+403)
#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_MPEG_BASE+404)
#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_MPEG_BASE+405)
+
enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1,
@@ -1549,6 +1551,84 @@
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
+/* MPEG-class control IDs specific to the msm_vidc driver */
+#define V4L2_CID_MPEG_MSM_VIDC_BASE (V4L2_CTRL_CLASS_MPEG | 0x2000)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+0)
+#define V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+1)
+#define V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+2)
+#define V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+3)
+enum v4l2_mpeg_vidc_video_divx_format_type {
+ V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_4 = 0,
+ V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_5 = 1,
+ V4L2_MPEG_VIDC_VIDEO_DIVX_FORMAT_6 = 2,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+4)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE+5)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT (V4L2_CID_MPEG_MSM_VIDC_BASE+6)
+enum v4l2_mpeg_vidc_video_stream_format {
+ V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_STARTCODES = 0,
+ V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_NAL_PER_BUFFER = 1,
+ V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_ONE_BYTE_LENGTH = 2,
+ V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_TWO_BYTE_LENGTH = 3,
+ V4L2_MPEG_VIDC_VIDEO_NAL_FORMAT_FOUR_BYTE_LENGTH = 4,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER (V4L2_CID_MPEG_MSM_VIDC_BASE+7)
+enum v4l2_mpeg_vidc_video_output_order {
+ V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DISPLAY = 0,
+ V4L2_MPEG_VIDC_VIDEO_OUTPUT_ORDER_DECODE = 1,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE (V4L2_CID_MPEG_MSM_VIDC_BASE+8)
+#define V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD (V4L2_CID_MPEG_MSM_VIDC_BASE+9)
+#define V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES (V4L2_CID_MPEG_MSM_VIDC_BASE+10)
+#define V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES (V4L2_CID_MPEG_MSM_VIDC_BASE+11)
+#define V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME (V4L2_CID_MPEG_MSM_VIDC_BASE+12)
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL (V4L2_CID_MPEG_MSM_VIDC_BASE+13)
+enum v4l2_mpeg_vidc_video_rate_control {
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF = 0,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR = 1,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR = 2,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR = 3,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR = 4,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_ROTATION (V4L2_CID_MPEG_MSM_VIDC_BASE+14)
+enum v4l2_mpeg_vidc_video_rotation {
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE = 0,
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 = 1,
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180 = 2,
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270 = 3,
+};
+#define MSM_VIDC_BASE V4L2_CID_MPEG_MSM_VIDC_BASE
+#define V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL (MSM_VIDC_BASE+15)
+enum v4l2_mpeg_vidc_h264_cabac_model {
+ V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0 = 0,
+ V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1 = 1,
+ V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2 = 2,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE (MSM_VIDC_BASE+16)
+enum v4l2_mpeg_vidc_video_intra_refresh_mode {
+ V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE = 0,
+ V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC = 1,
+ V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_ADAPTIVE = 2,
+ V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC_ADAPTIVE = 3,
+ V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM = 4,
+};
+#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+17)
+#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF (V4L2_CID_MPEG_MSM_VIDC_BASE+18)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+19)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
@@ -1780,6 +1860,54 @@
};
};
+/* Decoder commands */
+#define V4L2_DEC_CMD_START (0)
+#define V4L2_DEC_CMD_STOP (1)
+#define V4L2_DEC_CMD_PAUSE (2)
+#define V4L2_DEC_CMD_RESUME (3)
+
+/* Flags for V4L2_DEC_CMD_START */
+#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
+
+/* Flags for V4L2_DEC_CMD_PAUSE */
+#define V4L2_DEC_CMD_PAUSE_TO_BLACK (1 << 0)
+
+/* Flags for V4L2_DEC_CMD_STOP */
+#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0)
+#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1)
+
+/* Play format requirements (returned by the driver): */
+
+/* The decoder has no special format requirements */
+#define V4L2_DEC_START_FMT_NONE (0)
+/* The decoder requires full GOPs */
+#define V4L2_DEC_START_FMT_GOP (1)
+
+/* The structure must be zeroed before use by the application
+ This ensures it can be extended safely in the future. */
+struct v4l2_decoder_cmd {
+ __u32 cmd;
+ __u32 flags;
+ union {
+ struct {
+ __u64 pts;
+ } stop;
+
+ struct {
+ /* 0 or 1000 specifies normal speed,
+ 1 specifies forward single stepping,
+ -1 specifies backward single stepping,
+ >1: playback at speed/1000 of the normal speed,
+ <-1: reverse playback at (-speed/1000) of the normal speed. */
+ __s32 speed;
+ __u32 format;
+ } start;
+
+ struct {
+ __u32 data[16];
+ } raw;
+ };
+};
#endif
@@ -2146,6 +2274,15 @@
#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
+/* Experimental selection API */
+#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
+#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection)
+
+/* Experimental, these two ioctls may change over the next couple of kernel
+ versions. */
+#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
+#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index 1c492f9..d7e65b0 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -35,8 +35,10 @@
const struct dev_pm_ops *pm_ops);
void wcnss_wlan_unregister_pm_ops(struct device *dev,
const struct dev_pm_ops *pm_ops);
-void wcnss_register_thermal_mitigation(void (*tm_notify)(int));
-void wcnss_unregister_thermal_mitigation(void (*tm_notify)(int));
+void wcnss_register_thermal_mitigation(struct device *dev,
+ void (*tm_notify)(struct device *dev, int));
+void wcnss_unregister_thermal_mitigation(
+ void (*tm_notify)(struct device *dev, int));
struct platform_device *wcnss_get_platform_device(void);
struct wcnss_wlan_config *wcnss_get_wlan_config(void);
int wcnss_wlan_power(struct device *dev,
diff --git a/include/media/Kbuild b/include/media/Kbuild
index 8dfb0fc..03951ce 100644
--- a/include/media/Kbuild
+++ b/include/media/Kbuild
@@ -4,4 +4,4 @@
header-y += vcap_fmt.h
header-y += msm_isp.h
header-y += msm_gemini.h
-header-y += msm_v4l2_overlay.h
+header-y += msm_gestures.h
diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h
index 32a1759..c93b696 100644
--- a/include/media/msm/vcd_api.h
+++ b/include/media/msm/vcd_api.h
@@ -66,7 +66,7 @@
u32 alloc_len;
u32 data_len;
u32 offset;
- s64 time_stamp;
+ s64 time_stamp; /* in usecs*/
u32 flags;
u32 frm_clnt_data;
struct vcd_property_dec_output_buffer dec_op_prop;
diff --git a/include/media/msm/vcd_property.h b/include/media/msm/vcd_property.h
index e776d41..cd00800 100644
--- a/include/media/msm/vcd_property.h
+++ b/include/media/msm/vcd_property.h
@@ -53,6 +53,7 @@
#define VCD_I_ENABLE_SPS_PPS_FOR_IDR (VCD_START_BASE + 0x25)
#define VCD_REQ_PERF_LEVEL (VCD_START_BASE + 0x26)
#define VCD_I_SLICE_DELIVERY_MODE (VCD_START_BASE + 0x27)
+#define VCD_I_VOP_TIMING_CONSTANT_DELTA (VCD_START_BASE + 0x28)
#define VCD_START_REQ (VCD_START_BASE + 0x1000)
#define VCD_I_REQ_IFRAME (VCD_START_REQ + 0x1)
@@ -300,6 +301,10 @@
u32 vop_time_resolution;
};
+struct vcd_property_vop_timing_constant_delta {
+ u32 constant_delta; /*In usecs */
+};
+
struct vcd_property_short_header {
u32 short_header;
};
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 3f647dc..d4cf1d2 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -1433,6 +1433,9 @@
#define MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD \
_IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct msm_camera_v4l2_ioctl_t *)
+#define MSM_CAM_IOCTL_SEND_EVENT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct v4l2_event)
+
struct msm_camera_v4l2_ioctl_t {
void __user *ioctl_ptr;
};
diff --git a/include/media/msm_gestures.h b/include/media/msm_gestures.h
new file mode 100644
index 0000000..c9af034
--- /dev/null
+++ b/include/media/msm_gestures.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_MSM_GESTURES_H
+#define __LINUX_MSM_GESTURES_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <media/msm_camera.h>
+
+#define MSM_GES_IOCTL_CTRL_COMMAND \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 20, struct v4l2_control)
+
+#define VIDIOC_MSM_GESTURE_EVT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 21, struct v4l2_event)
+
+#define MSM_GES_GET_EVT_PAYLOAD \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 22, struct msm_ges_evt)
+
+#define VIDIOC_MSM_GESTURE_CAM_EVT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 23, int)
+
+#define MSM_GES_RESP_V4L2 MSM_CAM_RESP_MAX
+#define MSM_GES_RESP_MAX (MSM_GES_RESP_V4L2 + 1)
+
+#define MSM_SVR_RESP_MAX MSM_GES_RESP_MAX
+
+
+#define MSM_V4L2_GES_BASE 100
+#define MSM_V4L2_GES_OPEN (MSM_V4L2_GES_BASE + 0)
+#define MSM_V4L2_GES_CLOSE (MSM_V4L2_GES_BASE + 1)
+#define MSM_V4L2_GES_CAM_OPEN (MSM_V4L2_GES_BASE + 2)
+#define MSM_V4L2_GES_CAM_CLOSE (MSM_V4L2_GES_BASE + 3)
+
+#define MSM_GES_APP_EVT_MIN (V4L2_EVENT_PRIVATE_START + 0x14)
+#define MSM_GES_APP_NOTIFY_EVENT (MSM_GES_APP_EVT_MIN + 0)
+#define MSM_GES_APP_NOTIFY_ERROR_EVENT (MSM_GES_APP_EVT_MIN + 1)
+#define MSM_GES_APP_EVT_MAX (MSM_GES_APP_EVT_MIN + 2)
+
+#define MSM_GESTURE_CID_CTRL_CMD V4L2_CID_BRIGHTNESS
+
+#define MAX_GES_EVENTS 25
+
+struct msm_ges_ctrl_cmd {
+ int type;
+ void *value;
+ int len;
+ int fd;
+ uint32_t cookie;
+};
+
+struct msm_ges_evt {
+ void *evt_data;
+ int evt_len;
+};
+
+#endif /*__LINUX_MSM_GESTURES_H*/
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
new file mode 100644
index 0000000..baa6a28
--- /dev/null
+++ b/include/media/msm_vidc.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_VIDC_H_
+#define _MSM_VIDC_H_
+
+#include <linux/videodev2.h>
+#include <linux/poll.h>
+
+enum core_id {
+ MSM_VIDC_CORE_0 = 0,
+ MSM_VIDC_CORES_MAX,
+};
+
+enum session_type {
+ MSM_VIDC_ENCODER = 0,
+ MSM_VIDC_DECODER,
+ MSM_VIDC_MAX_DEVICES,
+};
+
+int msm_vidc_open(void *vidc_inst, int core_id, int session_type);
+int msm_vidc_close(void *instance);
+int msm_vidc_querycap(void *instance, struct v4l2_capability *cap);
+int msm_vidc_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
+int msm_vidc_s_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_g_fmt(void *instance, struct v4l2_format *f);
+int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
+int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
+int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_release_buf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
+int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
+int msm_vidc_streamoff(void *instance, enum v4l2_buf_type i);
+int msm_vidc_decoder_cmd(void *instance, struct v4l2_decoder_cmd *dec);
+int msm_vidc_poll(void *instance, struct file *filp,
+ struct poll_table_struct *pt);
+#endif
diff --git a/include/media/user-rc-input.h b/include/media/user-rc-input.h
new file mode 100644
index 0000000..e58e40f
--- /dev/null
+++ b/include/media/user-rc-input.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __USER_RC_INPUT_H__
+#define __USER_RC_INPUT_H__
+
+#define USER_CONTROL_PRESSED 0x01
+#define USER_CONTROL_REPEATED 0x02
+#define USER_CONTROL_RELEASED 0x03
+
+#endif /* __USER_RC_INPUT_H__ */
+
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 4d1c74a..46c13ba 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -207,6 +207,10 @@
struct v4l2_encoder_cmd *a);
int (*vidioc_try_encoder_cmd) (struct file *file, void *fh,
struct v4l2_encoder_cmd *a);
+ int (*vidioc_decoder_cmd) (struct file *file, void *fh,
+ struct v4l2_decoder_cmd *a);
+ int (*vidioc_try_decoder_cmd) (struct file *file, void *fh,
+ struct v4l2_decoder_cmd *a);
/* Stream type-dependent parameter ioctls */
int (*vidioc_g_parm) (struct file *file, void *fh,
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 4a62bc3..51b45ac 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -14,8 +14,8 @@
#ifndef VCAP_FMT_H
#define VCAP_FMT_H
-#define V4L2_BUF_TYPE_INTERLACED_IN_AFE (V4L2_BUF_TYPE_PRIVATE)
-#define V4L2_BUF_TYPE_INTERLACED_IN_DECODER (V4L2_BUF_TYPE_PRIVATE + 1)
+#define V4L2_BUF_TYPE_INTERLACED_IN_DECODER (V4L2_BUF_TYPE_PRIVATE)
+#define V4L2_BUF_TYPE_VP_OUT (V4L2_BUF_TYPE_PRIVATE + 1)
enum hal_vcap_mode {
HAL_VCAP_MODE_PRO = 0,
@@ -32,87 +32,7 @@
HAL_VCAP_RGB,
};
-enum hal_vcap_vc_fmt {
- /* 1080p */
- HAL_VCAP_YUV_1080p_60_RH = 0,
- HAL_VCAP_YUV_1080p_60_FL,
- HAL_VCAP_RGB_1080p_60_FL,
- HAL_VCAP_YUV_1080p_24_FL,
- HAL_VCAP_YUV_1080p_24_RH,
- HAL_VCAP_YUV_1080p_24_RW,
- HAL_VCAP_YUV_1080p_60_RW,
- HAL_VCAP_YUV_1080p_50_FL,
- HAL_VCAP_YUV_1080p_50_RH,
- HAL_VCAP_YUV_1080p_25_FL,
- HAL_VCAP_YUV_1080p_25_RH,
- HAL_VCAP_YUV_1080p_30_RH,
- HAL_VCAP_RGB_1080p_25_FL,
- HAL_VCAP_RGB_1080p_25_RH,
- /* 1080i */
- HAL_VCAP_YUV_1080i_60_FL,
- HAL_VCAP_YUV_1080i_60_RH,
- HAL_VCAP_YUV_1080i_60_RW,
- HAL_VCAP_YUV_1080i_50_FL,
- HAL_VCAP_YUV_1080i_50_RH,
- HAL_VCAP_YUV_1080i_50_RW,
- HAL_VCAP_RGB_1080i_50_FL,
- HAL_VCAP_RGB_1080i_50_RH,
- /* 480i */
- HAL_VCAP_YUV_480i_60_RH,
- HAL_VCAP_YUV_480i_60_FL,
- HAL_VCAP_YUV_480i_60_RW,
- HAL_VCAP_YUV_2880_480i_60_FL,
- HAL_VCAP_YUV_2880_480i_60_RH,
- /* 480p */
- HAL_VCAP_YUV_480p_60_RH,
- HAL_VCAP_RGB_480p_60_RH,
- HAL_VCAP_RGB_480p_60_FL,
- HAL_VCAP_YUV_480p_60_FL,
- HAL_VCAP_YUV_480p_60_RW,
- HAL_VCAP_YUV_2880_480p_60_FL,
- HAL_VCAP_YUV_2880_480p_60_RH,
- /* 720p */
- HAL_VCAP_YUV_720p_60_FL,
- HAL_VCAP_RGB_720p_60_FL,
- HAL_VCAP_YUV_720p_60_RW,
- HAL_VCAP_YUV_720p_60_RH,
- HAL_VCAP_YUV_720p_50_FL,
- HAL_VCAP_YUV_720p_50_RW,
- HAL_VCAP_YUV_720p_50_RH,
- /* 576p */
- HAL_VCAP_YUV_576p_50_FL,
- HAL_VCAP_RGB_576p_50_FL,
- HAL_VCAP_YUV_576p_50_RW,
- HAL_VCAP_YUV_576p_50_RH,
- HAL_VCAP_YUV_1440_576p_50_RH,
- HAL_VCAP_YUV_2880_576p_50_FL,
- HAL_VCAP_YUV_2880_576p_50_RH,
- /* 576i */
- HAL_VCAP_YUV_576i_50_FL,
- HAL_VCAP_YUV_576i_50_RW,
- HAL_VCAP_YUV_576i_50_RH,
- /* XGA 1024x768 */
- HAL_VCAP_YUV_XGA_FL,
- HAL_VCAP_YUV_XGA_RH,
- HAL_VCAP_YUV_XGA_RB,
- /* SXGA 1280x1024 */
- HAL_VCAP_YUV_SXGA_FL,
- HAL_VCAP_RGB_SXGA_FL,
- HAL_VCAP_YUV_SXGA_RH,
- HAL_VCAP_YUV_SXGA_RB,
- /* UXGA 1600x1200 */
- HAL_VCAP_YUV_UXGA_FL,
- HAL_VCAP_RGB_UXGA_FL,
- HAL_VCAP_YUV_UXGA_RH,
- HAL_VCAP_YUV_UXGA_RB,
- /* test odd height */
- HAL_VCAP_ODD_HEIGHT,
- /* test odd width RGB only */
- HAL_VCAP_ODD_WIDTH,
-};
-
struct v4l2_format_vc_ext {
- enum hal_vcap_vc_fmt format;
enum hal_vcap_mode mode;
enum hal_vcap_polar h_polar;
enum hal_vcap_polar v_polar;
@@ -136,5 +56,22 @@
uint32_t f2_vsync_h_end;
uint32_t f2_vsync_v_start;
uint32_t f2_vsync_v_end;
+ uint32_t sizeimage;
+ uint32_t bytesperline;
+};
+
+enum vcap_type {
+ VC_TYPE,
+ VP_IN_TYPE,
+ VP_OUT_TYPE,
+};
+
+struct vcap_priv_fmt {
+ enum vcap_type type;
+ union {
+ struct v4l2_format_vc_ext timing;
+ struct v4l2_pix_format pix;
+ /* Once VP is created there will be another type in here */
+ } u;
};
#endif
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 57f9703..374e681 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -14,6 +14,7 @@
#ifndef VCAP_V4L2_H
#define VCAP_V4L2_H
+#define TOP_FIELD_FIX
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/videodev2.h>
@@ -28,6 +29,12 @@
#include <media/vcap_fmt.h>
#include <mach/board.h>
+#define writel_iowmb(val, addr) \
+ do { \
+ __iowmb(); \
+ writel_relaxed(val, addr); \
+ } while (0)
+
struct vcap_client_data;
enum rdy_buf {
@@ -37,11 +44,34 @@
VC_BUF1N2 = 0x11 << 1,
};
+enum vp_state {
+ VP_UNKNOWN = 0,
+ VP_FRAME1,
+ VP_FRAME2,
+ VP_FRAME3,
+ VP_NORMAL,
+};
+
+enum nr_buf_pos {
+ BUF_NOT_IN_USE = 0,
+ NRT2_BUF,
+ T1_BUF,
+ T0_BUF,
+ TM1_BUF,
+};
+
struct vcap_buf_info {
unsigned long vaddr;
unsigned long size;
};
+enum vcap_op_mode {
+ UNKNOWN_VCAP_OP = 0,
+ VC_VCAP_OP,
+ VP_VCAP_OP,
+ VC_AND_VP_VCAP_OP,
+};
+
struct vcap_action {
struct list_head active;
@@ -61,13 +91,50 @@
int ini_jiffies;
};
+struct nr_buffer {
+ void *vaddr;
+ unsigned long paddr;
+ enum nr_buf_pos nr_pos;
+};
+
+struct vp_action {
+ struct list_head in_active;
+ struct list_head out_active;
+
+ /* Buffer index */
+ enum vp_state vp_state;
+#ifdef TOP_FIELD_FIX
+ bool top_field;
+#endif
+
+ /* Buffers inside vc */
+ struct vcap_buffer *bufTm1;
+ struct vcap_buffer *bufT0;
+ struct vcap_buffer *bufT1;
+ struct vcap_buffer *bufT2;
+ struct vcap_buffer *bufNRT2;
+
+ struct vcap_buffer *bufOut;
+
+ void *bufMotion;
+ struct nr_buffer bufNR;
+ bool nr_enabled;
+};
+
+struct vp_work_t {
+ struct work_struct work;
+ struct vcap_client_data *cd;
+ uint32_t irq;
+};
+
struct vcap_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd;
struct ion_client *ion_client;
- struct resource *vcapirq;
+ struct resource *vcirq;
+ struct resource *vpirq;
struct resource *vcapmem;
struct resource *vcapio;
@@ -87,15 +154,20 @@
struct vcap_client_data *vp_client;
atomic_t vc_enabled;
+ atomic_t vp_enabled;
+
atomic_t vc_resource;
atomic_t vp_resource;
+
+ struct workqueue_struct *vcap_wq;
+ struct vp_work_t vp_work;
+ struct vp_work_t vc_to_vp_work;
+ struct vp_work_t vp_to_vc_work;
};
struct vp_format_data {
unsigned int width, height;
- unsigned int pixelformat;
- enum v4l2_field field;
-
+ unsigned int pixfmt;
};
struct vcap_buffer {
@@ -107,18 +179,23 @@
};
struct vcap_client_data {
+ bool set_cap, set_decode, set_vp_o;
struct vcap_dev *dev;
struct vb2_queue vc_vidq;
- /*struct vb2_queue vb__vidq;*/
- /*struct vb2_queue vb_cap_vidq;*/
+ struct vb2_queue vp_in_vidq;
+ struct vb2_queue vp_out_vidq;
+
+ enum vcap_op_mode op_mode;
struct v4l2_format_vc_ext vc_format;
enum v4l2_buf_type vp_buf_type_field;
- struct vp_format_data vp_format;
+ struct vp_format_data vp_in_fmt;
+ struct vp_format_data vp_out_fmt;
struct vcap_action vid_vc_action;
+ struct vp_action vid_vp_action;
struct workqueue_struct *vcap_work_q;
struct ion_handle *vc_ion_handle;
@@ -126,7 +203,20 @@
uint32_t hold_vp;
spinlock_t cap_slock;
+ bool streaming;
};
+struct vcap_hacked_vals {
+ uint32_t value;
+ uint32_t offset;
+};
+
+extern struct vcap_hacked_vals hacked_buf[];
+
#endif
+int free_ion_handle(struct vcap_dev *dev, struct vb2_queue *q,
+ struct v4l2_buffer *b);
+
+int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
+ struct v4l2_buffer *b);
#endif
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 5749293..47b856c 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -26,7 +26,7 @@
#define __HCI_CORE_H
#include <net/bluetooth/hci.h>
-
+#include <linux/wakelock.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP 0
#define HCI_PROTO_SCO 1
@@ -326,7 +326,7 @@
struct work_struct work_add;
struct work_struct work_del;
-
+ struct wake_lock idle_lock;
struct device dev;
atomic_t devref;
diff --git a/include/sound/jack.h b/include/sound/jack.h
index ccdc341..1b13cbb 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -35,24 +35,26 @@
* sound/core/jack.c.
*/
enum snd_jack_types {
- SND_JACK_HEADPHONE = 0x0001,
- SND_JACK_MICROPHONE = 0x0002,
+ SND_JACK_HEADPHONE = 0x0000001,
+ SND_JACK_MICROPHONE = 0x0000002,
SND_JACK_HEADSET = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE,
- SND_JACK_LINEOUT = 0x0004,
- SND_JACK_MECHANICAL = 0x0008, /* If detected separately */
- SND_JACK_VIDEOOUT = 0x0010,
+ SND_JACK_LINEOUT = 0x0000004,
+ SND_JACK_MECHANICAL = 0x0000008, /* If detected separately */
+ SND_JACK_VIDEOOUT = 0x0000010,
SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
- SND_JACK_OC_HPHL = 0x0020,
- SND_JACK_OC_HPHR = 0x0040,
+ /* */
+ SND_JACK_OC_HPHL = 0x0000020,
+ SND_JACK_OC_HPHR = 0x0000040,
+ SND_JACK_UNSUPPORTED = 0x0000080,
/* Kept separate from switches to facilitate implementation */
- SND_JACK_BTN_0 = 0x4000,
- SND_JACK_BTN_1 = 0x2000,
- SND_JACK_BTN_2 = 0x1000,
- SND_JACK_BTN_3 = 0x0800,
- SND_JACK_BTN_4 = 0x0400,
- SND_JACK_BTN_5 = 0x0200,
- SND_JACK_BTN_6 = 0x0100,
- SND_JACK_BTN_7 = 0x0080,
+ SND_JACK_BTN_0 = 0x4000000,
+ SND_JACK_BTN_1 = 0x2000000,
+ SND_JACK_BTN_2 = 0x1000000,
+ SND_JACK_BTN_3 = 0x0800000,
+ SND_JACK_BTN_4 = 0x0400000,
+ SND_JACK_BTN_5 = 0x0200000,
+ SND_JACK_BTN_6 = 0x0100000,
+ SND_JACK_BTN_7 = 0x0080000,
};
struct snd_jack {
diff --git a/include/sound/msm-dai-q6.h b/include/sound/msm-dai-q6.h
index 6328256..042aa6f 100644
--- a/include/sound/msm-dai-q6.h
+++ b/include/sound/msm-dai-q6.h
@@ -21,8 +21,7 @@
#define MSM_MI2S_CAP_RX 0
#define MSM_MI2S_CAP_TX 1
-struct msm_dai_auxpcm_pdata {
- const char *clk;
+struct msm_dai_auxpcm_config {
u16 mode;
u16 sync;
u16 frame;
@@ -36,4 +35,11 @@
u16 rx_sd_lines;
u16 tx_sd_lines;
};
+
+struct msm_dai_auxpcm_pdata {
+ const char *clk;
+ struct msm_dai_auxpcm_config mode_8k;
+ struct msm_dai_auxpcm_config mode_16k;
+};
+
#endif
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index 968d46e..54a9187 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -13,7 +13,6 @@
#define __Q6_ASM_H__
#include <mach/qdsp6v2/apr.h>
-#include <mach/msm_subsystem_map.h>
#include <sound/apr_audio.h>
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#include <linux/ion.h>
@@ -110,7 +109,7 @@
struct ion_handle *handle;
struct ion_client *client;
#else
- struct msm_mapped_buffer *mem_buffer;
+ void *mem_buffer;
#endif
};
diff --git a/include/sound/snd_compress_params.h b/include/sound/snd_compress_params.h
index e9f6748..f1e3ea2 100644
--- a/include/sound/snd_compress_params.h
+++ b/include/sound/snd_compress_params.h
@@ -72,6 +72,7 @@
#define SND_AUDIOCODEC_AC3 ((__u32) 0x0000000E)
#define SND_AUDIOCODEC_DTS ((__u32) 0x0000000F)
#define SND_AUDIOCODEC_AC3_PASS_THROUGH ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_WMA_PRO ((__u32) 0x000000011)
/*
* Profile and modes are listed with bit masks. This allows for a
* more compact representation of fields that will not evolve
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6957aa2..fa1d639 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1792,6 +1792,8 @@
rwbs[i++] = 'W';
else if (rw & REQ_DISCARD)
rwbs[i++] = 'D';
+ else if (rw & REQ_SANITIZE)
+ rwbs[i++] = 'Z';
else if (bytes)
rwbs[i++] = 'R';
else
diff --git a/mm/memblock.c b/mm/memblock.c
index 5338237..b7abce5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -720,6 +720,12 @@
memblock.memory.regions[idx].size) >= end;
}
+int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+{
+ memblock_cap_size(base, &size);
+ return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+}
+
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
memblock_cap_size(base, &size);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25b559b1..02ea082 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -448,6 +448,7 @@
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ wake_lock_init(&conn->idle_lock, WAKE_LOCK_SUSPEND, "bt_idle");
switch (type) {
case ACL_LINK:
@@ -521,6 +522,7 @@
/* Make sure no timers are running */
del_timer(&conn->idle_timer);
+ wake_lock_destroy(&conn->idle_lock);
del_timer(&conn->disc_timer);
del_timer(&conn->smp_timer);
__cancel_delayed_work(&conn->rssi_update_work);
@@ -962,9 +964,11 @@
}
timer:
- if (hdev->idle_timeout > 0)
+ if (hdev->idle_timeout > 0) {
mod_timer(&conn->idle_timer,
jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ wake_lock(&conn->idle_lock);
+ }
}
static inline void hci_conn_stop_rssi_timer(struct hci_conn *conn)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 395a95c..9e545f3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2556,6 +2556,9 @@
else
conn->power_save = 0;
}
+ if (conn->mode == HCI_CM_SNIFF)
+ if (wake_lock_active(&conn->idle_lock))
+ wake_unlock(&conn->idle_lock);
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
hci_sco_setup(conn, ev->status);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a7b95d3..3ecc6d4 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5397,9 +5397,6 @@
BT_DBG("sk %p", sk);
- if (!sk)
- return;
-
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
@@ -5539,8 +5536,11 @@
container_of(work, struct l2cap_logical_link_work, work);
struct sock *sk = log_link_work->chan->l2cap_sk;
- l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
- sock_put(sk);
+ if (sk) {
+ l2cap_logical_link_complete(log_link_work->chan,
+ log_link_work->status);
+ sock_put(sk);
+ }
hci_chan_put(log_link_work->chan);
kfree(log_link_work);
}
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 80453a9..675f45b 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -32,6 +32,7 @@
SW_VIDEOOUT_INSERT,
SW_HPHL_OVERCURRENT,
SW_HPHR_OVERCURRENT,
+ SW_UNSUPPORT_INSERT,
};
static int snd_jack_dev_free(struct snd_device *device)
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 9041bd7..ff83197 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -1775,6 +1775,9 @@
{"HEADPHONE", NULL, "HPHL"},
{"HEADPHONE", NULL, "HPHR"},
+ {"HPHL DAC", NULL, "CP"},
+ {"HPHR DAC", NULL, "CP"},
+
{"HPHL", NULL, "HPHL DAC"},
{"HPHL DAC", "NULL", "DAC4 MUX"},
{"HPHR", NULL, "HPHR DAC"},
@@ -1950,9 +1953,11 @@
static void sitar_codec_enable_audio_mode_bandgap(struct snd_soc_codec *codec)
{
+ struct wcd9xxx *sitar_core = dev_get_drvdata(codec->dev->parent);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x0C, 0x04);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x80, 0x80);
+ if (SITAR_IS_1P0(sitar_core->version))
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x80, 0x80);
+
snd_soc_update_bits(codec, SITAR_A_BIAS_CURR_CTL_2, 0x0C, 0x08);
usleep_range(1000, 1000);
snd_soc_write(codec, SITAR_A_BIAS_REF_CTL, 0x1C);
@@ -1971,6 +1976,7 @@
enum sitar_bandgap_type choice)
{
struct sitar_priv *sitar = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx *sitar_core = dev_get_drvdata(codec->dev->parent);
/* TODO lock resources accessed by audio streams and threaded
* interrupt handlers
@@ -2005,7 +2011,9 @@
} else if (choice == SITAR_BANDGAP_OFF) {
snd_soc_update_bits(codec, SITAR_A_BIAS_CURR_CTL_2, 0x0C, 0x00);
snd_soc_write(codec, SITAR_A_BIAS_CENTRAL_BG_CTL, 0x50);
- snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0xFF, 0x65);
+ if (SITAR_IS_1P0(sitar_core->version))
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1,
+ 0xFF, 0x65);
usleep_range(1000, 1000);
} else {
pr_err("%s: Error, Invalid bandgap settings\n", __func__);
@@ -4327,6 +4335,8 @@
}
/* Set voltage level and always use LDO */
+ snd_soc_update_bits(codec, SITAR_A_LDO_H_MODE_1, 0x0C,
+ (pdata->micbias.ldoh_v << 2));
snd_soc_update_bits(codec, SITAR_A_MICB_CFILT_1_VAL, 0xFC,
(k1 << 2));
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 2d5eab2..443114c 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -56,7 +56,8 @@
#define NUM_ATTEMPTS_INSERT_DETECT 25
#define NUM_ATTEMPTS_TO_REPORT 5
-#define TABLA_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR)
+#define TABLA_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED)
#define TABLA_I2S_MASTER_MODE_MASK 0x08
@@ -102,6 +103,8 @@
#define TABLA_GPIO_IRQ_DEBOUNCE_TIME_US 5000
+#define TABLA_MBHC_GND_MIC_SWAP_THRESHOLD 2
+
#define TABLA_ACQUIRE_LOCK(x) do { mutex_lock(&x); } while (0)
#define TABLA_RELEASE_LOCK(x) do { mutex_unlock(&x); } while (0)
@@ -214,6 +217,7 @@
PLUG_TYPE_HEADSET,
PLUG_TYPE_HEADPHONE,
PLUG_TYPE_HIGH_HPH,
+ PLUG_TYPE_GND_MIC_SWAP,
};
enum tabla_mbhc_state {
@@ -1741,6 +1745,10 @@
(choice == TABLA_BANDGAP_AUDIO_MODE)) {
tabla_codec_enable_audio_mode_bandgap(codec);
} else if (choice == TABLA_BANDGAP_MBHC_MODE) {
+ /* bandgap mode becomes fast,
+ * mclk should be off or clk buff source souldn't be VBG
+ * Let's turn off mclk always */
+ WARN_ON(snd_soc_read(codec, TABLA_A_CLK_BUFF_EN2) & (1 << 2));
snd_soc_update_bits(codec, TABLA_A_BIAS_CENTRAL_BG_CTL, 0x2,
0x2);
snd_soc_update_bits(codec, TABLA_A_BIAS_CENTRAL_BG_CTL, 0x80,
@@ -1770,9 +1778,10 @@
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
pr_debug("%s\n", __func__);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN2, 0x04, 0x00);
- ndelay(160);
+ usleep_range(50, 50);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN2, 0x02, 0x02);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x05, 0x00);
+ usleep_range(50, 50);
tabla->clock_active = false;
}
@@ -1813,21 +1822,23 @@
pr_debug("%s: enable = %d\n", __func__, enable);
if (enable) {
snd_soc_update_bits(codec, TABLA_A_CONFIG_MODE_FREQ, 0x10, 0);
+ /* bandgap mode to fast */
snd_soc_write(codec, TABLA_A_BIAS_CONFIG_MODE_BG_CTL, 0x17);
usleep_range(5, 5);
snd_soc_update_bits(codec, TABLA_A_CONFIG_MODE_FREQ, 0x80,
- 0x80);
+ 0x80);
snd_soc_update_bits(codec, TABLA_A_CONFIG_MODE_TEST, 0x80,
- 0x80);
+ 0x80);
usleep_range(10, 10);
snd_soc_update_bits(codec, TABLA_A_CONFIG_MODE_TEST, 0x80, 0);
- usleep_range(20, 20);
+ usleep_range(10000, 10000);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x08, 0x08);
} else {
snd_soc_update_bits(codec, TABLA_A_BIAS_CONFIG_MODE_BG_CTL, 0x1,
- 0);
+ 0);
snd_soc_update_bits(codec, TABLA_A_CONFIG_MODE_FREQ, 0x80, 0);
- snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x08, 0x00);
+ /* clk source to ext clk and clk buff ref to VBG */
+ snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x0C, 0x04);
}
tabla->config_mode_active = enable ? true : false;
@@ -1835,29 +1846,32 @@
}
static int tabla_codec_enable_clock_block(struct snd_soc_codec *codec,
- int config_mode)
+ int config_mode)
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: config_mode = %d\n", __func__, config_mode);
+ /* transit to RCO requires mclk off */
+ WARN_ON(snd_soc_read(codec, TABLA_A_CLK_BUFF_EN2) & (1 << 2));
if (config_mode) {
+ /* enable RCO and switch to it */
tabla_codec_enable_config_mode(codec, 1);
- snd_soc_write(codec, TABLA_A_CLK_BUFF_EN2, 0x00);
snd_soc_write(codec, TABLA_A_CLK_BUFF_EN2, 0x02);
- snd_soc_write(codec, TABLA_A_CLK_BUFF_EN1, 0x0D);
usleep_range(1000, 1000);
- } else
+ } else {
+ /* switch to MCLK */
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x08, 0x00);
- if (!config_mode && tabla->mbhc_polling_active) {
- snd_soc_write(codec, TABLA_A_CLK_BUFF_EN2, 0x02);
- tabla_codec_enable_config_mode(codec, 0);
-
+ if (tabla->mbhc_polling_active) {
+ snd_soc_write(codec, TABLA_A_CLK_BUFF_EN2, 0x02);
+ tabla_codec_enable_config_mode(codec, 0);
+ }
}
- snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x05, 0x05);
+ snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1, 0x01, 0x01);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN2, 0x02, 0x00);
+ /* on MCLK */
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN2, 0x04, 0x04);
snd_soc_update_bits(codec, TABLA_A_CDC_CLK_MCLK_CTL, 0x01, 0x01);
usleep_range(50, 50);
@@ -2447,7 +2461,6 @@
tabla->mbhc_micbias_switched = true;
pr_debug("%s: VDDIO switch enabled\n", __func__);
-
} else if (!vddio_switch && tabla->mbhc_micbias_switched) {
if ((!checkpolling || tabla->mbhc_polling_active) &&
restartpolling)
@@ -3536,6 +3549,10 @@
if (tabla_is_digital_gain_register(reg))
return 1;
+ /* HPH status registers */
+ if (reg == TABLA_A_RX_HPH_L_STATUS || reg == TABLA_A_RX_HPH_R_STATUS)
+ return 1;
+
return 0;
}
@@ -3689,16 +3706,18 @@
if (mclk_enable) {
tabla->mclk_enabled = true;
- if (tabla->mbhc_polling_active && (tabla->mclk_enabled)) {
+ if (tabla->mbhc_polling_active) {
tabla_codec_pause_hs_polling(codec);
+ tabla_codec_disable_clock_block(codec);
tabla_codec_enable_bandgap(codec,
- TABLA_BANDGAP_AUDIO_MODE);
+ TABLA_BANDGAP_AUDIO_MODE);
tabla_codec_enable_clock_block(codec, 0);
tabla_codec_calibrate_hs_polling(codec);
tabla_codec_start_hs_polling(codec);
} else {
+ tabla_codec_disable_clock_block(codec);
tabla_codec_enable_bandgap(codec,
- TABLA_BANDGAP_AUDIO_MODE);
+ TABLA_BANDGAP_AUDIO_MODE);
tabla_codec_enable_clock_block(codec, 0);
}
} else {
@@ -3712,21 +3731,20 @@
tabla->mclk_enabled = false;
if (tabla->mbhc_polling_active) {
- if (!tabla->mclk_enabled) {
- tabla_codec_pause_hs_polling(codec);
- tabla_codec_enable_bandgap(codec,
- TABLA_BANDGAP_MBHC_MODE);
- tabla_enable_rx_bias(codec, 1);
- tabla_codec_enable_clock_block(codec, 1);
- tabla_codec_calibrate_hs_polling(codec);
- tabla_codec_start_hs_polling(codec);
- }
+ tabla_codec_pause_hs_polling(codec);
+ tabla_codec_disable_clock_block(codec);
+ tabla_codec_enable_bandgap(codec,
+ TABLA_BANDGAP_MBHC_MODE);
+ tabla_enable_rx_bias(codec, 1);
+ tabla_codec_enable_clock_block(codec, 1);
+ tabla_codec_calibrate_hs_polling(codec);
+ tabla_codec_start_hs_polling(codec);
snd_soc_update_bits(codec, TABLA_A_CLK_BUFF_EN1,
0x05, 0x01);
} else {
tabla_codec_disable_clock_block(codec);
tabla_codec_enable_bandgap(codec,
- TABLA_BANDGAP_OFF);
+ TABLA_BANDGAP_OFF);
}
}
if (dapm)
@@ -3852,6 +3870,7 @@
tx_slot[0] = tx_ch[cnt];
tx_slot[1] = tx_ch[1 + cnt];
tx_slot[2] = tx_ch[5 + cnt];
+ tx_slot[3] = tx_ch[3 + cnt];
} else if (dai->id == AIF3_CAP) {
*tx_num = tabla_dai[dai->id - 1].capture.channels_max;
tx_slot[cnt] = tx_ch[2 + cnt];
@@ -4569,7 +4588,7 @@
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, 0, 0, &sb_tx4_mux),
- SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF1 Capture", 0, SND_SOC_NOPM, 0,
+ SND_SOC_DAPM_AIF_OUT_E("SLIM TX4", "AIF2 Capture", 0, SND_SOC_NOPM, 0,
0, tabla_codec_enable_slimtx,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
@@ -4762,6 +4781,7 @@
}
if (!tabla->mclk_enabled) {
+ tabla_codec_disable_clock_block(codec);
tabla_codec_enable_bandgap(codec, TABLA_BANDGAP_MBHC_MODE);
tabla_enable_rx_bias(codec, 1);
tabla_codec_enable_clock_block(codec, 1);
@@ -4915,8 +4935,8 @@
tabla->buttons_pressed &=
~TABLA_JACK_BUTTON_MASK;
}
- pr_debug("%s: Reporting removal %d\n", __func__,
- jack_type);
+ pr_debug("%s: Reporting removal %d(%x)\n", __func__,
+ jack_type, tabla->hph_status);
tabla_snd_soc_jack_report(tabla,
tabla->mbhc_cfg.headset_jack,
tabla->hph_status,
@@ -4935,13 +4955,15 @@
if (jack_type == SND_JACK_HEADPHONE)
tabla->current_plug = PLUG_TYPE_HEADPHONE;
+ else if (jack_type == SND_JACK_UNSUPPORTED)
+ tabla->current_plug = PLUG_TYPE_GND_MIC_SWAP;
else if (jack_type == SND_JACK_HEADSET) {
tabla->mbhc_polling_active = true;
tabla->current_plug = PLUG_TYPE_HEADSET;
}
if (tabla->mbhc_cfg.headset_jack) {
- pr_debug("%s: Reporting insertion %d\n", __func__,
- jack_type);
+ pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
+ jack_type, tabla->hph_status);
tabla_snd_soc_jack_report(tabla,
tabla->mbhc_cfg.headset_jack,
tabla->hph_status,
@@ -5861,8 +5883,8 @@
return IRQ_HANDLED;
}
-static bool tabla_is_invalid_insertion_range(struct snd_soc_codec *codec,
- s32 mic_volt, bool highhph)
+static bool tabla_is_inval_ins_range(struct snd_soc_codec *codec,
+ s32 mic_volt, bool highhph, bool *highv)
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
bool invalid = false;
@@ -5872,7 +5894,8 @@
* needs to be considered as invalid
*/
v_hs_max = tabla_get_current_v_hs_max(tabla);
- if (!highhph && (mic_volt > v_hs_max))
+ *highv = mic_volt > v_hs_max;
+ if (!highhph && *highv)
invalid = true;
else if (mic_volt < tabla->mbhc_data.v_inval_ins_high &&
(mic_volt > tabla->mbhc_data.v_inval_ins_low))
@@ -5881,16 +5904,11 @@
return invalid;
}
-static bool tabla_is_inval_insert_delta(struct snd_soc_codec *codec,
- int mic_volt, int mic_volt_prev,
- int threshold)
+static bool tabla_is_inval_ins_delta(struct snd_soc_codec *codec,
+ int mic_volt, int mic_volt_prev,
+ int threshold)
{
- int delta = abs(mic_volt - mic_volt_prev);
- if (delta > threshold) {
- pr_debug("%s: volt delta %dmv\n", __func__, delta);
- return true;
- }
- return false;
+ return abs(mic_volt - mic_volt_prev) > threshold;
}
/* called under codec_resource_lock acquisition */
@@ -5899,13 +5917,21 @@
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
- if (plug_type == PLUG_TYPE_HEADPHONE
- && tabla->current_plug == PLUG_TYPE_NONE) {
+ if (plug_type == PLUG_TYPE_HEADPHONE &&
+ tabla->current_plug == PLUG_TYPE_NONE) {
/* Nothing was reported previously
- * reporte a headphone
+ * report a headphone or unsupported
*/
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
tabla_codec_cleanup_hs_polling(codec);
+ } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ if (tabla->current_plug == PLUG_TYPE_HEADSET)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADSET);
+ else if (tabla->current_plug == PLUG_TYPE_HEADPHONE)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+
+ tabla_codec_report_plug(codec, 1, SND_JACK_UNSUPPORTED);
+ tabla_codec_cleanup_hs_polling(codec);
} else if (plug_type == PLUG_TYPE_HEADSET) {
/* If Headphone was reported previously, this will
* only report the mic line
@@ -5923,6 +5949,9 @@
MBHC_USE_MB_TRIGGER |
MBHC_USE_HPHL_TRIGGER,
false);
+ } else {
+ WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
+ tabla->current_plug, plug_type);
}
}
@@ -5977,66 +6006,86 @@
enum tabla_mbhc_plug_type plug_type[num_det];
s16 mb_v[num_det];
s32 mic_mv[num_det];
- bool inval = false;
+ bool inval;
+ bool highdelta;
+ bool ahighv = false, highv;
/* make sure override is on */
WARN_ON(!(snd_soc_read(codec, TABLA_A_CDC_MBHC_B1_CTL) & 0x04));
+ /* GND and MIC swap detection requires at least 2 rounds of DCE */
+ BUG_ON(num_det < 2);
+
+ plug_type_ptr =
+ TABLA_MBHC_CAL_PLUG_TYPE_PTR(tabla->mbhc_cfg.calibration);
+
+ plug_type[0] = PLUG_TYPE_INVALID;
+
/* performs DCEs for N times
* 1st: check if voltage is in invalid range
* 2nd - N-2nd: check voltage range and delta
* N-1st: check voltage range, delta with HPHR GND switch
* Nth: check voltage range with VDDIO switch if micbias V != vddio V*/
- for (i = 0; i < num_det && !inval; i++) {
+ for (i = 0; i < num_det; i++) {
gndswitch = (i == (num_det - 1 - vddio));
- vddioswitch = (vddio && (i == num_det - 1));
+ vddioswitch = (vddio && ((i == num_det - 1) ||
+ (i == num_det - 2)));
if (i == 0) {
mb_v[i] = tabla_codec_setup_hs_polling(codec);
mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- inval = tabla_is_invalid_insertion_range(codec,
- mic_mv[i],
- highhph);
+ inval = tabla_is_inval_ins_range(codec, mic_mv[i],
+ highhph, &highv);
+ ahighv |= highv;
scaled = mic_mv[i];
- } else if (vddioswitch) {
- __tabla_codec_switch_micbias(tabla->codec, 1, false,
- false);
- mb_v[i] = __tabla_codec_sta_dce(codec, 1, true, true);
- mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- scaled = tabla_scale_v_micb_vddio(tabla, mic_mv[i],
- false);
- inval = (tabla_is_invalid_insertion_range(codec,
- mic_mv[i],
- highhph) ||
- tabla_is_inval_insert_delta(codec, scaled,
- mic_mv[i - 1],
- TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV));
- __tabla_codec_switch_micbias(tabla->codec, 0, false,
- false);
} else {
+ if (vddioswitch)
+ __tabla_codec_switch_micbias(tabla->codec, 1,
+ false, false);
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, true);
mb_v[i] = __tabla_codec_sta_dce(codec, 1, true, true);
mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
- inval = (tabla_is_invalid_insertion_range(codec,
+ if (vddioswitch)
+ scaled = tabla_scale_v_micb_vddio(tabla,
mic_mv[i],
- highhph) ||
- tabla_is_inval_insert_delta(codec, mic_mv[i],
- mic_mv[i - 1],
- TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV));
+ false);
+ else
+ scaled = mic_mv[i];
+ /* !gndswitch & vddioswitch means the previous DCE
+ * was done with gndswitch, don't compare with DCE
+ * with gndswitch */
+ highdelta = tabla_is_inval_ins_delta(codec, scaled,
+ mic_mv[i - !gndswitch - vddioswitch],
+ TABLA_MBHC_FAKE_INS_DELTA_SCALED_MV);
+ inval = (tabla_is_inval_ins_range(codec, mic_mv[i],
+ highhph, &highv) ||
+ highdelta);
+ ahighv |= highv;
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, false);
- scaled = mic_mv[i];
+ if (vddioswitch)
+ __tabla_codec_switch_micbias(tabla->codec, 0,
+ false, false);
+ /* claim UNSUPPORTED plug insertion when
+ * good headset is detected but HPHR GND switch makes
+ * delta difference */
+ if (i == (num_det - 2) && highdelta && !ahighv)
+ plug_type[0] = PLUG_TYPE_GND_MIC_SWAP;
+ else if (i == (num_det - 1) && inval)
+ plug_type[0] = PLUG_TYPE_INVALID;
}
pr_debug("%s: DCE #%d, %04x, V %d, scaled V %d, GND %d, "
- "invalid %d\n", __func__,
+ "VDDIO %d, inval %d\n", __func__,
i + 1, mb_v[i] & 0xffff, mic_mv[i], scaled, gndswitch,
- inval);
+ vddioswitch, inval);
+ /* don't need to run further DCEs */
+ if (ahighv && inval)
+ break;
+ mic_mv[i] = scaled;
}
- plug_type_ptr =
- TABLA_MBHC_CAL_PLUG_TYPE_PTR(tabla->mbhc_cfg.calibration);
- plug_type[0] = PLUG_TYPE_INVALID;
- for (i = 0; !inval && i < num_det; i++) {
+ for (i = 0; (plug_type[0] != PLUG_TYPE_GND_MIC_SWAP && !inval) &&
+ i < num_det; i++) {
/*
* If we are here, means none of the all
* measurements are fake, continue plug type detection.
@@ -6066,6 +6115,7 @@
}
}
+ pr_debug("%s: Detected plug type %d\n", __func__, plug_type[0]);
return plug_type[0];
}
@@ -6073,7 +6123,7 @@
{
struct tabla_priv *tabla;
struct snd_soc_codec *codec;
- int retry = 0;
+ int retry = 0, pt_gnd_mic_swap_cnt = 0;
bool correction = false;
enum tabla_mbhc_plug_type plug_type;
unsigned long timeout;
@@ -6124,14 +6174,33 @@
}
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
pr_debug("Good headphone detected, continue polling mic\n");
- if (tabla->current_plug == PLUG_TYPE_NONE) {
+ if (tabla->current_plug == PLUG_TYPE_NONE)
tabla_codec_report_plug(codec, 1,
SND_JACK_HEADPHONE);
- }
} else {
+ if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ pt_gnd_mic_swap_cnt++;
+ if (pt_gnd_mic_swap_cnt <
+ TABLA_MBHC_GND_MIC_SWAP_THRESHOLD)
+ continue;
+ else if (pt_gnd_mic_swap_cnt >
+ TABLA_MBHC_GND_MIC_SWAP_THRESHOLD) {
+ /* This is due to GND/MIC switch didn't
+ * work, Report unsupported plug */
+ } else if (tabla->mbhc_cfg.swap_gnd_mic) {
+ /* if switch is toggled, check again,
+ * otherwise report unsupported plug */
+ if (tabla->mbhc_cfg.swap_gnd_mic(codec))
+ continue;
+ }
+ } else
+ pt_gnd_mic_swap_cnt = 0;
+
TABLA_ACQUIRE_LOCK(tabla->codec_resource_lock);
/* Turn off override */
tabla_turn_onoff_override(codec, false);
+ /* The valid plug also includes PLUG_TYPE_GND_MIC_SWAP
+ */
tabla_find_plug_and_report(codec, plug_type);
TABLA_RELEASE_LOCK(tabla->codec_resource_lock);
pr_debug("Attempt %d found correct plug %d\n", retry,
@@ -6154,8 +6223,8 @@
/* called under codec_resource_lock acquisition */
static void tabla_codec_decide_gpio_plug(struct snd_soc_codec *codec)
{
- struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
enum tabla_mbhc_plug_type plug_type;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
pr_debug("%s: enter\n", __func__);
@@ -6169,7 +6238,8 @@
return;
}
- if (plug_type == PLUG_TYPE_INVALID) {
+ if (plug_type == PLUG_TYPE_INVALID ||
+ plug_type == PLUG_TYPE_GND_MIC_SWAP) {
tabla_schedule_hs_detect_plug(tabla);
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
@@ -6211,18 +6281,21 @@
return;
}
- plug_type = tabla_codec_get_plug_type(codec, tabla->mbhc_cfg.gpio ?
- true : false);
+ plug_type = tabla_codec_get_plug_type(codec, false);
tabla_turn_onoff_override(codec, false);
if (plug_type == PLUG_TYPE_INVALID) {
pr_debug("%s: Invalid plug type detected\n", __func__);
- snd_soc_update_bits(codec, TABLA_A_CDC_MBHC_B1_CTL,
- 0x02, 0x02);
+ snd_soc_update_bits(codec, TABLA_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
tabla_codec_cleanup_hs_polling(codec);
tabla_codec_enable_hs_detect(codec, 1,
MBHC_USE_MB_TRIGGER |
MBHC_USE_HPHL_TRIGGER, false);
+ } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
+ pr_debug("%s: GND-MIC swapped plug type detected\n", __func__);
+ tabla_codec_report_plug(codec, 1, SND_JACK_UNSUPPORTED);
+ tabla_codec_cleanup_hs_polling(codec);
+ tabla_codec_enable_hs_detect(codec, 0, 0, false);
} else if (plug_type == PLUG_TYPE_HEADPHONE) {
pr_debug("%s: Headphone Detected\n", __func__);
tabla_codec_report_plug(codec, 1, SND_JACK_HEADPHONE);
@@ -6284,7 +6357,13 @@
* it is possible that micbias will be switched to VDDIO.
*/
tabla_codec_switch_micbias(codec, 0);
- tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+ if (priv->current_plug == PLUG_TYPE_HEADPHONE)
+ tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
+ else if (priv->current_plug == PLUG_TYPE_GND_MIC_SWAP)
+ tabla_codec_report_plug(codec, 0, SND_JACK_UNSUPPORTED);
+ else
+ WARN(1, "%s: Unexpected current plug type %d\n",
+ __func__, priv->current_plug);
tabla_codec_shutdown_hs_removal_detect(codec);
tabla_codec_enable_hs_detect(codec, 1,
MBHC_USE_MB_TRIGGER |
@@ -6598,6 +6677,9 @@
if (tabla->current_plug == PLUG_TYPE_HEADPHONE) {
tabla_codec_report_plug(codec, 0, SND_JACK_HEADPHONE);
is_removed = true;
+ } else if (tabla->current_plug == PLUG_TYPE_GND_MIC_SWAP) {
+ tabla_codec_report_plug(codec, 0, SND_JACK_UNSUPPORTED);
+ is_removed = true;
} else if (tabla->current_plug == PLUG_TYPE_HEADSET) {
tabla_codec_pause_hs_polling(codec);
tabla_codec_cleanup_hs_polling(codec);
@@ -6650,17 +6732,70 @@
return r;
}
+static int tabla_mbhc_init_and_calibrate(struct tabla_priv *tabla)
+{
+ int ret = 0;
+ struct snd_soc_codec *codec = tabla->codec;
+
+ tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
+ tabla_mbhc_init(codec);
+ tabla_mbhc_cal(codec);
+ tabla_mbhc_calc_thres(codec);
+ tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
+ tabla_codec_calibrate_hs_polling(codec);
+ if (!tabla->mbhc_cfg.gpio) {
+ ret = tabla_codec_enable_hs_detect(codec, 1,
+ MBHC_USE_MB_TRIGGER |
+ MBHC_USE_HPHL_TRIGGER,
+ false);
+
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: Failed to setup MBHC detection\n",
+ __func__);
+ } else {
+ /* Enable Mic Bias pull down and HPH Switch to GND */
+ snd_soc_update_bits(codec, tabla->mbhc_bias_regs.ctl_reg,
+ 0x01, 0x01);
+ snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01, 0x01);
+ INIT_WORK(&tabla->hs_correct_plug_work,
+ tabla_hs_correct_gpio_plug);
+ }
+
+ if (!IS_ERR_VALUE(ret)) {
+ snd_soc_update_bits(codec, TABLA_A_RX_HPH_OCP_CTL, 0x10, 0x10);
+ wcd9xxx_enable_irq(codec->control_data,
+ TABLA_IRQ_HPH_PA_OCPL_FAULT);
+ wcd9xxx_enable_irq(codec->control_data,
+ TABLA_IRQ_HPH_PA_OCPR_FAULT);
+
+ if (tabla->mbhc_cfg.gpio) {
+ ret = request_threaded_irq(tabla->mbhc_cfg.gpio_irq,
+ NULL,
+ tabla_mechanical_plug_detect_irq,
+ (IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING),
+ "tabla-gpio", codec);
+ if (!IS_ERR_VALUE(ret)) {
+ ret = enable_irq_wake(tabla->mbhc_cfg.gpio_irq);
+ /* Bootup time detection */
+ tabla_hs_gpio_handler(codec);
+ }
+ }
+ }
+
+ return ret;
+}
+
static void mbhc_fw_read(struct work_struct *work)
{
struct delayed_work *dwork;
struct tabla_priv *tabla;
struct snd_soc_codec *codec;
const struct firmware *fw;
- int ret = -1, retry = 0, rc;
+ int ret = -1, retry = 0;
dwork = to_delayed_work(work);
- tabla = container_of(dwork, struct tabla_priv,
- mbhc_firmware_dwork);
+ tabla = container_of(dwork, struct tabla_priv, mbhc_firmware_dwork);
codec = tabla->codec;
while (retry < MBHC_FW_READ_ATTEMPTS) {
@@ -6672,7 +6807,7 @@
if (ret != 0) {
usleep_range(MBHC_FW_READ_TIMEOUT,
- MBHC_FW_READ_TIMEOUT);
+ MBHC_FW_READ_TIMEOUT);
} else {
pr_info("%s: MBHC Firmware read succesful\n", __func__);
break;
@@ -6691,32 +6826,7 @@
tabla->mbhc_fw = fw;
}
- tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
- tabla_mbhc_init(codec);
- tabla_mbhc_cal(codec);
- tabla_mbhc_calc_thres(codec);
- tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
- tabla_codec_calibrate_hs_polling(codec);
- if (!tabla->mbhc_cfg.gpio) {
- rc = tabla_codec_enable_hs_detect(codec, 1,
- MBHC_USE_MB_TRIGGER |
- MBHC_USE_HPHL_TRIGGER,
- false);
-
- if (IS_ERR_VALUE(rc))
- pr_err("%s: Failed to setup MBHC detection\n",
- __func__);
- } else {
- /* Enable Mic Bias pull down and HPH Switch to GND */
- snd_soc_update_bits(codec,
- tabla->mbhc_bias_regs.ctl_reg, 0x01,
- 0x01);
- snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01,
- 0x01);
- INIT_WORK(&tabla->hs_correct_plug_work,
- tabla_hs_correct_gpio_plug);
- }
-
+ (void) tabla_mbhc_init_and_calibrate(tabla);
}
int tabla_hs_detect(struct snd_soc_codec *codec,
@@ -6756,53 +6866,11 @@
INIT_WORK(&tabla->hphrocp_work, hphrocp_off_report);
INIT_DELAYED_WORK(&tabla->mbhc_insert_dwork, mbhc_insert_work);
- if (!tabla->mbhc_cfg.read_fw_bin) {
- tabla->mbhc_cfg.mclk_cb_fn(codec, 1, false);
- tabla_mbhc_init(codec);
- tabla_mbhc_cal(codec);
- tabla_mbhc_calc_thres(codec);
- tabla->mbhc_cfg.mclk_cb_fn(codec, 0, false);
- tabla_codec_calibrate_hs_polling(codec);
- if (!tabla->mbhc_cfg.gpio) {
- rc = tabla_codec_enable_hs_detect(codec, 1,
- MBHC_USE_MB_TRIGGER |
- MBHC_USE_HPHL_TRIGGER,
- false);
- } else {
- /* Enable Mic Bias pull down and HPH Switch to GND */
- snd_soc_update_bits(codec,
- tabla->mbhc_bias_regs.ctl_reg, 0x01,
- 0x01);
- snd_soc_update_bits(codec, TABLA_A_MBHC_HPH, 0x01,
- 0x01);
- INIT_WORK(&tabla->hs_correct_plug_work,
- tabla_hs_correct_gpio_plug);
- }
- } else {
+ if (!tabla->mbhc_cfg.read_fw_bin)
+ rc = tabla_mbhc_init_and_calibrate(tabla);
+ else
schedule_delayed_work(&tabla->mbhc_firmware_dwork,
usecs_to_jiffies(MBHC_FW_READ_TIMEOUT));
- }
-
- if (!IS_ERR_VALUE(rc)) {
- snd_soc_update_bits(codec, TABLA_A_RX_HPH_OCP_CTL, 0x10, 0x10);
- wcd9xxx_enable_irq(codec->control_data,
- TABLA_IRQ_HPH_PA_OCPL_FAULT);
- wcd9xxx_enable_irq(codec->control_data,
- TABLA_IRQ_HPH_PA_OCPR_FAULT);
- }
-
- if (!IS_ERR_VALUE(rc) && tabla->mbhc_cfg.gpio) {
- rc = request_threaded_irq(tabla->mbhc_cfg.gpio_irq, NULL,
- tabla_mechanical_plug_detect_irq,
- (IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING),
- "tabla-gpio", codec);
- if (!IS_ERR_VALUE(rc)) {
- rc = enable_irq_wake(tabla->mbhc_cfg.gpio_irq);
- /* Bootup time detection */
- tabla_hs_gpio_handler(codec);
- }
- }
return rc;
}
@@ -6837,7 +6905,6 @@
return IRQ_HANDLED;
}
-
static int tabla_handle_pdata(struct tabla_priv *tabla)
{
struct snd_soc_codec *codec = tabla->codec;
@@ -7248,6 +7315,9 @@
p->v_inval_ins_low);
n += scnprintf(buffer + n, size - n, "v_inval_ins_high = %d\n",
p->v_inval_ins_high);
+ if (tabla->mbhc_cfg.gpio)
+ n += scnprintf(buffer + n, size - n, "GPIO insert = %d\n",
+ tabla_hs_gpio_level_remove(tabla));
buffer[n] = 0;
return simple_read_from_buffer(buf, count, pos, buffer, n);
diff --git a/sound/soc/codecs/wcd9310.h b/sound/soc/codecs/wcd9310.h
index 38ec27c..1cca360 100644
--- a/sound/soc/codecs/wcd9310.h
+++ b/sound/soc/codecs/wcd9310.h
@@ -176,6 +176,8 @@
unsigned int gpio;
unsigned int gpio_irq;
int gpio_level_insert;
+ /* swap_gnd_mic returns true if extern GND/MIC swap switch toggled */
+ bool (*swap_gnd_mic) (struct snd_soc_codec *);
};
extern int tabla_hs_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index c8ef419..0c72880 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -58,6 +58,8 @@
#define TABLA_MBHC_DEF_BUTTONS 8
#define TABLA_MBHC_DEF_RLOADS 5
+#define JACK_DETECT_GPIO 38
+
/* Shared channel numbers for Slimbus ports that connect APQ to MDM. */
enum {
SLIM_1_RX_1 = 145, /* BT-SCO and USB TX */
@@ -97,6 +99,15 @@
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
+static int apq8064_hs_detect_use_gpio = -1;
+module_param(apq8064_hs_detect_use_gpio, int, 0444);
+MODULE_PARM_DESC(apq8064_hs_detect_use_gpio, "Use GPIO for headset detection");
+
+static bool apq8064_hs_detect_use_firmware;
+module_param(apq8064_hs_detect_use_firmware, bool, 0444);
+MODULE_PARM_DESC(apq8064_hs_detect_use_firmware, "Use firmware for headset "
+ "detection");
+
static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
@@ -108,7 +119,7 @@
.micbias = TABLA_MICBIAS2,
.mclk_cb_fn = msm_enable_codec_ext_clk,
.mclk_rate = TABLA_EXT_CLK_RATE,
- .gpio = 0, /* MBHC GPIO is not configured */
+ .gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
};
@@ -1037,10 +1048,10 @@
return ret;
}
-
static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
int err;
+ uint32_t revision;
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
@@ -1097,6 +1108,48 @@
codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+ /* APQ8064 Rev 1.1 CDP and Liquid have mechanical switch */
+ revision = socinfo_get_version();
+ if (apq8064_hs_detect_use_gpio != -1) {
+ if (apq8064_hs_detect_use_gpio == 1)
+ pr_debug("%s: MBHC mechanical is enabled by request\n",
+ __func__);
+ else if (apq8064_hs_detect_use_gpio == 0)
+ pr_debug("%s: MBHC mechanical is disabled by request\n",
+ __func__);
+ else
+ pr_warn("%s: Invalid hs_detect_use_gpio %d\n", __func__,
+ apq8064_hs_detect_use_gpio);
+ } else if (SOCINFO_VERSION_MAJOR(revision) == 0) {
+ pr_warn("%s: Unknown HW revision detected %d.%d\n", __func__,
+ SOCINFO_VERSION_MAJOR(revision),
+ SOCINFO_VERSION_MINOR(revision));
+ } else if ((SOCINFO_VERSION_MAJOR(revision) == 1 &&
+ SOCINFO_VERSION_MINOR(revision) >= 1 &&
+ (machine_is_apq8064_cdp() ||
+ machine_is_apq8064_liquid())) ||
+ SOCINFO_VERSION_MAJOR(revision) > 1) {
+ pr_debug("%s: MBHC mechanical switch available APQ8064 "
+ "detected\n", __func__);
+ apq8064_hs_detect_use_gpio = 1;
+ }
+
+ if (apq8064_hs_detect_use_gpio == 1) {
+ pr_debug("%s: Using MBHC mechanical switch\n", __func__);
+ mbhc_cfg.gpio = JACK_DETECT_GPIO;
+ mbhc_cfg.gpio_irq = gpio_to_irq(JACK_DETECT_GPIO);
+ err = gpio_request(mbhc_cfg.gpio, "MBHC_HS_DETECT");
+ if (err < 0) {
+ pr_err("%s: gpio_request %d failed %d\n", __func__,
+ mbhc_cfg.gpio, err);
+ return err;
+ }
+ gpio_direction_input(JACK_DETECT_GPIO);
+ } else
+ pr_debug("%s: Not using MBHC mechanical switch\n", __func__);
+
+ mbhc_cfg.read_fw_bin = apq8064_hs_detect_use_firmware;
+
err = tabla_hs_detect(codec, &mbhc_cfg);
return err;
@@ -1119,7 +1172,8 @@
},
};
-static struct snd_soc_dsp_link slimbus0_hl_media = {
+/* bi-directional media definition for hostless PCM device */
+static struct snd_soc_dsp_link bidir_hl_media = {
.playback = true,
.capture = true,
.trigger = {
@@ -1128,9 +1182,8 @@
},
};
-static struct snd_soc_dsp_link int_fm_hl_media = {
+static struct snd_soc_dsp_link hdmi_rx_hl = {
.playback = true,
- .capture = true,
.trigger = {
SND_SOC_DSP_TRIGGER_POST,
SND_SOC_DSP_TRIGGER_POST
@@ -1377,7 +1430,7 @@
.name = "MSM8960 Media2",
.stream_name = "MultiMedia2",
.cpu_dai_name = "MultiMedia2",
- .platform_name = "msm-pcm-dsp",
+ .platform_name = "msm-multi-ch-pcm-dsp",
.dynamic = 1,
.dsp_link = &fe_media,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
@@ -1418,7 +1471,7 @@
.cpu_dai_name = "SLIMBUS0_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
- .dsp_link = &slimbus0_hl_media,
+ .dsp_link = &bidir_hl_media,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* .be_id = do not care */
@@ -1429,7 +1482,7 @@
.cpu_dai_name = "INT_FM_HOSTLESS",
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
- .dsp_link = &int_fm_hl_media,
+ .dsp_link = &bidir_hl_media,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
/* .be_id = do not care */
@@ -1453,6 +1506,37 @@
.ignore_suspend = 1,
},
{
+ .name = "MSM8960 Compr",
+ .stream_name = "COMPR",
+ .cpu_dai_name = "MultiMedia4",
+ .platform_name = "msm-compr-dsp",
+ .dynamic = 1,
+ .dsp_link = &lpa_fe_media,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ },
+ {
+ .name = "AUXPCM Hostless",
+ .stream_name = "AUXPCM Hostless",
+ .cpu_dai_name = "AUXPCM_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dsp_link = &bidir_hl_media,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ },
+ /* HDMI Hostless */
+ {
+ .name = "HDMI_RX_HOSTLESS",
+ .stream_name = "HDMI_RX_HOSTLESS",
+ .cpu_dai_name = "HDMI_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dsp_link = &hdmi_rx_hl,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .no_codec = 1,
+ .ignore_suspend = 1,
+ },
+ {
.name = "Voice Stub",
.stream_name = "Voice Stub",
.cpu_dai_name = "VOICE_STUB",
@@ -1826,6 +1910,8 @@
}
msm_free_headset_mic_gpios();
platform_device_unregister(msm_snd_device);
+ if (mbhc_cfg.gpio)
+ gpio_free(mbhc_cfg.gpio);
kfree(mbhc_cfg.calibration);
}
module_exit(msm_audio_exit);
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index f02a7ef..7060677 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -16,6 +16,7 @@
#include <linux/mfd/pm8xxx/pm8018.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -26,6 +27,7 @@
#include <mach/socinfo.h>
#include "msm-pcm-routing.h"
#include "../codecs/wcd9310.h"
+#include <mach/gpiomux.h>
/* 9615 machine driver */
@@ -56,8 +58,189 @@
#define TABLA_MBHC_DEF_BUTTONS 8
#define TABLA_MBHC_DEF_RLOADS 5
-static u32 top_spk_pamp_gpio = PM8018_GPIO_PM_TO_SYS(18);
-static u32 bottom_spk_pamp_gpio = PM8018_GPIO_PM_TO_SYS(19);
+/*
+ * Added for I2S
+ */
+#define GPIO_SPKR_I2S_MCLK 24
+#define GPIO_PRIM_I2S_SCK 20
+#define GPIO_PRIM_I2S_DOUT 23
+#define GPIO_PRIM_I2S_WS 21
+#define GPIO_PRIM_I2S_DIN 22
+#define GPIO_SEC_I2S_SCK 25
+#define GPIO_SEC_I2S_WS 26
+#define GPIO_SEC_I2S_DOUT 28
+#define GPIO_SEC_I2S_DIN 27
+
+static struct gpiomux_setting cdc_i2s_mclk = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting cdc_i2s_sclk = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting cdc_i2s_dout = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting cdc_i2s_ws = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting cdc_i2s_din = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+
+static struct msm_gpiomux_config msm9615_audio_prim_i2s_codec_configs[] = {
+ {
+ .gpio = GPIO_SPKR_I2S_MCLK,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &cdc_i2s_mclk,
+ },
+ },
+ {
+ .gpio = GPIO_PRIM_I2S_SCK,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &cdc_i2s_sclk,
+ },
+ },
+ {
+ .gpio = GPIO_PRIM_I2S_DOUT,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &cdc_i2s_dout,
+ },
+ },
+ {
+ .gpio = GPIO_PRIM_I2S_WS,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &cdc_i2s_ws,
+ },
+ },
+ {
+ .gpio = GPIO_PRIM_I2S_DIN,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &cdc_i2s_din,
+ },
+ },
+};
+
+/* Physical address for LPA CSR
+ * LPA SIF mux registers. These are
+ * ioremap( ) for Virtual address.
+ */
+#define LPASS_CSR_BASE 0x28000000
+#define LPA_IF_BASE 0x28100000
+#define SIF_MUX_REG_BASE (LPASS_CSR_BASE + 0x00000000)
+#define LPA_IF_REG_BASE (LPA_IF_BASE + 0x00000000)
+#define LPASS_SIF_MUX_ADDR (SIF_MUX_REG_BASE + 0x00004000)
+#define LPAIF_SPARE_ADDR (LPA_IF_REG_BASE + 0x00000070)
+/* SIF & SPARE MUX Values */
+#define MSM_SIF_FUNC_PCM 0
+#define MSM_SIF_FUNC_I2S_MIC 1
+#define MSM_SIF_FUNC_I2S_SPKR 2
+#define MSM_LPAIF_SPARE_DISABLE 0x0
+#define MSM_LPAIF_SPARE_BOTH_ENABLE 0x3
+
+/* I2S INTF CTL */
+#define MSM_INTF_PRIM 0
+#define MSM_INTF_SECN 1
+#define MSM_INTF_BOTH 2
+
+/* I2S Dir CTL */
+#define MSM_DIR_RX 0
+#define MSM_DIR_TX 1
+#define MSM_DIR_BOTH 2
+#define MSM_DIR_MAX 3
+
+/* I2S HW Params */
+#define NO_OF_BITS_PER_SAMPLE 16
+#define I2S_MIC_SCLK_RATE 1536000
+static int msm9615_i2s_rx_ch = 1;
+static int msm9615_i2s_tx_ch = 1;
+static int msm9615_i2s_spk_control;
+/* SIF mux bit mask & shift */
+#define LPASS_SIF_MUX_CTL_PRI_MUX_SEL_BMSK 0x30000
+#define LPASS_SIF_MUX_CTL_PRI_MUX_SEL_SHFT 0x10
+#define LPASS_SIF_MUX_CTL_SEC_MUX_SEL_BMSK 0x3
+#define LPASS_SIF_MUX_CTL_SEC_MUX_SEL_SHFT 0x0
+
+#define LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_BMSK 0x3
+#define LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_SHFT 0x2
+#define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK 0x3
+#define LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_SHFT 0x0
+
+static u32 spare_shadow;
+static u32 sif_shadow;
+
+
+struct msm_i2s_mux_ctl {
+ const u8 sifconfig;
+ const u8 spareconfig;
+};
+struct msm_clk {
+ struct clk *osr_clk;
+ struct clk *bit_clk;
+ int clk_enable;
+};
+struct msm_i2s_clk {
+ struct msm_clk rx_clk;
+ struct msm_clk tx_clk;
+};
+struct msm_i2s_ctl {
+ struct msm_i2s_clk prim_clk;
+ struct msm_i2s_clk sec_clk;
+ struct msm_i2s_mux_ctl mux_ctl[MSM_DIR_MAX];
+ u8 intf_status[MSM_INTF_BOTH][MSM_DIR_BOTH];
+ void *sif_virt_addr;
+ void *spare_virt_addr;
+};
+static struct msm_i2s_ctl msm9x15_i2s_ctl = {
+ {{NULL, NULL, 0}, {NULL, NULL, 0} }, /* prim_clk */
+ {{NULL, NULL, 0}, {NULL, NULL, 0} }, /* sec_clk */
+ /* mux_ctl */
+ {
+ /* Rx path only */
+ { MSM_SIF_FUNC_I2S_SPKR, MSM_LPAIF_SPARE_DISABLE },
+ /* Tx path only */
+ { MSM_SIF_FUNC_I2S_MIC, MSM_LPAIF_SPARE_DISABLE },
+ /* Rx + Tx path only */
+ { MSM_SIF_FUNC_I2S_SPKR, MSM_LPAIF_SPARE_BOTH_ENABLE },
+ },
+ /* intf_status */
+ {
+ /* Prim I2S */
+ {0, 0},
+ /* Sec I2S */
+ {0, 0}
+ },
+ /* sif_virt_addr */
+ NULL,
+ /* spare_virt_addr */
+ NULL,
+};
+
+enum msm9x15_set_i2s_clk {
+ MSM_I2S_CLK_SET_FALSE,
+ MSM_I2S_CLK_SET_TRUE,
+ MSM_I2S_CLK_SET_RATE0,
+};
+/*
+ * Added for I2S
+ */
+
+static u32 top_spk_pamp_gpio = PM8018_GPIO_PM_TO_SYS(3);
+static u32 bottom_spk_pamp_gpio = PM8018_GPIO_PM_TO_SYS(5);
static int mdm9615_spk_control;
static int mdm9615_ext_bottom_spk_pamp;
static int mdm9615_ext_top_spk_pamp;
@@ -318,16 +501,15 @@
pr_debug("%s: clk_users = %d\n", __func__, clk_users);
if (clk_users != 1)
return 0;
+ if (IS_ERR(codec_clk)) {
- if (codec_clk) {
- clk_set_rate(codec_clk, TABLA_EXT_CLK_RATE);
- clk_prepare_enable(codec_clk);
- tabla_mclk_enable(codec, 1, dapm);
- } else {
pr_err("%s: Error setting Tabla MCLK\n", __func__);
clk_users--;
return -EINVAL;
}
+ clk_set_rate(codec_clk, TABLA_EXT_CLK_RATE);
+ clk_prepare_enable(codec_clk);
+ tabla_mclk_enable(codec, 1, dapm);
} else {
pr_debug("%s: clk_users = %d\n", __func__, clk_users);
if (clk_users == 0)
@@ -651,6 +833,20 @@
return tabla_cal;
}
+static int msm9615_i2s_set_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ pr_debug("%s()\n", __func__);
+ if (msm9615_i2s_spk_control == ucontrol->value.integer.value[0])
+ return 0;
+
+ msm9615_i2s_spk_control = ucontrol->value.integer.value[0];
+ mdm9615_ext_control(codec);
+ return 1;
+}
+
static int mdm9615_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -704,13 +900,567 @@
__func__);
goto end;
}
-
-
}
end:
return ret;
}
+static int msm9615_i2s_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm9615_i2s_rx_ch = %d\n", __func__,
+ msm9615_i2s_rx_ch);
+ ucontrol->value.integer.value[0] = msm9615_i2s_rx_ch - 1;
+ return 0;
+}
+
+static int msm9615_i2s_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm9615_i2s_rx_ch = ucontrol->value.integer.value[0] + 1;
+
+ pr_debug("%s: msm9615_i2s_rx_ch = %d\n", __func__,
+ msm9615_i2s_rx_ch);
+ return 1;
+}
+
+static int msm9615_i2s_tx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm9615_i2s_tx_ch = %d\n", __func__,
+ msm9615_i2s_tx_ch);
+ ucontrol->value.integer.value[0] = msm9615_i2s_tx_ch - 1;
+ return 0;
+}
+
+static int msm9615_i2s_tx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm9615_i2s_tx_ch = ucontrol->value.integer.value[0] + 1;
+
+ pr_debug("%s: msm9615_i2s_tx_ch = %d\n", __func__,
+ msm9615_i2s_tx_ch);
+ return 1;
+}
+
+static int msm9615_i2s_get_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm9615_spk_control = %d", __func__, mdm9615_spk_control);
+ ucontrol->value.integer.value[0] = msm9615_i2s_spk_control;
+ return 0;
+}
+
+static const struct snd_kcontrol_new tabla_msm9615_i2s_controls[] = {
+ SOC_ENUM_EXT("Speaker Function", mdm9615_enum[0], msm9615_i2s_get_spk,
+ msm9615_i2s_set_spk),
+ SOC_ENUM_EXT("PRI_RX Channels", mdm9615_enum[1],
+ msm9615_i2s_rx_ch_get, msm9615_i2s_rx_ch_put),
+ SOC_ENUM_EXT("PRI_TX Channels", mdm9615_enum[2],
+ msm9615_i2s_tx_ch_get, msm9615_i2s_tx_ch_put),
+};
+
+static int msm9615_i2s_audrx_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int err;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ err = snd_soc_add_controls(codec, tabla_msm9615_i2s_controls,
+ ARRAY_SIZE(tabla_msm9615_i2s_controls));
+ if (err < 0) {
+ pr_err("returning loc 1 err = %d\n", err);
+ return err;
+ }
+
+ snd_soc_dapm_new_controls(dapm, mdm9615_dapm_widgets,
+ ARRAY_SIZE(mdm9615_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, common_audio_map,
+ ARRAY_SIZE(common_audio_map));
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+
+ snd_soc_dapm_sync(dapm);
+
+ err = snd_soc_jack_new(codec, "Headset Jack",
+ (SND_JACK_HEADSET | SND_JACK_OC_HPHL|
+ SND_JACK_OC_HPHR), &hs_jack);
+ if (err) {
+ pr_err("failed to create new jack\n");
+ return err;
+ }
+ err = snd_soc_jack_new(codec, "Button Jack",
+ TABLA_JACK_BUTTON_MASK, &button_jack);
+ if (err) {
+ pr_err("failed to create new jack\n");
+ return err;
+ }
+ codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+ err = tabla_hs_detect(codec, &mbhc_cfg);
+ return err;
+}
+
+static int msm9615_i2s_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = msm9615_i2s_rx_ch;
+
+ return 0;
+}
+
+static int msm9615_i2s_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ rate->min = rate->max = 48000;
+
+ channels->min = channels->max = msm9615_i2s_tx_ch;
+
+ return 0;
+}
+
+static int mdm9615_i2s_free_gpios(u8 i2s_intf, u8 i2s_dir)
+{
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ if (i2s_intf == MSM_INTF_PRIM) {
+ if (i2s_dir == MSM_DIR_RX)
+ gpio_free(GPIO_PRIM_I2S_DOUT);
+ if (i2s_dir == MSM_DIR_TX)
+ gpio_free(GPIO_PRIM_I2S_DIN);
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ gpio_free(GPIO_PRIM_I2S_SCK);
+ gpio_free(GPIO_PRIM_I2S_WS);
+ }
+ } else if (i2s_intf == MSM_INTF_SECN) {
+ if (i2s_dir == MSM_DIR_RX)
+ gpio_free(GPIO_SEC_I2S_DOUT);
+ if (i2s_dir == MSM_DIR_TX)
+ gpio_free(GPIO_SEC_I2S_DIN);
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ gpio_free(GPIO_SEC_I2S_WS);
+ gpio_free(GPIO_SEC_I2S_SCK);
+ }
+ }
+ return 0;
+}
+
+int msm9615_i2s_intf_dir_sel(const char *cpu_dai_name,
+ u8 *i2s_intf, u8 *i2s_dir)
+{
+ int ret = 0;
+ if (i2s_intf == NULL || i2s_dir == NULL || cpu_dai_name == NULL) {
+ ret = 1;
+ goto err;
+ }
+ if (!strncmp(cpu_dai_name, "msm-dai-q6.0", 12)) {
+ *i2s_intf = MSM_INTF_PRIM;
+ *i2s_dir = MSM_DIR_RX;
+ } else if (!strncmp(cpu_dai_name, "msm-dai-q6.1", 12)) {
+ *i2s_intf = MSM_INTF_PRIM;
+ *i2s_dir = MSM_DIR_TX;
+ } else if (!strncmp(cpu_dai_name, "msm-dai-q6.4", 12)) {
+ *i2s_intf = MSM_INTF_SECN;
+ *i2s_dir = MSM_DIR_RX;
+ } else if (!strncmp(cpu_dai_name, "msm-dai-q6.5", 12)) {
+ *i2s_intf = MSM_INTF_SECN;
+ *i2s_dir = MSM_DIR_TX;
+ } else {
+ pr_err("Error in I2S cpu dai name\n");
+ ret = 1;
+ }
+err:
+ return ret;
+}
+
+int msm9615_enable_i2s_gpio(u8 i2s_intf, u8 i2s_dir)
+{
+ u8 ret = 0;
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ if (i2s_intf == MSM_INTF_PRIM) {
+ if (i2s_dir == MSM_DIR_TX) {
+ ret = gpio_request(GPIO_PRIM_I2S_DIN, "I2S_PRIM_DIN");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_PRIM_I2S_DIN);
+ goto err;
+ }
+ } else if (i2s_dir == MSM_DIR_RX) {
+ ret = gpio_request(GPIO_PRIM_I2S_DOUT,
+ "I2S_PRIM_DOUT");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_PRIM_I2S_DOUT);
+ goto err;
+ }
+ } else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ ret = gpio_request(GPIO_PRIM_I2S_SCK, "I2S_PRIM_SCK");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_PRIM_I2S_SCK);
+ goto err;
+ }
+ ret = gpio_request(GPIO_PRIM_I2S_WS, "I2S_PRIM_WS");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_PRIM_I2S_WS);
+ goto err;
+ }
+ }
+ } else if (i2s_intf == MSM_INTF_SECN) {
+ if (i2s_dir == MSM_DIR_RX) {
+ ret = gpio_request(GPIO_SEC_I2S_DOUT, "I2S_SEC_DOUT");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_SEC_I2S_DOUT);
+ goto err;
+ }
+ } else if (i2s_dir == MSM_DIR_TX) {
+ ret = gpio_request(GPIO_SEC_I2S_DIN, "I2S_SEC_DIN");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_SEC_I2S_DIN);
+ goto err;
+ }
+ } else if (pintf->intf_status[i2s_intf][MSM_DIR_TX] == 0 &&
+ pintf->intf_status[i2s_intf][MSM_DIR_RX] == 0) {
+ ret = gpio_request(GPIO_SEC_I2S_SCK, "I2S_SEC_SCK");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_SEC_I2S_SCK);
+ goto err;
+ }
+ ret = gpio_request(GPIO_SEC_I2S_WS, "I2S_SEC_WS");
+ if (ret) {
+ pr_err("%s: Failed to request gpio %d\n",
+ __func__, GPIO_SEC_I2S_WS);
+ goto err;
+ }
+ }
+ }
+err:
+ return ret;
+}
+
+static int msm9615_set_i2s_osr_bit_clk(struct snd_soc_dai *cpu_dai,
+ u8 i2s_intf, u8 i2s_dir,
+ enum msm9x15_set_i2s_clk enable)
+{
+
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ struct msm_i2s_clk *pclk = &pintf->prim_clk;
+ struct msm_clk *clk_ctl = &pclk->rx_clk;
+ u8 ret = 0;
+ pr_debug("Dev name %s Intf =%d, Dir = %d, Enable=%d\n",
+ cpu_dai->name, i2s_intf, i2s_dir, enable);
+ if (i2s_intf == MSM_INTF_PRIM)
+ pclk = &pintf->prim_clk;
+ else if (i2s_intf == MSM_INTF_SECN)
+ pclk = &pintf->sec_clk;
+
+ if (i2s_dir == MSM_DIR_TX)
+ clk_ctl = &pclk->tx_clk;
+ else if (i2s_dir == MSM_DIR_RX)
+ clk_ctl = &pclk->rx_clk;
+
+ if (enable == MSM_I2S_CLK_SET_TRUE ||
+ enable == MSM_I2S_CLK_SET_RATE0) {
+ if (clk_ctl->clk_enable != 0) {
+ pr_info("%s: I2S Clk is already enabled"
+ "clk users %d\n", __func__,
+ clk_ctl->clk_enable);
+ ret = 0;
+ goto err;
+ }
+ clk_ctl->osr_clk = clk_get(cpu_dai->dev, "osr_clk");
+ if (IS_ERR(clk_ctl->osr_clk)) {
+ pr_err("%s: Fail to get OSR CLK\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = clk_prepare(clk_ctl->osr_clk);
+ if (ret != 0) {
+ pr_err("Unable to prepare i2s_spkr_osr_clk\n");
+ goto err;
+ }
+ clk_set_rate(clk_ctl->osr_clk, TABLA_EXT_CLK_RATE);
+ ret = clk_enable(clk_ctl->osr_clk);
+ if (ret != 0) {
+ pr_err("Fail to enable i2s_spkr_osr_clk\n");
+ clk_unprepare(clk_ctl->osr_clk);
+ goto err;
+ }
+ clk_ctl->bit_clk = clk_get(cpu_dai->dev, "bit_clk");
+ if (IS_ERR(clk_ctl->bit_clk)) {
+ pr_err("Fail to get i2s_spkr_bit_clk\n");
+ clk_disable(clk_ctl->osr_clk);
+ clk_unprepare(clk_ctl->osr_clk);
+ clk_put(clk_ctl->osr_clk);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = clk_prepare(clk_ctl->bit_clk);
+ if (ret != 0) {
+ clk_disable(clk_ctl->osr_clk);
+ clk_unprepare(clk_ctl->osr_clk);
+ clk_put(clk_ctl->osr_clk);
+ pr_err("Fail to prepare i2s_spkr_osr_clk\n");
+ goto err;
+ }
+ if (enable == MSM_I2S_CLK_SET_RATE0)
+ clk_set_rate(clk_ctl->bit_clk, 0);
+ else
+ clk_set_rate(clk_ctl->bit_clk, 8);
+ ret = clk_enable(clk_ctl->bit_clk);
+ if (ret != 0) {
+ clk_disable(clk_ctl->osr_clk);
+ clk_unprepare(clk_ctl->osr_clk);
+ clk_put(clk_ctl->osr_clk);
+ clk_unprepare(clk_ctl->bit_clk);
+ pr_err("Unable to enable i2s_spkr_osr_clk\n");
+ goto err;
+ }
+ clk_ctl->clk_enable++;
+ } else if (enable == MSM_I2S_CLK_SET_FALSE &&
+ clk_ctl->clk_enable != 0) {
+ clk_disable(clk_ctl->osr_clk);
+ clk_disable(clk_ctl->bit_clk);
+ clk_unprepare(clk_ctl->osr_clk);
+ clk_unprepare(clk_ctl->bit_clk);
+ clk_put(clk_ctl->bit_clk);
+ clk_put(clk_ctl->osr_clk);
+ clk_ctl->bit_clk = NULL;
+ clk_ctl->osr_clk = NULL;
+ clk_ctl->clk_enable--;
+ ret = 0;
+ }
+err:
+ return ret;
+}
+
+void msm9615_config_i2s_sif_mux(u8 value)
+{
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ sif_shadow = 0x00000;
+ sif_shadow = (sif_shadow & LPASS_SIF_MUX_CTL_PRI_MUX_SEL_BMSK) |
+ (value << LPASS_SIF_MUX_CTL_PRI_MUX_SEL_SHFT);
+ iowrite32(sif_shadow, pintf->sif_virt_addr);
+ /* Dont read SIF register. Device crashes. */
+ pr_debug("%s() SIF Reg = 0x%x\n", __func__, sif_shadow);
+}
+
+void msm9615_config_i2s_spare_mux(u8 value, u8 i2s_intf)
+{
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ if (i2s_intf == MSM_INTF_PRIM) {
+ /* Configure Primary SIF */
+ spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_BMSK
+ ) | (value << LPAIF_SPARE_MUX_CTL_PRI_MUX_SEL_SHFT);
+ }
+ if (i2s_intf == MSM_INTF_SECN) {
+ /*Secondary interface configuration*/
+ spare_shadow = (spare_shadow & LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_BMSK
+ ) | (value << LPAIF_SPARE_MUX_CTL_SEC_MUX_SEL_SHFT);
+ }
+ iowrite32(spare_shadow, pintf->spare_virt_addr);
+ /* Dont read SPARE register. Device crashes. */
+ pr_debug("%s( ): SPARE Reg =0x%x\n", __func__, spare_shadow);
+}
+
/*
 * msm9615_i2s_hw_params() - hw_params handler for the I2S back-end links.
 *
 * For playback, derives the bit-clock divider from the fixed OSR clock
 * (TABLA_EXT_CLK_RATE) and the requested sample rate, and reprograms
 * the bit clock only when the divider differs from the power-up value
 * of 8.  The capture divider is computed but intentionally not applied
 * (see the comment in the capture branch).
 *
 * NOTE(review): returns 1 rather than the conventional 0 on success;
 * ASoC generally treats only negative values as errors, but confirm
 * against the callers before changing.
 */
static int msm9615_i2s_hw_params(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params)
{
	int rate = params_rate(params);
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
	struct msm_i2s_clk *pclk = &pintf->prim_clk;
	struct msm_clk *clk_ctl = &pclk->rx_clk;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int bit_clk_set = 0;
	u8 i2s_intf, i2s_dir;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!msm9615_i2s_intf_dir_sel(cpu_dai->name,
			&i2s_intf, &i2s_dir)) {
			/* divider relative to the fixed OSR clock */
			bit_clk_set = TABLA_EXT_CLK_RATE /
				      (rate * 2 * NO_OF_BITS_PER_SAMPLE);
			if (bit_clk_set != 8) {
				if (i2s_intf == MSM_INTF_PRIM)
					pclk = &pintf->prim_clk;
				else if (i2s_intf == MSM_INTF_SECN)
					pclk = &pintf->sec_clk;
				clk_ctl = &pclk->rx_clk;
				pr_debug("%s( ): New rate = %d",
					__func__, bit_clk_set);
				clk_set_rate(clk_ctl->bit_clk, bit_clk_set);
			}
		}
	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		bit_clk_set = I2S_MIC_SCLK_RATE / (rate * 2 *
				NO_OF_BITS_PER_SAMPLE);
		/* Not required to modify TX rate.
		 * Speaker clock are looped back
		 * to Mic.
		 */
	}
	return 1;
}
+
+static int msm9615_i2s_startup(struct snd_pcm_substream *substream)
+{
+ u8 ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ u8 i2s_intf, i2s_dir;
+ if (!msm9615_i2s_intf_dir_sel(cpu_dai->name, &i2s_intf, &i2s_dir)) {
+ pr_debug("%s( ): cpu name = %s intf =%d dir = %d\n",
+ __func__, cpu_dai->name, i2s_intf, i2s_dir);
+ pr_debug("%s( ): Enable status Rx =%d Tx = %d\n", __func__,
+ pintf->intf_status[i2s_intf][MSM_DIR_RX],
+ pintf->intf_status[i2s_intf][MSM_DIR_TX]);
+ msm9615_enable_i2s_gpio(i2s_intf, i2s_dir);
+ if (i2s_dir == MSM_DIR_TX) {
+ if (pintf->intf_status[i2s_intf][MSM_DIR_RX] > 0) {
+ /* This means that Rx is enabled before */
+ ret = msm9615_set_i2s_osr_bit_clk(cpu_dai,
+ i2s_intf, i2s_dir,
+ MSM_I2S_CLK_SET_RATE0);
+ if (ret != 0) {
+ pr_err("%s: Fail enable I2S clock\n",
+ __func__);
+ return -EINVAL;
+ }
+ msm9615_config_i2s_sif_mux(
+ pintf->mux_ctl[MSM_DIR_BOTH].sifconfig);
+ msm9615_config_i2s_spare_mux(
+ pintf->mux_ctl[MSM_DIR_BOTH].spareconfig,
+ i2s_intf);
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ pr_err("set fmt cpu dai failed\n");
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ pr_err("set fmt codec dai failed\n");
+ } else if (pintf->intf_status[i2s_intf][i2s_dir] == 0) {
+ /* This means that Rx is
+ * not enabled before.
+ * only Tx will be used.
+ */
+ ret = msm9615_set_i2s_osr_bit_clk(cpu_dai,
+ i2s_intf, i2s_dir,
+ MSM_I2S_CLK_SET_TRUE);
+ if (ret != 0) {
+ pr_err("%s: Fail Tx I2S clock\n",
+ __func__);
+ return -EINVAL;
+ }
+ msm9615_config_i2s_sif_mux(
+ pintf->mux_ctl[MSM_DIR_TX].sifconfig);
+ msm9615_config_i2s_spare_mux(
+ pintf->mux_ctl[MSM_DIR_TX].spareconfig,
+ i2s_intf);
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ pr_err("set fmt cpu dai failed\n");
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ pr_err("set fmt codec dai failed\n");
+ }
+ } else if (i2s_dir == MSM_DIR_RX) {
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] > 0) {
+ pr_err("%s: Error shutdown Tx first\n",
+ __func__);
+ return -EINVAL;
+ } else if (pintf->intf_status[i2s_intf][i2s_dir]
+ == 0) {
+ ret = msm9615_set_i2s_osr_bit_clk(cpu_dai,
+ i2s_intf, i2s_dir,
+ MSM_I2S_CLK_SET_TRUE);
+ if (ret != 0) {
+ pr_err("%s: Fail Rx I2S clock\n",
+ __func__);
+ return -EINVAL;
+ }
+ msm9615_config_i2s_sif_mux(
+ pintf->mux_ctl[MSM_DIR_RX].sifconfig);
+ msm9615_config_i2s_spare_mux(
+ pintf->mux_ctl[MSM_DIR_RX].spareconfig,
+ i2s_intf);
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ pr_err("set fmt cpu dai failed\n");
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ pr_err("set fmt codec dai failed\n");
+ }
+ }
+ pintf->intf_status[i2s_intf][i2s_dir]++;
+ } else {
+ pr_err("%s: Err in i2s_intf_dir_sel\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("Exit %s() Enable status Rx =%d Tx = %d\n", __func__,
+ pintf->intf_status[i2s_intf][MSM_DIR_RX],
+ pintf->intf_status[i2s_intf][MSM_DIR_TX]);
+ return ret;
+}
+
+static void msm9615_i2s_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct msm_i2s_ctl *pintf = &msm9x15_i2s_ctl;
+ u8 i2s_intf = 0, i2s_dir = 0, ret = 0;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ pr_debug("%s( ): Enable status Rx =%d Tx = %d\n",
+ __func__, pintf->intf_status[i2s_intf][MSM_DIR_RX],
+ pintf->intf_status[i2s_intf][MSM_DIR_TX]);
+ if (!msm9615_i2s_intf_dir_sel(cpu_dai->name, &i2s_intf, &i2s_dir)) {
+ pr_debug("%s( ): intf =%d dir = %d\n", __func__,
+ i2s_intf, i2s_dir);
+ if (i2s_dir == MSM_DIR_RX)
+ if (pintf->intf_status[i2s_intf][MSM_DIR_TX] > 0)
+ pr_err("%s: Shutdown Tx First then by RX\n",
+ __func__);
+ ret = msm9615_set_i2s_osr_bit_clk(cpu_dai, i2s_intf, i2s_dir,
+ MSM_I2S_CLK_SET_FALSE);
+ if (ret != 0)
+ pr_err("%s: Cannot disable I2S clock\n",
+ __func__);
+ pintf->intf_status[i2s_intf][i2s_dir]--;
+ mdm9615_i2s_free_gpios(i2s_intf, i2s_dir);
+ }
+ pr_debug("%s( ): Enable status Rx =%d Tx = %d\n", __func__,
+ pintf->intf_status[i2s_intf][MSM_DIR_RX],
+ pintf->intf_status[i2s_intf][MSM_DIR_TX]);
+}
+
/* snd_soc_ops shared by every tabla I2S back-end DAI link */
static struct snd_soc_ops msm9615_i2s_be_ops = {
	.startup = msm9615_i2s_startup,
	.shutdown = msm9615_i2s_shutdown,
	.hw_params = msm9615_i2s_hw_params,
};
+
static int mdm9615_audrx_init(struct snd_soc_pcm_runtime *rtd)
{
int err;
@@ -896,6 +1646,7 @@
return 0;
}
+
static int mdm9615_startup(struct snd_pcm_substream *substream)
{
pr_debug("%s(): substream = %s stream = %d\n", __func__,
@@ -1107,8 +1858,37 @@
};
-static struct snd_soc_dai_link mdm9615_dai_delta_tabla[] = {
- /* Backend DAI Links */
+static struct snd_soc_dai_link mdm9615_dai_i2s_tabla[] = {
+ /* Backend I2S DAI Links */
+ {
+ .name = LPASS_BE_PRI_I2S_RX,
+ .stream_name = "Primary I2S Playback",
+ .cpu_dai_name = "msm-dai-q6.0",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tabla_codec",
+ .codec_dai_name = "tabla_i2s_rx1",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_I2S_RX,
+ .init = &msm9615_i2s_audrx_init,
+ .be_hw_params_fixup = msm9615_i2s_rx_be_hw_params_fixup,
+ .ops = &msm9615_i2s_be_ops,
+ },
+ {
+ .name = LPASS_BE_PRI_I2S_TX,
+ .stream_name = "Primary I2S Capture",
+ .cpu_dai_name = "msm-dai-q6.1",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tabla_codec",
+ .codec_dai_name = "tabla_i2s_tx1",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_I2S_TX,
+ .be_hw_params_fixup = msm9615_i2s_tx_be_hw_params_fixup,
+ .ops = &msm9615_i2s_be_ops,
+ },
+};
+
+static struct snd_soc_dai_link mdm9615_dai_slimbus_tabla[] = {
+ /* Backend SlimBus DAI Links */
{
.name = LPASS_BE_SLIMBUS_0_RX,
.stream_name = "Slimbus Playback",
@@ -1136,14 +1916,17 @@
},
};
-static struct snd_soc_dai_link mdm9615_dai[
+static struct snd_soc_dai_link mdm9615_i2s_dai[
ARRAY_SIZE(mdm9615_dai_common) +
- ARRAY_SIZE(mdm9615_dai_delta_tabla)];
+ ARRAY_SIZE(mdm9615_dai_i2s_tabla)];
+
+static struct snd_soc_dai_link mdm9615_slimbus_dai[
+ ARRAY_SIZE(mdm9615_dai_common) +
+ ARRAY_SIZE(mdm9615_dai_slimbus_tabla)];
+
static struct snd_soc_card snd_soc_card_mdm9615 = {
.name = "mdm9615-tabla-snd-card",
- .dai_link = mdm9615_dai,
- .num_links = ARRAY_SIZE(mdm9615_dai),
};
static struct platform_device *mdm9615_snd_device;
@@ -1199,6 +1982,11 @@
}
}
+void __init install_codec_i2s_gpio(void)
+{
+ msm_gpiomux_install(msm9615_audio_prim_i2s_codec_configs,
+ ARRAY_SIZE(msm9615_audio_prim_i2s_codec_configs));
+}
static int __init mdm9615_audio_init(void)
{
int ret;
@@ -1220,11 +2008,28 @@
kfree(mbhc_cfg.calibration);
return -ENOMEM;
}
-
- memcpy(mdm9615_dai, mdm9615_dai_common, sizeof(mdm9615_dai_common));
- memcpy(mdm9615_dai + ARRAY_SIZE(mdm9615_dai_common),
- mdm9615_dai_delta_tabla, sizeof(mdm9615_dai_delta_tabla));
-
+ pr_err("%s: Interface Type = %d\n", __func__,
+ wcd9xxx_get_intf_type());
+ if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ memcpy(mdm9615_slimbus_dai, mdm9615_dai_common,
+ sizeof(mdm9615_dai_common));
+ memcpy(mdm9615_slimbus_dai + ARRAY_SIZE(mdm9615_dai_common),
+ mdm9615_dai_slimbus_tabla,
+ sizeof(mdm9615_dai_slimbus_tabla));
+ snd_soc_card_mdm9615.dai_link = mdm9615_slimbus_dai;
+ snd_soc_card_mdm9615.num_links =
+ ARRAY_SIZE(mdm9615_slimbus_dai);
+ } else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) {
+ install_codec_i2s_gpio();
+ memcpy(mdm9615_i2s_dai, mdm9615_dai_common,
+ sizeof(mdm9615_dai_common));
+ memcpy(mdm9615_i2s_dai + ARRAY_SIZE(mdm9615_dai_common),
+ mdm9615_dai_i2s_tabla,
+ sizeof(mdm9615_dai_i2s_tabla));
+ snd_soc_card_mdm9615.dai_link = mdm9615_i2s_dai;
+ snd_soc_card_mdm9615.num_links =
+ ARRAY_SIZE(mdm9615_i2s_dai);
+ }
platform_set_drvdata(mdm9615_snd_device, &snd_soc_card_mdm9615);
ret = platform_device_add(mdm9615_snd_device);
if (ret) {
@@ -1232,13 +2037,15 @@
kfree(mbhc_cfg.calibration);
return ret;
}
-
if (mdm9615_configure_headset_mic_gpios()) {
pr_err("%s Fail to configure headset mic gpios\n", __func__);
mdm9615_headset_gpios_configured = 0;
} else
mdm9615_headset_gpios_configured = 1;
+ msm9x15_i2s_ctl.sif_virt_addr = ioremap(LPASS_SIF_MUX_ADDR, 4);
+ msm9x15_i2s_ctl.spare_virt_addr = ioremap(LPAIF_SPARE_ADDR, 4);
+
return ret;
}
@@ -1253,6 +2060,9 @@
mdm9615_free_headset_mic_gpios();
platform_device_unregister(mdm9615_snd_device);
kfree(mbhc_cfg.calibration);
+ iounmap(msm9x15_i2s_ctl.sif_virt_addr);
+ iounmap(msm9x15_i2s_ctl.spare_virt_addr);
+
}
module_exit(mdm9615_audio_exit);
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 50f527f..14f4f61 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -1219,18 +1219,6 @@
.ignore_suspend = 1,
/* .be_id = do not care */
},
- /* MI2S TX Hostless */
- {
- .name = "MI2S_TX Hostless",
- .stream_name = "MI2S_TX Hostless",
- .cpu_dai_name = "MI2S_TX_HOSTLESS",
- .platform_name = "msm-pcm-hostless",
- .dynamic = 1,
- .dsp_link = &tx_hl_media,
- .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
- .no_codec = 1,
- .ignore_suspend = 1,
- },
/* HDMI Hostless */
{
.name = "HDMI_RX_HOSTLESS",
@@ -1243,6 +1231,18 @@
.no_codec = 1,
.ignore_suspend = 1,
},
+ /* MI2S TX Hostless */
+ {
+ .name = "MI2S_TX Hostless",
+ .stream_name = "MI2S_TX Hostless",
+ .cpu_dai_name = "MI2S_TX_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dsp_link = &tx_hl_media,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .no_codec = 1,
+ .ignore_suspend = 1,
+ },
/* Secondary I2S RX Hostless */
{
.name = "SEC_I2S_RX Hostless",
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index 776337d..68f218f 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -192,6 +192,7 @@
struct msm_audio *prtd = &compr->prtd;
struct asm_aac_cfg aac_cfg;
struct asm_wma_cfg wma_cfg;
+ struct asm_wmapro_cfg wma_pro_cfg;
int ret;
pr_debug("compressed stream prepare\n");
@@ -250,6 +251,26 @@
if (ret < 0)
pr_err("%s: CMD Format block failed\n", __func__);
break;
+ case SND_AUDIOCODEC_WMA_PRO:
+ pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
+ memset(&wma_pro_cfg, 0x0, sizeof(struct asm_wmapro_cfg));
+ wma_pro_cfg.format_tag = compr->info.codec_param.codec.format;
+ wma_pro_cfg.ch_cfg = compr->info.codec_param.codec.ch_in;
+ wma_pro_cfg.sample_rate = runtime->rate;
+ wma_pro_cfg.avg_bytes_per_sec =
+ compr->info.codec_param.codec.bit_rate/8;
+ wma_pro_cfg.block_align = compr->info.codec_param.codec.align;
+ wma_pro_cfg.valid_bits_per_sample =
+ compr->info.codec_param.codec.options.wma.bits_per_sample;
+ wma_pro_cfg.ch_mask =
+ compr->info.codec_param.codec.options.wma.channelmask;
+ wma_pro_cfg.encode_opt =
+ compr->info.codec_param.codec.options.wma.encodeopt;
+ ret = q6asm_media_format_block_wmapro(prtd->audio_client,
+ &wma_pro_cfg);
+ if (ret < 0)
+ pr_err("%s: CMD Format block failed\n", __func__);
+ break;
default:
return -EINVAL;
}
@@ -316,6 +337,7 @@
compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3_PASS_THROUGH;
compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_WMA;
+ compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_WMA_PRO;
/* Add new codecs here */
}
@@ -638,6 +660,10 @@
pr_debug("SND_AUDIOCODEC_WMA\n");
compr->codec = FORMAT_WMA_V9;
break;
+ case SND_AUDIOCODEC_WMA_PRO:
+ pr_debug("SND_AUDIOCODEC_WMA_PRO\n");
+ compr->codec = FORMAT_WMA_V10PRO;
+ break;
default:
pr_debug("FORMAT_LINEAR_PCM\n");
compr->codec = FORMAT_LINEAR_PCM;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 6c44cba..a050771 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -256,21 +256,21 @@
{
.playback = {
.stream_name = "AUXPCM Hostless Playback",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
.rate_min = 8000,
- .rate_max = 8000,
+ .rate_max = 16000,
},
.capture = {
.stream_name = "AUXPCM Hostless Capture",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 16000,
},
.ops = &msm_fe_dai_ops,
.name = "AUXPCM_HOSTLESS",
diff --git a/sound/soc/msm/msm-dai-q6-hdmi.c b/sound/soc/msm/msm-dai-q6-hdmi.c
index 3333344..dfb090e 100644
--- a/sound/soc/msm/msm-dai-q6-hdmi.c
+++ b/sound/soc/msm/msm-dai-q6-hdmi.c
@@ -38,6 +38,46 @@
union afe_port_config port_config;
};
+static int msm_dai_q6_hdmi_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+ int value = ucontrol->value.integer.value[0];
+ dai_data->port_config.hdmi_multi_ch.data_type = value;
+ pr_debug("%s: value = %d\n", __func__, value);
+ return 0;
+}
+
+static int msm_dai_q6_hdmi_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+ ucontrol->value.integer.value[0] =
+ dai_data->port_config.hdmi_multi_ch.data_type;
+ return 0;
+}
+
+
+/* HDMI format field for AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG command
+ * 0: linear PCM
+ * 1: non-linear PCM
+ */
/* enum texts: index 0 selects linear PCM, index 1 compressed pass-through */
static const char *hdmi_format[] = {
	"LPCM",
	"Compr"
};

/* two-entry enum backing the "HDMI RX Format" mixer control */
static const struct soc_enum hdmi_config_enum[] = {
	SOC_ENUM_SINGLE_EXT(2, hdmi_format),
};

/* registered from msm_dai_q6_hdmi_dai_probe() with dai_data as
 * private_data for the get/put handlers */
static const struct snd_kcontrol_new hdmi_config_controls[] = {
	SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
				msm_dai_q6_hdmi_format_get,
				msm_dai_q6_hdmi_format_put),
};
/* Current implementation assumes hw_param is called once
* This may not be the case but what to do when ADM and AFE
@@ -54,7 +94,6 @@
dai_data->channels = params_channels(params);
dai_data->rate = params_rate(params);
- dai_data->port_config.hdmi_multi_ch.data_type = 0;
dai_data->port_config.hdmi_multi_ch.reserved = 0;
switch (dai_data->channels) {
@@ -78,9 +117,11 @@
return -EINVAL;
}
dev_dbg(dai->dev, "%s() num_ch = %u rate =%u"
- " channel_allocation = %u\n", __func__, dai_data->channels,
+ " channel_allocation = %u data type = %d\n", __func__,
+ dai_data->channels,
dai_data->rate,
- dai_data->port_config.hdmi_multi_ch.channel_allocation);
+ dai_data->port_config.hdmi_multi_ch.channel_allocation,
+ dai_data->port_config.hdmi_multi_ch.data_type);
return 0;
}
@@ -168,6 +209,7 @@
static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
{
struct msm_dai_q6_hdmi_dai_data *dai_data;
+ const struct snd_kcontrol_new *kcontrol;
int rc = 0;
dai_data = kzalloc(sizeof(struct msm_dai_q6_hdmi_dai_data),
@@ -180,6 +222,10 @@
} else
dev_set_drvdata(dai->dev, dai_data);
+ kcontrol = &hdmi_config_controls[0];
+
+ rc = snd_ctl_add(dai->card->snd_card,
+ snd_ctl_new1(kcontrol, dai_data));
return rc;
}
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index fb7756c..a62541a 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -532,17 +532,28 @@
}
dai_data->channels = params_channels(params);
- if (params_rate(params) != 8000) {
- dev_err(dai->dev, "AUX PCM supports only 8KHz sampling rate\n");
+ dai_data->rate = params_rate(params);
+ switch (dai_data->rate) {
+ case 8000:
+ dai_data->port_config.pcm.mode = auxpcm_pdata->mode_8k.mode;
+ dai_data->port_config.pcm.sync = auxpcm_pdata->mode_8k.sync;
+ dai_data->port_config.pcm.frame = auxpcm_pdata->mode_8k.frame;
+ dai_data->port_config.pcm.quant = auxpcm_pdata->mode_8k.quant;
+ dai_data->port_config.pcm.slot = auxpcm_pdata->mode_8k.slot;
+ dai_data->port_config.pcm.data = auxpcm_pdata->mode_8k.data;
+ break;
+ case 16000:
+ dai_data->port_config.pcm.mode = auxpcm_pdata->mode_16k.mode;
+ dai_data->port_config.pcm.sync = auxpcm_pdata->mode_16k.sync;
+ dai_data->port_config.pcm.frame = auxpcm_pdata->mode_16k.frame;
+ dai_data->port_config.pcm.quant = auxpcm_pdata->mode_16k.quant;
+ dai_data->port_config.pcm.slot = auxpcm_pdata->mode_16k.slot;
+ dai_data->port_config.pcm.data = auxpcm_pdata->mode_16k.data;
+ break;
+ default:
+ dev_err(dai->dev, "AUX PCM supports only 8kHz and 16kHz sampling rate\n");
return -EINVAL;
}
- dai_data->rate = params_rate(params);
- dai_data->port_config.pcm.mode = auxpcm_pdata->mode;
- dai_data->port_config.pcm.sync = auxpcm_pdata->sync;
- dai_data->port_config.pcm.frame = auxpcm_pdata->frame;
- dai_data->port_config.pcm.quant = auxpcm_pdata->quant;
- dai_data->port_config.pcm.slot = auxpcm_pdata->slot;
- dai_data->port_config.pcm.data = auxpcm_pdata->data;
return 0;
}
@@ -702,9 +713,9 @@
{
struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
int rc = 0;
-
struct msm_dai_auxpcm_pdata *auxpcm_pdata =
(struct msm_dai_auxpcm_pdata *) dai->dev->platform_data;
+ unsigned long pcm_clk_rate;
mutex_lock(&aux_pcm_mutex);
@@ -753,8 +764,17 @@
afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
+ if (dai_data->rate == 8000) {
+ pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
+ } else if (dai_data->rate == 16000) {
+ pcm_clk_rate = auxpcm_pdata->mode_8k.pcm_clk_rate;
+ } else {
+ dev_err(dai->dev, "%s: Invalid AUX PCM rate %d\n", __func__,
+ dai_data->rate);
+ return -EINVAL;
+ }
- rc = clk_set_rate(pcm_clk, auxpcm_pdata->pcm_clk_rate);
+ rc = clk_set_rate(pcm_clk, pcm_clk_rate);
if (rc < 0) {
pr_err("%s: clk_set_rate failed\n", __func__);
return rc;
@@ -1377,11 +1397,11 @@
static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_rx_dai = {
.playback = {
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
- .rate_max = 8000,
+ .rate_max = 16000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_auxpcm_ops,
@@ -1391,11 +1411,11 @@
static struct snd_soc_dai_driver msm_dai_q6_aux_pcm_tx_dai = {
.capture = {
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 1,
.channels_max = 1,
- .rate_max = 8000,
+ .rate_max = 16000,
.rate_min = 8000,
},
.ops = &msm_dai_q6_auxpcm_ops,
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 02cc6ce..7fbb592 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -1125,6 +1125,18 @@
msm_routing_put_voice_mixer),
};
/* Voice front-ends (CS voice, VoIP, voice stub) that can be mixed into
 * the MI2S_RX back-end; wired up via "MI2S_RX_Voice Mixer" in the DAPM
 * widget/route tables. */
static const struct snd_kcontrol_new mi2s_rx_voice_mixer_controls[] = {
	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_MI2S_RX,
	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
	msm_routing_put_voice_mixer),
	SOC_SINGLE_EXT("Voip", MSM_BACKEND_DAI_MI2S_RX,
	MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
	msm_routing_put_voice_mixer),
	SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_MI2S_RX,
	MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
	msm_routing_put_voice_stub_mixer),
};
+
static const struct snd_kcontrol_new afe_pcm_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -1258,6 +1270,9 @@
SOC_SINGLE_EXT("STUB_1_TX_HL", MSM_BACKEND_DAI_EXTPROC_EC_TX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
+ msm_routing_put_voice_stub_mixer),
};
static const struct snd_kcontrol_new sbus_0_rx_port_mixer_controls[] = {
@@ -1294,6 +1309,9 @@
SOC_SINGLE_EXT("INTERNAL_BT_SCO_RX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
MSM_BACKEND_DAI_INT_BT_SCO_RX, 1, 0, msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_BACKEND_DAI_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
};
static const struct snd_kcontrol_new bt_sco_rx_port_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_INT_BT_SCO_RX,
@@ -1320,6 +1338,12 @@
msm_routing_put_port_mixer),
};
/* Port-level loopback: routes SLIMBUS_1_TX capture directly into the
 * MI2S_RX back-end ("MI2S_RX Port Mixer" DAPM widget). */
static const struct snd_kcontrol_new mi2s_rx_port_mixer_controls[] = {
	SOC_SINGLE_EXT("SLIM_1_TX", MSM_BACKEND_DAI_MI2S_RX,
	MSM_BACKEND_DAI_SLIMBUS_1_TX, 1, 0, msm_routing_get_port_mixer,
	msm_routing_put_port_mixer),
};
+
static const struct snd_kcontrol_new fm_switch_mixer_controls =
SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
0, 1, 0, msm_routing_get_switch_mixer,
@@ -1667,6 +1691,10 @@
SND_SOC_NOPM, 0, 0,
hdmi_rx_voice_mixer_controls,
ARRAY_SIZE(hdmi_rx_voice_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MI2S_RX_Voice Mixer",
+ SND_SOC_NOPM, 0, 0,
+ mi2s_rx_voice_mixer_controls,
+ ARRAY_SIZE(mi2s_rx_voice_mixer_controls)),
SND_SOC_DAPM_MIXER("Voice_Tx Mixer",
SND_SOC_NOPM, 0, 0, tx_voice_mixer_controls,
ARRAY_SIZE(tx_voice_mixer_controls)),
@@ -1714,6 +1742,8 @@
SND_SOC_DAPM_MIXER("SLIMBUS_3_RX Port Mixer",
SND_SOC_NOPM, 0, 0, sbus_3_rx_port_mixer_controls,
ARRAY_SIZE(sbus_3_rx_port_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MI2S_RX Port Mixer", SND_SOC_NOPM, 0, 0,
+ mi2s_rx_port_mixer_controls, ARRAY_SIZE(mi2s_rx_port_mixer_controls)),
};
static const struct snd_soc_dapm_route intercon[] = {
@@ -1880,6 +1910,7 @@
{"Voice Stub Tx Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
{"Voice Stub Tx Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"Voice Stub Tx Mixer", "STUB_1_TX_HL", "STUB_1_TX"},
+ {"Voice Stub Tx Mixer", "MI2S_TX", "MI2S_TX"},
{"VOICE_STUB_UL", NULL, "Voice Stub Tx Mixer"},
{"STUB_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -1887,6 +1918,8 @@
{"SLIMBUS_1_RX Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"SLIMBUS_1_RX", NULL, "SLIMBUS_1_RX Mixer"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
+ {"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
{"SLIMBUS_3_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX_Voice Mixer"},
@@ -1896,6 +1929,7 @@
{"INTERNAL_BT_SCO_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
{"INT_BT_SCO_RX", NULL, "INTERNAL_BT_SCO_RX Port Mixer"},
{"SLIMBUS_3_RX Port Mixer", "INTERNAL_BT_SCO_RX", "INT_BT_SCO_RX"},
+ {"SLIMBUS_3_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
{"SLIMBUS_3_RX", NULL, "SLIMBUS_3_RX Port Mixer"},
@@ -1904,6 +1938,9 @@
{"SEC_I2S_RX Port Mixer", "MI2S_TX", "MI2S_TX"},
{"SEC_I2S_RX", NULL, "SEC_I2S_RX Port Mixer"},
+
+ {"MI2S_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
+ {"MI2S_RX", NULL, "MI2S_RX Port Mixer"},
};
static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/msm/msm8660-apq-wm8903.c b/sound/soc/msm/msm8660-apq-wm8903.c
index 15a01d7..e697c3f 100644
--- a/sound/soc/msm/msm8660-apq-wm8903.c
+++ b/sound/soc/msm/msm8660-apq-wm8903.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -188,7 +188,7 @@
return ret;
}
- wm8903_mclk = clk_get(NULL, "i2s_mic_osr_clk");
+ wm8903_mclk = clk_get_sys(NULL, "i2s_mic_osr_clk");
if (IS_ERR(wm8903_mclk)) {
pr_err("Failed to get i2s_mic_osr_clk\n");
gpio_free(MSM_CDC_MIC_I2S_MCLK);
@@ -308,7 +308,7 @@
pr_err("cpu_dai set_fmt error\n");
return ret;
}
- spkr_osr_clk = clk_get(NULL, "i2s_spkr_osr_clk");
+ spkr_osr_clk = clk_get_sys(NULL, "i2s_spkr_osr_clk");
if (IS_ERR(spkr_osr_clk)) {
pr_err("Failed to get i2s_spkr_osr_clk\n");
return PTR_ERR(spkr_osr_clk);
@@ -320,7 +320,7 @@
clk_put(spkr_osr_clk);
return ret;
}
- spkr_bit_clk = clk_get(NULL, "i2s_spkr_bit_clk");
+ spkr_bit_clk = clk_get_sys(NULL, "i2s_spkr_bit_clk");
if (IS_ERR(spkr_bit_clk)) {
pr_err("Failed to get i2s_spkr_bit_clk\n");
clk_disable_unprepare(spkr_osr_clk);
@@ -351,7 +351,7 @@
return ret;
}
- mic_bit_clk = clk_get(NULL, "i2s_mic_bit_clk");
+ mic_bit_clk = clk_get_sys(NULL, "i2s_mic_bit_clk");
if (IS_ERR(mic_bit_clk)) {
pr_err("Failed to get i2s_mic_bit_clk\n");
return PTR_ERR(mic_bit_clk);
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index a2c6f5f..2762bd6 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/mfd/pm8xxx/misc.h>
+#include <linux/mfd/pm8xxx/spk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -36,6 +36,10 @@
#define BTSCO_RATE_8KHZ 8000
#define BTSCO_RATE_16KHZ 16000
+#define SPK_AMP_POS 0x1
+#define SPK_AMP_NEG 0x2
+#define SPKR_BOOST_GPIO 15
+#define DEFAULT_PMIC_SPK_GAIN 0x0D
#define SITAR_EXT_CLK_RATE 12288000
#define SITAR_MBHC_DEF_BUTTONS 3
@@ -44,7 +48,9 @@
static int msm8930_spk_control;
static int msm8930_slim_0_rx_ch = 1;
static int msm8930_slim_0_tx_ch = 1;
+static int msm8930_pmic_spk_gain = DEFAULT_PMIC_SPK_GAIN;
+static int msm8930_ext_spk_pamp;
static int msm8930_btsco_rate = BTSCO_RATE_8KHZ;
static int msm8930_btsco_ch = 1;
@@ -111,11 +117,107 @@
return 1;
}
/*
 * msm8960_ext_spk_power_amp_on() - turn on the external speaker amp.
 * @spk: amp line mask (SPK_AMP_POS and/or SPK_AMP_NEG).
 *
 * Requests are latched into msm8930_ext_spk_pamp; the amp is only
 * physically enabled once BOTH the positive and negative lines have
 * been requested.  On MTP/Fluid hardware the boost-converter GPIO is
 * driven high before the PMIC speaker driver is enabled.
 */
static void msm8960_ext_spk_power_amp_on(u32 spk)
{
	int ret = 0;

	if (spk & (SPK_AMP_POS | SPK_AMP_NEG)) {
		if ((msm8930_ext_spk_pamp & SPK_AMP_POS) &&
			(msm8930_ext_spk_pamp & SPK_AMP_NEG)) {

			/* both lines already on: nothing to do */
			pr_debug("%s() External Bottom Speaker Ampl already "
				"turned on. spk = 0x%08x\n", __func__, spk);
			return;
		}

		msm8930_ext_spk_pamp |= spk;

		if ((msm8930_ext_spk_pamp & SPK_AMP_POS) &&
			(msm8930_ext_spk_pamp & SPK_AMP_NEG)) {

			if (machine_is_msm8930_mtp()
				|| machine_is_msm8930_fluid()) {
				pr_debug("%s: Configure Speaker Boost GPIO %u",
						__func__, SPKR_BOOST_GPIO);
				ret = gpio_request(SPKR_BOOST_GPIO,
						"SPKR_BOOST_EN");
				if (ret) {
					/* NOTE(review): the pamp bits stay
					 * latched on this failure path -
					 * verify the off path rebalances
					 * state. */
					pr_err("%s: Failed to configure speaker boost "
						"gpio %u\n", __func__, SPKR_BOOST_GPIO);
					return;
				}

				pr_debug("%s: Enable Speaker boost gpio %u\n",
					__func__, SPKR_BOOST_GPIO);
				gpio_direction_output(SPKR_BOOST_GPIO, 1);
			}

			pm8xxx_spk_enable(MSM8930_SPK_ON);
			/* settle delay for the amp rails */
			pr_debug("%s: slepping 4 ms after turning on external "
				" Left Speaker Ampl\n", __func__);
			usleep_range(4000, 4000);
		}

	} else {

		pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
			__func__, spk);
		return;
	}
}
+
+static void msm8960_ext_spk_power_amp_off(u32 spk)
+{
+ if (spk & (SPK_AMP_POS | SPK_AMP_NEG)) {
+ if (!msm8930_ext_spk_pamp)
+ return;
+ if (machine_is_msm8930_mtp()
+ || machine_is_msm8930_fluid()) {
+ pr_debug("%s: Free speaker boost gpio %u\n",
+ __func__, SPKR_BOOST_GPIO);
+ gpio_direction_output(SPKR_BOOST_GPIO, 0);
+ gpio_free(SPKR_BOOST_GPIO);
+ }
+
+ pm8xxx_spk_enable(MSM8930_SPK_OFF);
+ msm8930_ext_spk_pamp = 0;
+ pr_debug("%s: slepping 4 ms after turning on external "
+ " Left Speaker Ampl\n", __func__);
+ usleep_range(4000, 4000);
+
+ } else {
+
+ pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n",
+ __func__, spk);
+ return;
+ }
+}
+
static int msm8930_spkramp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
- /* TODO: add external speaker power amps support */
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if (!strncmp(w->name, "Ext Spk Left Pos", 17))
+ msm8960_ext_spk_power_amp_on(SPK_AMP_POS);
+ else if (!strncmp(w->name, "Ext Spk Left Neg", 17))
+ msm8960_ext_spk_power_amp_on(SPK_AMP_NEG);
+ else {
+ pr_err("%s() Invalid Speaker Widget = %s\n",
+ __func__, w->name);
+ return -EINVAL;
+ }
+ } else {
+ if (!strncmp(w->name, "Ext Spk Left Pos", 17))
+ msm8960_ext_spk_power_amp_off(SPK_AMP_POS);
+ else if (!strncmp(w->name, "Ext Spk Left Neg", 17))
+ msm8960_ext_spk_power_amp_off(SPK_AMP_NEG);
+ else {
+ pr_err("%s() Invalid Speaker Widget = %s\n",
+ __func__, w->name);
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -174,7 +276,7 @@
msm8930_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SPK("Ext Spk Left Pos", msm8930_spkramp_event),
- SND_SOC_DAPM_SPK("Ext Spk Left Neg", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk Left Neg", msm8930_spkramp_event),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Digital Mic1", NULL),
@@ -331,6 +433,39 @@
return 0;
}
+static const char *pmic_spk_gain_text[] = {
+ "NEG_6_DB", "NEG_4_DB", "NEG_2_DB", "ZERO_DB", "POS_2_DB", "POS_4_DB",
+ "POS_6_DB", "POS_8_DB", "POS_10_DB", "POS_12_DB", "POS_14_DB",
+ "POS_16_DB", "POS_18_DB", "POS_20_DB", "POS_22_DB", "POS_24_DB"
+};
+
+static const struct soc_enum msm8960_pmic_spk_gain_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(pmic_spk_gain_text),
+ pmic_spk_gain_text),
+};
+
+static int msm8930_pmic_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm8930_pmic_spk_gain = %d\n", __func__,
+ msm8930_pmic_spk_gain);
+ ucontrol->value.integer.value[0] = msm8930_pmic_spk_gain;
+ return 0;
+}
+
+static int msm8930_pmic_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ msm8930_pmic_spk_gain = ucontrol->value.integer.value[0];
+ ret = pm8xxx_spk_gain(msm8930_pmic_spk_gain);
+ pr_debug("%s: msm8930_pmic_spk_gain = %d"
+ " ucontrol->value.integer.value[0] = %d\n", __func__,
+ msm8930_pmic_spk_gain,
+ (int) ucontrol->value.integer.value[0]);
+ return ret;
+}
+
static const struct snd_kcontrol_new sitar_msm8930_controls[] = {
SOC_ENUM_EXT("Speaker Function", msm8930_enum[0], msm8930_get_spk,
msm8930_set_spk),
@@ -338,6 +473,8 @@
msm8930_slim_0_rx_ch_get, msm8930_slim_0_rx_ch_put),
SOC_ENUM_EXT("SLIM_0_TX Channels", msm8930_enum[2],
msm8930_slim_0_tx_ch_get, msm8930_slim_0_tx_ch_put),
+ SOC_ENUM_EXT("PMIC SPK Gain", msm8960_pmic_spk_gain_enum[0],
+ msm8930_pmic_gain_get, msm8930_pmic_gain_put),
};
static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
@@ -542,6 +679,9 @@
mbhc_cfg.gpio_irq = gpio_to_irq(mbhc_cfg.gpio);
sitar_hs_detect(codec, &mbhc_cfg);
+ /* Initialize default PMIC speaker gain */
+ pm8xxx_spk_gain(DEFAULT_PMIC_SPK_GAIN);
+
return 0;
}
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index f78f58d..8f0fa32 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -41,8 +41,8 @@
#define msm8960_SLIM_0_RX_MAX_CHANNELS 2
#define msm8960_SLIM_0_TX_MAX_CHANNELS 4
-#define BTSCO_RATE_8KHZ 8000
-#define BTSCO_RATE_16KHZ 16000
+#define SAMPLE_RATE_8KHZ 8000
+#define SAMPLE_RATE_16KHZ 16000
#define BOTTOM_SPK_AMP_POS 0x1
#define BOTTOM_SPK_AMP_NEG 0x2
@@ -61,6 +61,7 @@
#define JACK_DETECT_GPIO 38
#define JACK_DETECT_INT PM8921_GPIO_IRQ(PM8921_IRQ_BASE, JACK_DETECT_GPIO)
+#define JACK_US_EURO_SEL_GPIO 35
static u32 top_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(18);
static u32 bottom_spk_pamp_gpio = PM8921_GPIO_PM_TO_SYS(19);
@@ -70,9 +71,11 @@
static int msm8960_slim_0_rx_ch = 1;
static int msm8960_slim_0_tx_ch = 1;
-static int msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+static int msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
static int msm8960_btsco_ch = 1;
+static int msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+
static struct clk *codec_clk;
static int clk_users;
@@ -91,6 +94,7 @@
static int msm8960_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
bool dapm);
+static bool msm8960_swap_gnd_mic(struct snd_soc_codec *codec);
static struct tabla_mbhc_config mbhc_cfg = {
.headset_jack = &hs_jack,
@@ -103,8 +107,11 @@
.gpio = 0,
.gpio_irq = 0,
.gpio_level_insert = 1,
+ .swap_gnd_mic = NULL,
};
+static u32 us_euro_sel_gpio = PM8921_GPIO_PM_TO_SYS(JACK_US_EURO_SEL_GPIO);
+
static struct mutex cdc_mclk_mutex;
static void msm8960_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
@@ -372,6 +379,15 @@
return r;
}
+static bool msm8960_swap_gnd_mic(struct snd_soc_codec *codec)
+{
+ int value = gpio_get_value_cansleep(us_euro_sel_gpio);
+ pr_debug("%s: US EURO select switch %d to %d\n", __func__, value,
+ !value);
+ gpio_set_value_cansleep(us_euro_sel_gpio, !value);
+ return true;
+}
+
static int msm8960_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -512,6 +528,11 @@
SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
+static const char *auxpcm_rate_text[] = {"rate_8000", "rate_16000"};
+static const struct soc_enum msm8960_auxpcm_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text),
+};
+
static int msm8960_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -563,19 +584,49 @@
{
switch (ucontrol->value.integer.value[0]) {
case 0:
- msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
break;
case 1:
- msm8960_btsco_rate = BTSCO_RATE_16KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_16KHZ;
break;
default:
- msm8960_btsco_rate = BTSCO_RATE_8KHZ;
+ msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
break;
}
pr_debug("%s: msm8960_btsco_rate = %d\n", __func__, msm8960_btsco_rate);
return 0;
}
+static int msm8960_auxpcm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm8960_auxpcm_rate = %d", __func__,
+ msm8960_auxpcm_rate);
+ ucontrol->value.integer.value[0] = msm8960_auxpcm_rate;
+ return 0;
+}
+
+static int msm8960_auxpcm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+ break;
+ case 1:
+ msm8960_auxpcm_rate = SAMPLE_RATE_16KHZ;
+ break;
+ default:
+ msm8960_auxpcm_rate = SAMPLE_RATE_8KHZ;
+ break;
+ }
+ pr_debug("%s: msm8960_auxpcm_rate = %d"
+ "ucontrol->value.integer.value[0] = %d\n", __func__,
+ msm8960_auxpcm_rate,
+ (int)ucontrol->value.integer.value[0]);
+ return 0;
+}
+
static const struct snd_kcontrol_new tabla_msm8960_controls[] = {
SOC_ENUM_EXT("Speaker Function", msm8960_enum[0], msm8960_get_spk,
msm8960_set_spk),
@@ -590,6 +641,11 @@
msm8960_btsco_rate_get, msm8960_btsco_rate_put),
};
+static const struct snd_kcontrol_new auxpcm_rate_mixer_controls[] = {
+ SOC_ENUM_EXT("AUX PCM SampleRate", msm8960_auxpcm_enum[0],
+ msm8960_auxpcm_rate_get, msm8960_auxpcm_rate_put),
+};
+
static int msm8960_btsco_init(struct snd_soc_pcm_runtime *rtd)
{
int err = 0;
@@ -603,6 +659,19 @@
return 0;
}
+static int msm8960_auxpcm_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int err = 0;
+ struct snd_soc_platform *platform = rtd->platform;
+
+ err = snd_soc_add_platform_controls(platform,
+ auxpcm_rate_mixer_controls,
+ ARRAY_SIZE(auxpcm_rate_mixer_controls));
+ if (err < 0)
+ return err;
+ return 0;
+}
+
static void *def_tabla_mbhc_cal(void)
{
void *tabla_cal;
@@ -795,7 +864,7 @@
err = snd_soc_jack_new(codec, "Headset Jack",
(SND_JACK_HEADSET | SND_JACK_OC_HPHL |
- SND_JACK_OC_HPHR),
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
&hs_jack);
if (err) {
pr_err("failed to create new jack\n");
@@ -811,6 +880,9 @@
codec_clk = clk_get(cpu_dai->dev, "osr_clk");
+ if (machine_is_msm8960_cdp())
+ mbhc_cfg.swap_gnd_mic = msm8960_swap_gnd_mic;
+
if (hs_detect_use_gpio) {
mbhc_cfg.gpio = PM8921_GPIO_PM_TO_SYS(JACK_DETECT_GPIO);
mbhc_cfg.gpio_irq = JACK_DETECT_INT;
@@ -819,8 +891,8 @@
if (mbhc_cfg.gpio) {
err = pm8xxx_gpio_config(mbhc_cfg.gpio, &jack_gpio_cfg);
if (err) {
- pr_err("%s: pm8xxx_gpio_config failed %d\n", __func__,
- err);
+ pr_err("%s: pm8xxx_gpio_config JACK_DETECT failed %d\n",
+ __func__, err);
return err;
}
}
@@ -951,8 +1023,8 @@
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- /* PCM only supports mono output with 8khz sample rate */
- rate->min = rate->max = 8000;
+ rate->min = rate->max = msm8960_auxpcm_rate;
+ /* PCM only supports mono output */
channels->min = channels->max = 1;
return 0;
@@ -1279,6 +1351,7 @@
.platform_name = "msm-pcm-routing",
.codec_name = "msm-stub-codec.1",
.codec_dai_name = "msm-stub-rx",
+ .init = &msm8960_auxpcm_init,
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
.be_hw_params_fixup = msm8960_auxpcm_be_params_fixup,
@@ -1468,19 +1541,19 @@
else
gpio_direction_output(PM8921_GPIO_PM_TO_SYS(23), 0);
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(35), "US_EURO_SWITCH");
+ ret = gpio_request(us_euro_sel_gpio, "US_EURO_SWITCH");
if (ret) {
pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
+ us_euro_sel_gpio);
gpio_free(PM8921_GPIO_PM_TO_SYS(23));
return ret;
}
-	ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(35), &param);
+	ret = pm8xxx_gpio_config(us_euro_sel_gpio, &param);
if (ret)
pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
+ us_euro_sel_gpio);
else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(35), 0);
+ gpio_direction_output(us_euro_sel_gpio, 0);
return 0;
}
@@ -1488,7 +1561,7 @@
{
if (msm8960_headset_gpios_configured) {
gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- gpio_free(PM8921_GPIO_PM_TO_SYS(35));
+ gpio_free(us_euro_sel_gpio);
}
}
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index ba5c79d..09bfd94 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -38,7 +38,6 @@
#include <mach/peripheral-loader.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
-#include <mach/msm_subsystem_map.h>
#include <sound/apr_audio.h>
#include <sound/q6asm.h>
@@ -258,7 +257,7 @@
"%ld\n", __func__,
PTR_ERR((void *)port->buf[cnt].mem_buffer));
else {
- if (msm_subsystem_unmap_buffer(
+ if (iounmap(
port->buf[cnt].mem_buffer) < 0)
pr_err("%s: unmap buffer"
" failed\n", __func__);
@@ -328,7 +327,7 @@
"%ld\n", __func__,
PTR_ERR((void *)port->buf[0].mem_buffer));
else {
- if (msm_subsystem_unmap_buffer(
+ if (iounmap(
port->buf[0].mem_buffer) < 0)
pr_err("%s: unmap buffer"
" failed\n", __func__);
@@ -527,6 +526,7 @@
pr_err("%s: ION create client"
" for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[cnt].handle = ion_alloc
@@ -537,6 +537,7 @@
pr_err("%s: ION memory"
" allocation for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -549,6 +550,7 @@
pr_err("%s: ION Get Physical"
" for AUDIO failed, rc = %d\n",
__func__, rc);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -559,6 +561,7 @@
buf[cnt].data)) {
pr_err("%s: ION memory"
" mapping for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
memset((void *)buf[cnt].data, 0, bufsz);
@@ -574,11 +577,8 @@
mutex_unlock(&ac->cmd_lock);
goto fail;
}
- flags = MSM_SUBSYSTEM_MAP_KADDR |
- MSM_SUBSYSTEM_MAP_CACHED;
buf[cnt].mem_buffer =
- msm_subsystem_map_buffer(buf[cnt].phys,
- bufsz, flags, NULL, 0);
+ ioremap(buf[cnt].phys, bufsz);
if (IS_ERR(
(void *)buf[cnt].mem_buffer)) {
pr_err("%s:map_buffer failed,"
@@ -588,7 +588,7 @@
goto fail;
}
buf[cnt].data =
- buf[cnt].mem_buffer->vaddr;
+ buf[cnt].mem_buffer;
if (!buf[cnt].data) {
pr_err("%s:invalid vaddr,"
" iomap failed\n", __func__);
@@ -665,6 +665,7 @@
buf[0].client = msm_ion_client_create(UINT_MAX, "audio_client");
if (IS_ERR_OR_NULL((void *)buf[0].client)) {
pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K,
@@ -672,6 +673,7 @@
if (IS_ERR_OR_NULL((void *) buf[0].handle)) {
pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
@@ -680,12 +682,14 @@
if (rc) {
pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
__func__, rc);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0);
if (IS_ERR_OR_NULL((void *) buf[0].data)) {
pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
goto fail;
}
memset((void *)buf[0].data, 0, (bufsz * bufcnt));
@@ -700,9 +704,7 @@
goto fail;
}
- flags = MSM_SUBSYSTEM_MAP_KADDR | MSM_SUBSYSTEM_MAP_CACHED;
- buf[0].mem_buffer = msm_subsystem_map_buffer(buf[0].phys,
- bufsz * bufcnt, flags, NULL, 0);
+ buf[0].mem_buffer = ioremap(buf[0].phys, bufsz * bufcnt);
if (IS_ERR((void *)buf[cnt].mem_buffer)) {
pr_err("%s:map_buffer failed,"
"error = %ld\n",
@@ -711,7 +713,7 @@
mutex_unlock(&ac->cmd_lock);
goto fail;
}
- buf[0].data = buf[0].mem_buffer->vaddr;
+ buf[0].data = buf[0].mem_buffer;
#endif
if (!buf[0].data) {
pr_err("%s:invalid vaddr,"
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2284f19..b6ddcc8a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1543,10 +1543,17 @@
trace_snd_soc_dapm_widget_power(w, power);
- if (power)
+ if (power) {
dapm_seq_insert(w, &up_list, true);
- else
+ dev_dbg(w->dapm->dev,
+ "%s(): power up . widget %s\n",
+ __func__, w->name);
+ } else {
dapm_seq_insert(w, &down_list, false);
+ dev_dbg(w->dapm->dev,
+ "%s(): power down . widget %s\n",
+ __func__, w->name);
+ }
w->power = power;
break;